hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
08eb92b5d4c0713aa84587aaceac1a5e5f5315a0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "cuda_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>

namespace fastertransformer{

#define FINAL_MASK 0xffffffff
#define CUDART_PI_F 3.141592654f

// GELU activation (tanh approximation); the cdf factor is evaluated in fp32.
template <typename T>
__inline__ __device__
T gelu(T x)
{
  float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
  return x * cdf;
}

// half2 specialization: both lanes are widened to fp32 for the tanh, then
// the result is rounded back to half precision.
template <>
__inline__ __device__
half2 gelu(half2 val)
{
  half2 val_pow3 = __hmul2(val, __hmul2(val, val));
  float2 tmp_pow = __half22float2(val_pow3);
  float2 tmp = __half22float2(val);

  tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
  tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
  return __hmul2(val, __float22half2_rn(tmp));
}

// Butterfly (xor-shuffle) sum across the 32 lanes of a warp; every lane
// ends up holding the warp total.
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
  for(int mask = 16; mask > 0; mask >>= 1)
    val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
  return val;
}

// Block-wide sum built from per-warp reductions staged through shared memory.
// NOTE(review): assumes blockDim.x is a multiple of 32 and <= 1024; partials
// from a trailing partial warp would be gated out by (blockDim.x >> 5).
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
  static __shared__ T shared[32];
  int lane = threadIdx.x & 0x1f;  // lane index within the warp
  int wid = threadIdx.x >> 5;     // warp index within the block

  val = warpReduceSum<T>(val);

  if(lane == 0)
    shared[wid] = val;
  __syncthreads();

  // The first warp reduces the per-warp partials; other lanes contribute 0.
  val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f;
  val = warpReduceSum(val);
  return val;
}

// out[row * n + col] = gelu(out[row * n + col] + bias[col]) over an m x n
// matrix. Blocks stride over rows; threads cover columns in chunks of
// blockDim.x (n must be a multiple of blockDim.x).
template <typename T>
__global__
void add_bias_act(T* out, const T* bias, int m, int n)
{
  T val, reg_bias;

  int row_id = blockIdx.x;
  int ite = n / blockDim.x;
  int tid = threadIdx.x;

  for(int i = 0; i < ite; ++i)
  {
    reg_bias = __ldg(&bias[i * blockDim.x + tid]);
    row_id = blockIdx.x;

    while(row_id < m)
    {
      val = out[tid + i * blockDim.x + row_id * n] + reg_bias;
      out[tid + i * blockDim.x + row_id * n] = gelu<T>(val);
      row_id += gridDim.x;
    }
  }
}

// Half-precision specialization: two elements per thread via half2
// (n must be a multiple of 2 * blockDim.x).
template <>
__global__
void add_bias_act(__half* out, const __half* bias, int m, int n)
{
  half2 val, reg_bias;
  int row_id = blockIdx.x;
  int ite = n / blockDim.x / 2;
  int tid = threadIdx.x;

  half2* out_ptr = (half2*) out;
  const half2* bias_ptr = (const half2*) bias;  // fixed: cast no longer drops const

  for(int i = 0; i < ite; ++i)
  {
    reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]);
    row_id = blockIdx.x;

    while(row_id < m)
    {
      val = out_ptr[tid + i * blockDim.x + row_id * n / 2];
      val = __hadd2(val, reg_bias);
      out_ptr[tid + i * blockDim.x + row_id * n / 2] = gelu<half2>(val);
      row_id += gridDim.x;
    }
  }
}

// out = layernorm(out + input + bias) * gamma + beta, row-wise over an
// m x n matrix. Launched with one block per row and blockDim.x == n, so each
// thread owns exactly one element (held in local_out across both passes).
template <typename T>
__global__
void add_bias_input_layernorm(T* out, const T* input, const T* bias,
                              const T* gamma, const T* beta, int m, int n)
{
  int tid = threadIdx.x;

  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;

  float local_out = 0.0f;
  for(int i = tid; i < n; i += blockDim.x)
    local_out += (float)(out[blockIdx.x * n + i] + input[blockIdx.x * n + i] + __ldg(&bias[i]));

  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();

  variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean));
  if(threadIdx.x == 0)
    s_variance = variance / n + 1e-6f;  // epsilon folded into the variance
  __syncthreads();

  for(int i = tid; i < n; i += blockDim.x)
    out[blockIdx.x * n + i] =
        (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}

// Half-precision specialization: one block per row, blockDim.x == n / 2,
// each thread owns one half2 pair.
template <>
__global__
void add_bias_input_layernorm(__half* out, const __half* input, const __half* bias,
                              const __half* gamma, const __half* beta, int m, int n)
{
  int tid = threadIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float2 local_out_fp2;

  half2* out_ptr = (half2*)out;
  const half2* input_ptr = (const half2*)input;
  const half2* bias_ptr = (const half2*)bias;
  const half2* gamma_ptr = (const half2*)gamma;
  const half2* beta_ptr = (const half2*)beta;

  float local_out = 0.0f;
  int id = blockIdx.x * n / 2 + tid;
  local_out_fp2 = __half22float2(__hadd2(__hadd2(out_ptr[id], input_ptr[id]), __ldg(&bias_ptr[tid])));
  local_out += local_out_fp2.x;
  local_out += local_out_fp2.y;

  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();

  variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
  variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
  variance = blockReduceSum<float>(variance);
  if(threadIdx.x == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);  // note: stores 1/stddev here
  __syncthreads();

  float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
  float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
  local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
  local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
  out_ptr[id] = __float22half2_rn(local_out_fp2);
}

// Launch add_bias_act with m/4 blocks of n/4 threads.
// BUGFIX: the assert was inverted (block.x > 1024). HIP/CUDA blocks are
// limited to 1024 threads, so the precondition is block.x <= 1024; the old
// form aborted on every valid configuration and passed on invalid ones.
template <typename T>
void add_bias_act_kernelLauncher(T* out, const T* bias, int m, int n, hipStream_t stream)
{
  dim3 grid(m / 4);
  dim3 block(n / 4);
  assert(block.x <= 1024);
  hipLaunchKernelGGL(( add_bias_act<T>), dim3(grid), dim3(block), 0, stream, out, bias, m, n);
}

// Launch the fused add-bias + layernorm kernel: one block per row, n threads
// per block. BUGFIX: assert inverted; the requirement is n <= 1024.
template<typename T>
void add_bias_input_layernorm_kernelLauncher(T* out, const T* input, const T* bias,
  const T* gamma, const T* beta, int m, int n, hipStream_t stream)
{
  assert(n <= 1024);
  dim3 grid(m);
  dim3 block(n);
  hipLaunchKernelGGL(( add_bias_input_layernorm<T>), dim3(grid), dim3(block), 0, stream,
      out, input, bias, gamma, beta, m, n);
}

// Half specialization: two elements per thread, so n/2 threads per block.
// BUGFIX: assert inverted; the requirement is n / 2 <= 1024.
template <>
void add_bias_input_layernorm_kernelLauncher(__half* out, const __half* input,
  const __half* bias, const __half* gamma, const __half* beta, int m, int n, hipStream_t stream)
{
  assert(n / 2 <= 1024);
  dim3 grid(m);
  dim3 block(n / 2);
  hipLaunchKernelGGL(( add_bias_input_layernorm<__half>), dim3(grid), dim3(block), 0, stream,
      out, input, bias, gamma, beta, m, n);
}

template void add_bias_act_kernelLauncher<float>(
  float* out, const float* bias, int m, int n, hipStream_t stream);

template void add_bias_input_layernorm_kernelLauncher<float>(
  float* out, const float* input, const float* bias, const float* gamma,
  const float* beta, int m, int n, hipStream_t stream);

template void add_bias_act_kernelLauncher<__half>(
  __half* out, const __half* bias, int m, int n, hipStream_t stream);

template void add_bias_input_layernorm_kernelLauncher<__half>(
  __half* out, const __half* input, const __half* bias, const __half* gamma,
  const __half* beta, int m, int n, hipStream_t stream);

}//namespace
08eb92b5d4c0713aa84587aaceac1a5e5f5315a0.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "cuda_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>

namespace fastertransformer{

#define FINAL_MASK 0xffffffff
#define CUDART_PI_F 3.141592654f

// GELU activation (tanh approximation); the cdf factor is evaluated in fp32.
template <typename T>
__inline__ __device__
T gelu(T x)
{
  float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
  return x * cdf;
}

// half2 specialization: both lanes are widened to fp32 for the tanh, then
// the result is rounded back to half precision.
template <>
__inline__ __device__
half2 gelu(half2 val)
{
  half2 val_pow3 = __hmul2(val, __hmul2(val, val));
  float2 tmp_pow = __half22float2(val_pow3);
  float2 tmp = __half22float2(val);

  tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
  tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
  return __hmul2(val, __float22half2_rn(tmp));
}

// Butterfly (xor-shuffle) sum across the 32 lanes of a warp; every lane
// ends up holding the warp total.
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
  for(int mask = 16; mask > 0; mask >>= 1)
    val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
  return val;
}

// Block-wide sum built from per-warp reductions staged through shared memory.
// NOTE(review): assumes blockDim.x is a multiple of 32 and <= 1024; partials
// from a trailing partial warp would be gated out by (blockDim.x >> 5).
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
  static __shared__ T shared[32];
  int lane = threadIdx.x & 0x1f;  // lane index within the warp
  int wid = threadIdx.x >> 5;     // warp index within the block

  val = warpReduceSum<T>(val);

  if(lane == 0)
    shared[wid] = val;
  __syncthreads();

  // The first warp reduces the per-warp partials; other lanes contribute 0.
  val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f;
  val = warpReduceSum(val);
  return val;
}

// out[row * n + col] = gelu(out[row * n + col] + bias[col]) over an m x n
// matrix. Blocks stride over rows; threads cover columns in chunks of
// blockDim.x (n must be a multiple of blockDim.x).
template <typename T>
__global__
void add_bias_act(T* out, const T* bias, int m, int n)
{
  T val, reg_bias;

  int row_id = blockIdx.x;
  int ite = n / blockDim.x;
  int tid = threadIdx.x;

  for(int i = 0; i < ite; ++i)
  {
    reg_bias = __ldg(&bias[i * blockDim.x + tid]);
    row_id = blockIdx.x;

    while(row_id < m)
    {
      val = out[tid + i * blockDim.x + row_id * n] + reg_bias;
      out[tid + i * blockDim.x + row_id * n] = gelu<T>(val);
      row_id += gridDim.x;
    }
  }
}

// Half-precision specialization: two elements per thread via half2
// (n must be a multiple of 2 * blockDim.x).
template <>
__global__
void add_bias_act(__half* out, const __half* bias, int m, int n)
{
  half2 val, reg_bias;
  int row_id = blockIdx.x;
  int ite = n / blockDim.x / 2;
  int tid = threadIdx.x;

  half2* out_ptr = (half2*) out;
  const half2* bias_ptr = (const half2*) bias;  // fixed: cast no longer drops const

  for(int i = 0; i < ite; ++i)
  {
    reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]);
    row_id = blockIdx.x;

    while(row_id < m)
    {
      val = out_ptr[tid + i * blockDim.x + row_id * n / 2];
      val = __hadd2(val, reg_bias);
      out_ptr[tid + i * blockDim.x + row_id * n / 2] = gelu<half2>(val);
      row_id += gridDim.x;
    }
  }
}

// out = layernorm(out + input + bias) * gamma + beta, row-wise over an
// m x n matrix. Launched with one block per row and blockDim.x == n, so each
// thread owns exactly one element (held in local_out across both passes).
template <typename T>
__global__
void add_bias_input_layernorm(T* out, const T* input, const T* bias,
                              const T* gamma, const T* beta, int m, int n)
{
  int tid = threadIdx.x;

  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;

  float local_out = 0.0f;
  for(int i = tid; i < n; i += blockDim.x)
    local_out += (float)(out[blockIdx.x * n + i] + input[blockIdx.x * n + i] + __ldg(&bias[i]));

  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();

  variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean));
  if(threadIdx.x == 0)
    s_variance = variance / n + 1e-6f;  // epsilon folded into the variance
  __syncthreads();

  for(int i = tid; i < n; i += blockDim.x)
    out[blockIdx.x * n + i] =
        (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}

// Half-precision specialization: one block per row, blockDim.x == n / 2,
// each thread owns one half2 pair.
template <>
__global__
void add_bias_input_layernorm(__half* out, const __half* input, const __half* bias,
                              const __half* gamma, const __half* beta, int m, int n)
{
  int tid = threadIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float2 local_out_fp2;

  half2* out_ptr = (half2*)out;
  const half2* input_ptr = (const half2*)input;
  const half2* bias_ptr = (const half2*)bias;
  const half2* gamma_ptr = (const half2*)gamma;
  const half2* beta_ptr = (const half2*)beta;

  float local_out = 0.0f;
  int id = blockIdx.x * n / 2 + tid;
  local_out_fp2 = __half22float2(__hadd2(__hadd2(out_ptr[id], input_ptr[id]), __ldg(&bias_ptr[tid])));
  local_out += local_out_fp2.x;
  local_out += local_out_fp2.y;

  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();

  variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
  variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
  variance = blockReduceSum<float>(variance);
  if(threadIdx.x == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);  // note: stores 1/stddev here
  __syncthreads();

  float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
  float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
  local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
  local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
  out_ptr[id] = __float22half2_rn(local_out_fp2);
}

// Launch add_bias_act with m/4 blocks of n/4 threads.
// BUGFIX: the assert was inverted (block.x > 1024). CUDA blocks are limited
// to 1024 threads, so the precondition is block.x <= 1024; the old form
// aborted on every valid configuration and passed on invalid ones.
template <typename T>
void add_bias_act_kernelLauncher(T* out, const T* bias, int m, int n, cudaStream_t stream)
{
  dim3 grid(m / 4);
  dim3 block(n / 4);
  assert(block.x <= 1024);
  add_bias_act<T><<<grid, block, 0, stream>>>(out, bias, m, n);
}

// Launch the fused add-bias + layernorm kernel: one block per row, n threads
// per block. BUGFIX: assert inverted; the requirement is n <= 1024.
template<typename T>
void add_bias_input_layernorm_kernelLauncher(T* out, const T* input, const T* bias,
  const T* gamma, const T* beta, int m, int n, cudaStream_t stream)
{
  assert(n <= 1024);
  dim3 grid(m);
  dim3 block(n);
  add_bias_input_layernorm<T><<<grid, block, 0, stream>>>(out, input, bias, gamma, beta, m, n);
}

// Half specialization: two elements per thread, so n/2 threads per block.
// BUGFIX: assert inverted; the requirement is n / 2 <= 1024.
template <>
void add_bias_input_layernorm_kernelLauncher(__half* out, const __half* input,
  const __half* bias, const __half* gamma, const __half* beta, int m, int n, cudaStream_t stream)
{
  assert(n / 2 <= 1024);
  dim3 grid(m);
  dim3 block(n / 2);
  add_bias_input_layernorm<__half><<<grid, block, 0, stream>>>(out, input, bias, gamma, beta, m, n);
}

template void add_bias_act_kernelLauncher<float>(
  float* out, const float* bias, int m, int n, cudaStream_t stream);

template void add_bias_input_layernorm_kernelLauncher<float>(
  float* out, const float* input, const float* bias, const float* gamma,
  const float* beta, int m, int n, cudaStream_t stream);

template void add_bias_act_kernelLauncher<__half>(
  __half* out, const __half* bias, int m, int n, cudaStream_t stream);

template void add_bias_input_layernorm_kernelLauncher<__half>(
  __half* out, const __half* input, const __half* bias, const __half* gamma,
  const __half* beta, int m, int n, cudaStream_t stream);

}//namespace
d464b59d6275028bfa04cd3f91ce88308873f1df.hip
// !!! This is a file automatically generated by hipify!!!
#include "core/gpu/euler_2d.cuh"
#include "core/gpu/reduce.cuh"
#include "core/grid/grid.h"

#include <hip/hip_runtime.h>

#include <algorithm>

// Grid-stride kernel computing the global minimum edge length and maximum
// characteristic wave speed over all cells (inputs to the CFL dt estimate).
// workspace[0] accumulates the min length, workspace[1] the max speed; the
// host must pre-initialize both.
// NOTE(review): atomicMin/atomicMax on floating-point types are not standard
// HIP/CUDA intrinsics — presumably supplied by reduce.cuh; verify.
template <class float_type, int warps_count>
__global__ void euler_2d_calculate_dt_gpu_kernel (
  float_type gamma,
  const grid_topology topology,
  const grid_geometry geometry,
  float_type *workspace,
  const float_type *p_rho, const float_type *p_u,
  const float_type *p_v, const float_type *p_p)
{
  const unsigned int first_cell_id = blockDim.x * blockIdx.x + threadIdx.x;
  const unsigned int stride = blockDim.x * gridDim.x;

  float_type min_len = std::numeric_limits<float_type>::max ();
  // NOTE(review): numeric_limits::min() is the smallest *positive* value for
  // floating-point types (lowest() is the most negative). It works here only
  // because every compared speed is fabs(...) >= 0 — confirm intent.
  float_type max_speed = std::numeric_limits<float_type>::min ();

  for (unsigned int cell_id = first_cell_id; cell_id < topology.get_cells_count (); cell_id += stride)
    {
      const float_type rho = p_rho[cell_id];
      const float_type p = p_p[cell_id];
      const float_type a = speed_of_sound_in_gas (gamma, p, rho);  // local sound speed
      const float_type u = p_u[cell_id];
      const float_type v = p_v[cell_id];

      // Fastest characteristic per direction: |velocity ± sound speed|.
      max_speed = fmax (max_speed, fmax (fabs (u + a), fabs (u - a)));
      max_speed = fmax (max_speed, fmax (fabs (v + a), fabs (v - a)));

      // Shortest edge of any cell bounds the spatial step in the CFL ratio.
      for (unsigned int edge_id = 0; edge_id < topology.get_edges_count (cell_id); edge_id++)
        {
          const float_type edge_len = geometry.get_edge_area (cell_id, edge_id);
          if (edge_len < min_len)
            min_len = edge_len;
        }
    }

  min_len = block_reduce <float_type, reduce_operation::min, warps_count> (min_len);
  max_speed = block_reduce <float_type, reduce_operation::max, warps_count> (max_speed);

  // One atomic per block publishes the block-level results.
  if (threadIdx.x == 0)
    {
      atomicMin (workspace + 0, min_len);
      atomicMax (workspace + 1, max_speed);
    }
}

// Host wrapper: computes dt = cfl * min_edge_length / max_wave_speed on the
// GPU using a two-element device workspace. Synchronous (blocking memcpys).
template <class float_type>
float_type euler_2d_calculate_dt_gpu (
  float_type gamma, float_type cfl,
  const grid_topology &topology, const grid_geometry &geometry,
  float_type *workspace,
  const float_type *p_rho, const float_type *p_u,
  const float_type *p_v, const float_type *p_p)
{
  float_type cpu_workspace_copy[2];
  float_type &min_len = cpu_workspace_copy[0];
  float_type &max_speed = cpu_workspace_copy[1];

  // Seed the device-side running min/max (see kernel note about min()).
  min_len = std::numeric_limits<float_type>::max ();
  max_speed = std::numeric_limits<float_type>::min ();
  hipMemcpy (workspace, cpu_workspace_copy, 2 * sizeof (float_type), hipMemcpyHostToDevice);

  constexpr int warps_per_block = 32;
  constexpr int warp_size = 32;
  constexpr int threads_per_block = warps_per_block * warp_size;
  const int blocks = std::min ((topology.get_cells_count () + threads_per_block - 1) / threads_per_block, 1024u);

  // NOTE(review): warp_size is passed as the kernel's warps_count template
  // argument; with 1024 threads per block the warp count is also 32, so the
  // value is coincidentally correct, but warps_per_block reads as the
  // intended argument — confirm.
  hipLaunchKernelGGL(( euler_2d_calculate_dt_gpu_kernel<float_type, warp_size>) , dim3(blocks), dim3(threads_per_block), 0, 0,
      gamma, topology, geometry, workspace, p_rho, p_u, p_v, p_p);

  hipMemcpy (cpu_workspace_copy, workspace, 2 * sizeof (float_type), hipMemcpyDeviceToHost);

  float_type new_dt = cfl * min_len / max_speed;
  return new_dt;
}

// One thread per cell; each advances its cell one time step.
template <class float_type>
__global__ void euler_2d_calculate_next_time_step_gpu_kernel (
  float_type dt, float_type gamma,
  const grid_topology topology, const grid_geometry geometry,
  const float_type *p_rho, float_type *p_rho_next,
  const float_type *p_u,   float_type *p_u_next,
  const float_type *p_v,   float_type *p_v_next,
  const float_type *p_p,   float_type *p_p_next)
{
  const unsigned int cell_id = blockIdx.x * blockDim.x + threadIdx.x;

  if (cell_id < topology.get_cells_count ())
    euler_2d_calculate_next_cell_values (
        cell_id, dt, gamma, topology, geometry,
        p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}

// Host wrapper launching one thread per cell in 1024-thread blocks.
template <class float_type>
void euler_2d_calculate_next_time_step_gpu (
  float_type dt, float_type gamma,
  const grid_topology &topology, const grid_geometry &geometry,
  const float_type *p_rho, float_type *p_rho_next,
  const float_type *p_u,   float_type *p_u_next,
  const float_type *p_v,   float_type *p_v_next,
  const float_type *p_p,   float_type *p_p_next)
{
  constexpr int threads_per_block = 1024;
  const unsigned int blocks = (topology.get_cells_count () + threads_per_block - 1) / threads_per_block;
  hipLaunchKernelGGL(( euler_2d_calculate_next_time_step_gpu_kernel) , dim3(blocks), dim3(threads_per_block), 0, 0,
      dt, gamma, topology, geometry,
      p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}

// Explicit instantiations for float and double.
#define GEN_EULER_2D_INSTANCE_FOR(type)                     \
  template type euler_2d_calculate_dt_gpu <type>(           \
      type gamma, type cfl,                                 \
      const grid_topology &, const grid_geometry &,         \
      type *workspace, const type *p_rho,                   \
      const type *p_u, const type *p_v, const type *p_p);   \
  template void euler_2d_calculate_next_time_step_gpu (     \
      type dt, type gamma,                                  \
      const grid_topology &, const grid_geometry &,         \
      const type *p_rho, type *p_rho_next,                  \
      const type *p_u, type *p_u_next, const type *p_v,     \
      type *p_v_next, const type *p_p, type *p_p_next);

GEN_EULER_2D_INSTANCE_FOR (float)
GEN_EULER_2D_INSTANCE_FOR (double)

// BUGFIX: the original #undef named GEN_EULER_2D_INTERFACE_INSTANCE_FOR,
// a macro that was never defined; undef the macro actually defined above.
#undef GEN_EULER_2D_INSTANCE_FOR
d464b59d6275028bfa04cd3f91ce88308873f1df.cu
#include "core/gpu/euler_2d.cuh"
#include "core/gpu/reduce.cuh"
#include "core/grid/grid.h"

#include <cuda_runtime.h>

#include <algorithm>

// Grid-stride kernel computing the global minimum edge length and maximum
// characteristic wave speed over all cells (inputs to the CFL dt estimate).
// workspace[0] accumulates the min length, workspace[1] the max speed; the
// host must pre-initialize both.
// NOTE(review): atomicMin/atomicMax on floating-point types are not standard
// CUDA intrinsics — presumably supplied by reduce.cuh; verify.
template <class float_type, int warps_count>
__global__ void euler_2d_calculate_dt_gpu_kernel (
  float_type gamma,
  const grid_topology topology,
  const grid_geometry geometry,
  float_type *workspace,
  const float_type *p_rho, const float_type *p_u,
  const float_type *p_v, const float_type *p_p)
{
  const unsigned int first_cell_id = blockDim.x * blockIdx.x + threadIdx.x;
  const unsigned int stride = blockDim.x * gridDim.x;

  float_type min_len = std::numeric_limits<float_type>::max ();
  // NOTE(review): numeric_limits::min() is the smallest *positive* value for
  // floating-point types (lowest() is the most negative). It works here only
  // because every compared speed is fabs(...) >= 0 — confirm intent.
  float_type max_speed = std::numeric_limits<float_type>::min ();

  for (unsigned int cell_id = first_cell_id; cell_id < topology.get_cells_count (); cell_id += stride)
    {
      const float_type rho = p_rho[cell_id];
      const float_type p = p_p[cell_id];
      const float_type a = speed_of_sound_in_gas (gamma, p, rho);  // local sound speed
      const float_type u = p_u[cell_id];
      const float_type v = p_v[cell_id];

      // Fastest characteristic per direction: |velocity ± sound speed|.
      max_speed = fmax (max_speed, fmax (fabs (u + a), fabs (u - a)));
      max_speed = fmax (max_speed, fmax (fabs (v + a), fabs (v - a)));

      // Shortest edge of any cell bounds the spatial step in the CFL ratio.
      for (unsigned int edge_id = 0; edge_id < topology.get_edges_count (cell_id); edge_id++)
        {
          const float_type edge_len = geometry.get_edge_area (cell_id, edge_id);
          if (edge_len < min_len)
            min_len = edge_len;
        }
    }

  min_len = block_reduce <float_type, reduce_operation::min, warps_count> (min_len);
  max_speed = block_reduce <float_type, reduce_operation::max, warps_count> (max_speed);

  // One atomic per block publishes the block-level results.
  if (threadIdx.x == 0)
    {
      atomicMin (workspace + 0, min_len);
      atomicMax (workspace + 1, max_speed);
    }
}

// Host wrapper: computes dt = cfl * min_edge_length / max_wave_speed on the
// GPU using a two-element device workspace. Synchronous (blocking memcpys).
template <class float_type>
float_type euler_2d_calculate_dt_gpu (
  float_type gamma, float_type cfl,
  const grid_topology &topology, const grid_geometry &geometry,
  float_type *workspace,
  const float_type *p_rho, const float_type *p_u,
  const float_type *p_v, const float_type *p_p)
{
  float_type cpu_workspace_copy[2];
  float_type &min_len = cpu_workspace_copy[0];
  float_type &max_speed = cpu_workspace_copy[1];

  // Seed the device-side running min/max (see kernel note about min()).
  min_len = std::numeric_limits<float_type>::max ();
  max_speed = std::numeric_limits<float_type>::min ();
  cudaMemcpy (workspace, cpu_workspace_copy, 2 * sizeof (float_type), cudaMemcpyHostToDevice);

  constexpr int warps_per_block = 32;
  constexpr int warp_size = 32;
  constexpr int threads_per_block = warps_per_block * warp_size;
  const int blocks = std::min ((topology.get_cells_count () + threads_per_block - 1) / threads_per_block, 1024u);

  // NOTE(review): warp_size is passed as the kernel's warps_count template
  // argument; with 1024 threads per block the warp count is also 32, so the
  // value is coincidentally correct, but warps_per_block reads as the
  // intended argument — confirm.
  euler_2d_calculate_dt_gpu_kernel<float_type, warp_size> <<<blocks, threads_per_block>>> (
      gamma, topology, geometry, workspace, p_rho, p_u, p_v, p_p);

  cudaMemcpy (cpu_workspace_copy, workspace, 2 * sizeof (float_type), cudaMemcpyDeviceToHost);

  float_type new_dt = cfl * min_len / max_speed;
  return new_dt;
}

// One thread per cell; each advances its cell one time step.
template <class float_type>
__global__ void euler_2d_calculate_next_time_step_gpu_kernel (
  float_type dt, float_type gamma,
  const grid_topology topology, const grid_geometry geometry,
  const float_type *p_rho, float_type *p_rho_next,
  const float_type *p_u,   float_type *p_u_next,
  const float_type *p_v,   float_type *p_v_next,
  const float_type *p_p,   float_type *p_p_next)
{
  const unsigned int cell_id = blockIdx.x * blockDim.x + threadIdx.x;

  if (cell_id < topology.get_cells_count ())
    euler_2d_calculate_next_cell_values (
        cell_id, dt, gamma, topology, geometry,
        p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}

// Host wrapper launching one thread per cell in 1024-thread blocks.
template <class float_type>
void euler_2d_calculate_next_time_step_gpu (
  float_type dt, float_type gamma,
  const grid_topology &topology, const grid_geometry &geometry,
  const float_type *p_rho, float_type *p_rho_next,
  const float_type *p_u,   float_type *p_u_next,
  const float_type *p_v,   float_type *p_v_next,
  const float_type *p_p,   float_type *p_p_next)
{
  constexpr int threads_per_block = 1024;
  const unsigned int blocks = (topology.get_cells_count () + threads_per_block - 1) / threads_per_block;
  euler_2d_calculate_next_time_step_gpu_kernel <<<blocks, threads_per_block>>> (
      dt, gamma, topology, geometry,
      p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}

// Explicit instantiations for float and double.
#define GEN_EULER_2D_INSTANCE_FOR(type)                     \
  template type euler_2d_calculate_dt_gpu <type>(           \
      type gamma, type cfl,                                 \
      const grid_topology &, const grid_geometry &,         \
      type *workspace, const type *p_rho,                   \
      const type *p_u, const type *p_v, const type *p_p);   \
  template void euler_2d_calculate_next_time_step_gpu (     \
      type dt, type gamma,                                  \
      const grid_topology &, const grid_geometry &,         \
      const type *p_rho, type *p_rho_next,                  \
      const type *p_u, type *p_u_next, const type *p_v,     \
      type *p_v_next, const type *p_p, type *p_p_next);

GEN_EULER_2D_INSTANCE_FOR (float)
GEN_EULER_2D_INSTANCE_FOR (double)

// BUGFIX: the original #undef named GEN_EULER_2D_INTERFACE_INSTANCE_FOR,
// a macro that was never defined; undef the macro actually defined above.
#undef GEN_EULER_2D_INSTANCE_FOR
b07275d64c0564006dc440b3ec917c6b87fc15f0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <cstdlib>
#include <iostream>
#include <chrono>

// Abort with a readable message if a HIP runtime call fails.
// Every runtime call in this program was previously unchecked, so allocation
// or launch failures would surface only as wrong results.
#define HIP_CHECK(call)                                                   \
  do {                                                                    \
    hipError_t err_ = (call);                                             \
    if (err_ != hipSuccess) {                                             \
      fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,        \
              hipGetErrorString(err_));                                   \
      std::abort();                                                       \
    }                                                                     \
  } while (0)

// Kernel: grid-stride element-wise add, y[i] = x[i] + y[i] for i in [0, n).
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1 << 20; // 1048576 elements

  float *x, *y;
  // Unified memory accessible from both CPU and GPU.
  HIP_CHECK(hipMallocManaged(&x, N * sizeof(float)));
  HIP_CHECK(hipMallocManaged(&y, N * sizeof(float)));

  // Initialize x and y arrays on the host.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  auto start = std::chrono::steady_clock::now();

  // Run kernel on 1M elements on the GPU.
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  hipLaunchKernelGGL(( add ), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
  HIP_CHECK(hipGetLastError());        // catch launch-configuration errors
  HIP_CHECK(hipDeviceSynchronize());   // wait for GPU before host access

  // Check for errors (all values should be 3.0f). Use float intrinsics to
  // avoid silent promotion to double.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmaxf(maxError, fabsf(y[i] - 3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  auto end = std::chrono::steady_clock::now();
  std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;

  // Free memory.
  HIP_CHECK(hipFree(x));
  HIP_CHECK(hipFree(y));

  return 0;
}
b07275d64c0564006dc440b3ec917c6b87fc15f0.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <cstdlib>
#include <iostream>
#include <chrono>

// Abort with a readable message if a CUDA runtime call fails.
// Every runtime call in this program was previously unchecked, so allocation
// or launch failures would surface only as wrong results.
#define CUDA_CHECK(call)                                                  \
  do {                                                                    \
    cudaError_t err_ = (call);                                            \
    if (err_ != cudaSuccess) {                                            \
      fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,       \
              cudaGetErrorString(err_));                                  \
      std::abort();                                                       \
    }                                                                     \
  } while (0)

// Kernel: grid-stride element-wise add, y[i] = x[i] + y[i] for i in [0, n).
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1 << 20; // 1048576 elements

  float *x, *y;
  // Unified memory accessible from both CPU and GPU.
  CUDA_CHECK(cudaMallocManaged(&x, N * sizeof(float)));
  CUDA_CHECK(cudaMallocManaged(&y, N * sizeof(float)));

  // Initialize x and y arrays on the host.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  auto start = std::chrono::steady_clock::now();

  // Run kernel on 1M elements on the GPU.
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  add <<< numBlocks, blockSize >>> (N, x, y);
  CUDA_CHECK(cudaGetLastError());        // catch launch-configuration errors
  CUDA_CHECK(cudaDeviceSynchronize());   // wait for GPU before host access

  // Check for errors (all values should be 3.0f). Use float intrinsics to
  // avoid silent promotion to double.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmaxf(maxError, fabsf(y[i] - 3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  auto end = std::chrono::steady_clock::now();
  std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;

  // Free memory.
  CUDA_CHECK(cudaFree(x));
  CUDA_CHECK(cudaFree(y));

  return 0;
}
32cfd75917d26b10fb73a1e688dd6568b689974d.hip
// !!! This is a file automatically generated by hipify!!! /** * @file electric_density_map_cuda_kernel.cu * @author Yibo Lin * @date Aug 2018 */ #include <float.h> #include <cstdint> #include <math.h> #include <stdio.h> #include "hip/hip_runtime.h" #include "utility/src/utils.cuh" // local dependency #include "electric_potential/src/density_function.h" DREAMPLACE_BEGIN_NAMESPACE /// define triangle_density_function template <typename T> inline __device__ DEFINE_TRIANGLE_DENSITY_FUNCTION(T); /// define exact_density_function template <typename T> inline __device__ DEFINE_EXACT_DENSITY_FUNCTION(T); template <typename T, typename AtomicOp> __global__ void __launch_bounds__(1024, 8) computeTriangleDensityMap( const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, const int num_bins_y, const T xl, const T yl, const T xh, const T yh, const T half_bin_size_x, const T half_bin_size_y, const T bin_size_x, const T bin_size_y, const T inv_bin_size_x, const T inv_bin_size_y, AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor, const int *sorted_node_map ///< can be NULL if not sorted ) { int index = blockIdx.x * blockDim.z + threadIdx.z; if (index < num_nodes) { int i = (sorted_node_map) ? 
sorted_node_map[index] : index; // use stretched node size T node_size_x = node_size_x_clamped_tensor[i]; T node_size_y = node_size_y_clamped_tensor[i]; T node_x = x_tensor[i] + offset_x_tensor[i]; T node_y = y_tensor[i] + offset_y_tensor[i]; T ratio = ratio_tensor[i]; int bin_index_xl = int((node_x - xl) * inv_bin_size_x); int bin_index_xh = int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x); int bin_index_yl = int((node_y - yl) * inv_bin_size_y); int bin_index_yh = int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y); // update density potential map for (int k = bin_index_xl + threadIdx.y; k < bin_index_xh; k += blockDim.y) { T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x); T px_by_ratio = px * ratio; for (int h = bin_index_yl + threadIdx.x; h < bin_index_yh; h += blockDim.x) { T py = triangle_density_function(node_y, node_size_y, yl, h, bin_size_y); T area = px_by_ratio * py; atomic_add_op(&density_map_tensor[k * num_bins_y + h], area); } } } } /// @brief An unrolled way to compute the density map. /// Currently it is not as efficient as computeTriangleDensityMap, /// it has the potential to be better. /// It is not used for now. 
/// @brief Unrolled variant of the triangle density map kernel. One node per
/// (blockIdx.x, threadIdx.y) slot; threadIdx.x lanes split the bin columns.
/// The four switch cases specialize on whether the node spans a single bin
/// in x and/or y. Uses raw atomicAdd (non-deterministic accumulation order).
/// Kept for reference; not used for now.
template <typename T>
__global__ void computeTriangleDensityMapUnroll(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor,
    const T *node_size_y_clamped_tensor, const T *offset_x_tensor,
    const T *offset_y_tensor, const T *ratio_tensor,
    const T *bin_center_x_tensor, const T *bin_center_y_tensor,
    const int num_nodes, const int num_bins_x, const int num_bins_y,
    const T xl, const T yl, const T xh, const T yh, const T half_bin_size_x,
    const T half_bin_size_y, const T bin_size_x, const T bin_size_y,
    const T inv_bin_size_x, const T inv_bin_size_y, T *density_map_tensor,
    const int *sorted_node_map  ///< can be NULL if not sorted
) {
  int index = blockIdx.x * blockDim.y + threadIdx.y;
  if (index < num_nodes) {
    int i = (sorted_node_map) ? sorted_node_map[index] : index;

    T node_size_x = node_size_x_clamped_tensor[i];
    T node_size_y = node_size_y_clamped_tensor[i];
    T node_x = x_tensor[i] + offset_x_tensor[i];
    T node_y = y_tensor[i] + offset_y_tensor[i];
    T ratio = ratio_tensor[i];

    // Inclusive bin bounds here (unlike the exclusive bounds in
    // computeTriangleDensityMap), clamped into [0, num_bins-1].
    int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
    int bin_index_xh =
        int(((node_x + node_size_x - xl) * inv_bin_size_x));  // inclusive
    bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
    bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x - 1);

    int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
    int bin_index_yh =
        int(((node_y + node_size_y - yl) * inv_bin_size_y));  // inclusive
    bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
    bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y - 1);

    // update density potential map
    int k, h;

    // cond encodes (x collapsed to one bin) << 1 | (y collapsed to one bin).
    int cond = ((bin_index_xl == bin_index_xh) << 1) |
               (bin_index_yl == bin_index_yh);
    switch (cond) {
      case 0: {
        // Node spans multiple bins in both x and y: partial overlap in the
        // first/last bin of each axis, full bin size in between.
        T px_c = bin_size_x;
        T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
        T py_c = bin_size_y;
        T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl);
        T area_xc_yl = px_c * py_l * ratio;
        T area_xc_yc = px_c * py_c * ratio;
        T area_xc_yh = px_c * py_h * ratio;

        k = bin_index_xl;
        if (threadIdx.x == 0) {
          // Lane 0 handles the partially-covered leftmost bin column.
          T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
          T area_xl_yl = px_l * py_l * ratio;
          T area_xl_yc = px_l * py_c * ratio;
          T area_xl_yh = px_l * py_h * ratio;

          h = bin_index_yl;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl_yl);
          for (++h; h < bin_index_yh; ++h) {
            atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl_yc);
          }
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl_yh);

          k += blockDim.x;
        }
        // Interior bin columns are strided across lanes.
        for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) {
          h = bin_index_yl;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc_yl);
          for (++h; h < bin_index_yh; ++h) {
            atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc_yc);
          }
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc_yh);
        }
        // Exactly one lane lands on the rightmost (partial) column.
        if (k == bin_index_xh) {
          T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl);
          T area_xh_yl = px_h * py_l * ratio;
          T area_xh_yc = px_h * py_c * ratio;
          T area_xh_yh = px_h * py_h * ratio;

          h = bin_index_yl;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh_yl);
          for (++h; h < bin_index_yh; ++h) {
            atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh_yc);
          }
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh_yh);
        }
        return;
      }
      case 1: {
        // Single bin in y, multiple bins in x.
        T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
        h = bin_index_yl;

        k = bin_index_xl;
        if (threadIdx.x == 0) {
          T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
          T area_xl = px_l * py * ratio;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl);
          k += blockDim.x;
        }
        T px_c = bin_size_x;
        T area_xc = px_c * py * ratio;
        for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) {
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc);
        }
        if (k == bin_index_xh) {
          T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl);
          T area_xh = px_h * py * ratio;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh);
        }
        return;
      }
      case 2: {
        // Single bin in x, multiple bins in y.
        T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
        k = bin_index_xl;
        h = bin_index_yl;
        if (threadIdx.x == 0) {
          T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
          T area_yl = px * py_l * ratio;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_yl);
          h += blockDim.x;
        }
        T py_c = bin_size_y;
        T area_yc = px * py_c * ratio;
        for (h += threadIdx.x; h < bin_index_yh; h += blockDim.x) {
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_yc);
        }
        if (h == bin_index_yh) {
          T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl);
          T area_yh = px * py_h * ratio;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area_yh);
        }
        return;
      }
      case 3: {
        // Node fits entirely inside one bin; only lane 0 writes.
        if (threadIdx.x == 0) {
          T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
          T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
          T area = px * py * ratio;

          k = bin_index_xl;
          h = bin_index_yl;
          atomicAdd(&density_map_tensor[k * num_bins_y + h], area);
        }
        return;
      }
      default:
        assert(0);
    }
  }
}

/// @brief One-thread-per-node triangle density accumulation, mirroring the
/// CPU implementation. Used as a simple reference path.
/// density_map_tensor should be initialized outside.
template <typename T, typename AtomicOp>
__global__ void computeTriangleDensityMapSimpleLikeCPU(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor,
    const T *node_size_y_clamped_tensor, const T *offset_x_tensor,
    const T *offset_y_tensor, const T *ratio_tensor,
    const T *bin_center_x_tensor, const T *bin_center_y_tensor,
    const int num_nodes, const int num_bins_x, const int num_bins_y,
    const T xl, const T yl, const T xh, const T yh, const T bin_size_x,
    const T bin_size_y,
    // T* density_map_tensor
    AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor) {
  // density_map_tensor should be initialized outside
  T inv_bin_size_x = 1.0 / bin_size_x;
  T inv_bin_size_y = 1.0 / bin_size_y;
  // int num_bins = num_bins_x * num_bins_y;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_nodes) {
    // use stretched node size
    T node_size_x = node_size_x_clamped_tensor[i];
    T node_size_y = node_size_y_clamped_tensor[i];
    T node_x = x_tensor[i] + offset_x_tensor[i];
    T node_y = y_tensor[i] + offset_y_tensor[i];
    T ratio = ratio_tensor[i];

    int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
    int bin_index_xh =
        int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1;  // exclusive
    bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
    bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);

    int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
    int bin_index_yh =
        int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1;  // exclusive
    bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
    bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);

    // update density potential map
    for (int k = bin_index_xl; k < bin_index_xh; ++k) {
      T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x);
      T px_by_ratio = px * ratio;

      for (int h = bin_index_yl; h < bin_index_yh; ++h) {
        T py =
            triangle_density_function(node_y, node_size_y, yl, h, bin_size_y);
        T area = px_by_ratio * py;

        atomic_add_op(&density_map_tensor[k * num_bins_y + h], area);
      }
    }
  }
}

/// @brief Exact density accumulation with one thread per (node, impacted bin)
/// pair; 64-bit indexing guards against overflow of the flattened space.
template <typename T, typename AtomicOp>
__global__ void computeExactDensityMap(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor,
    const T *node_size_y_tensor, const T *bin_center_x_tensor,
    const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x,
    const int num_bins_y, const T xl, const T yl, const T xh, const T yh,
    const T bin_size_x, const T bin_size_y, const int num_impacted_bins_x,
    const int num_impacted_bins_y, bool fixed_node_flag, AtomicOp atomic_add_op,
    typename AtomicOp::type *density_map_tensor) {
  int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  int64_t bound =
      int64_t(num_nodes) * num_impacted_bins_x * num_impacted_bins_y;
  // rank-one update density map
  if (i < bound) {
    // Decompose the flat index into (node, residual bin offset).
    int node_id = i / (num_impacted_bins_x * num_impacted_bins_y);
    int residual_index =
        i - node_id * num_impacted_bins_x * num_impacted_bins_y;

    T bxl = x_tensor[node_id];
    T byl = y_tensor[node_id];
    T bxh = bxl + node_size_x_tensor[node_id];
    T byh = byl + node_size_y_tensor[node_id];

    // x direction
    int bin_index_xl = int((bxl - xl) / bin_size_x);
    bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
    int k = bin_index_xl + int(residual_index / num_impacted_bins_y);
    if (k + 1 > num_bins_x) {
      return;
    }

    // y direction
    int bin_index_yl = int((byl - yl) / bin_size_y);
    bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
    int h = bin_index_yl + (residual_index % num_impacted_bins_y);
    if (h + 1 > num_bins_y) {
      return;
    }

    T px = exact_density_function(bxl, bxh - bxl, bin_center_x_tensor[k],
                                  bin_size_x, xl, xh, fixed_node_flag);
    T py = exact_density_function(byl, byh - byl, bin_center_y_tensor[h],
                                  bin_size_y, yl, yh, fixed_node_flag);
    // still area
    atomic_add_op(&density_map_tensor[k * num_bins_y + h], px * py);
  }
}

/// @brief Compute exact density map using cell-by-cell parallelization
/// strategy: one thread per node, looping over all bins the node overlaps.
template <typename T, typename AtomicOp>
__global__ void computeExactDensityMapCellByCell(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor,
    const T *node_size_y_tensor, const T *bin_center_x_tensor,
    const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x,
    const int num_bins_y, const T xl, const T yl, const T xh, const T yh,
    const T bin_size_x, const T bin_size_y, const int num_impacted_bins_x,
    const int num_impacted_bins_y, bool fixed_node_flag, AtomicOp atomic_add_op,
    typename AtomicOp::type *density_map_tensor) {
  // Accumulate the exact overlap of box (bxl, byl, bxh, byh) into the map.
  auto box2bin = [&](T bxl, T byl, T bxh, T byh) {
    // x direction
    int bin_index_xl = int((bxl - xl) / bin_size_x);
    int bin_index_xh = int(ceil((bxh - xl) / bin_size_x)) + 1;  // exclusive
    bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
    bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);

    // y direction
    int bin_index_yl = int((byl - yl) / bin_size_y);
    int bin_index_yh = int(ceil((byh - yl) / bin_size_y)) + 1;  // exclusive
    bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
    bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);

    for (int k = bin_index_xl; k < bin_index_xh; ++k) {
      T px = exact_density_function(bxl, bxh - bxl, bin_center_x_tensor[k],
                                    bin_size_x, xl, xh, fixed_node_flag);
      for (int h = bin_index_yl; h < bin_index_yh; ++h) {
        T py = exact_density_function(byl, byh - byl, bin_center_y_tensor[h],
                                      bin_size_y, yl, yh, fixed_node_flag);
        // still area
        atomic_add_op(&density_map_tensor[k * num_bins_y + h], px * py);
      }
    }
  };

  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_nodes) {
    T bxl = x_tensor[i];
    T byl = y_tensor[i];
    T bxh = bxl + node_size_x_tensor[i];
    T byh = byl + node_size_y_tensor[i];
    box2bin(bxl, byl, bxh, byh);
  }
}

/// @brief Launch helper for the triangle density map kernel with a
/// dim3(2, 2, 64) block: 64 nodes per block, a 2x2 lane tile per node.
template <typename T, typename AtomicOp>
int computeTriangleDensityMapCallKernel(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor,
    const T *node_size_y_clamped_tensor, const T *offset_x_tensor,
    const T *offset_y_tensor, const T *ratio_tensor,
    const T *bin_center_x_tensor, const T *bin_center_y_tensor, int num_nodes,
    const int num_bins_x, const int num_bins_y, int num_impacted_bins_x,
    int num_impacted_bins_y, const T xl, const T yl, const T xh, const T yh,
    const T bin_size_x, const T bin_size_y, AtomicOp atomic_add_op,
    typename AtomicOp::type *density_map_tensor, const int *sorted_node_map) {
  int thread_count = 64;
  dim3 blockSize(2, 2, thread_count);

  int block_count = (num_nodes - 1 + thread_count) / thread_count;
  hipLaunchKernelGGL(( computeTriangleDensityMap), dim3(block_count), dim3(blockSize), 0, 0, 
      x_tensor, y_tensor, node_size_x_clamped_tensor,
      node_size_y_clamped_tensor, offset_x_tensor, offset_y_tensor,
      ratio_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes,
      num_bins_x, num_bins_y, xl, yl, xh, yh, bin_size_x / 2, bin_size_y / 2,
      bin_size_x, bin_size_y, 1 / bin_size_x, 1 / bin_size_y, atomic_add_op,
      density_map_tensor, sorted_node_map);

  // computeTriangleDensityMapSimpleLikeCPU<<<block_count, thread_count>>>(
  //     x_tensor, y_tensor,
  //     node_size_x_clamped_tensor, node_size_y_clamped_tensor,
  //     offset_x_tensor, offset_y_tensor,
  //     ratio_tensor,
  //     bin_center_x_tensor, bin_center_y_tensor,
  //     num_nodes,
  //     num_bins_x, num_bins_y,
  //     xl, yl, xh, yh,
  //     bin_size_x, bin_size_y,
  //     atomic_add_op,
  //     density_map_tensor
  //     );

  return 0;
}

/// @brief Entry point for the triangle (stretched) density map.
///
/// When deterministic_flag is set, accumulation runs in 64-bit fixed-point
/// (scale to integers, add with integer atomics, scale back) so the result is
/// bit-reproducible regardless of atomic scheduling order.
///
/// @return 0 on success.
template <typename T>
int computeTriangleDensityMapCudaLauncher(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor,
    const T *node_size_y_clamped_tensor, const T *offset_x_tensor,
    const T *offset_y_tensor, const T *ratio_tensor,
    const T *bin_center_x_tensor, const T *bin_center_y_tensor, int num_nodes,
    const int num_bins_x, const int num_bins_y, int num_impacted_bins_x,
    int num_impacted_bins_y, const T xl, const T yl, const T xh, const T yh,
    const T bin_size_x, const T bin_size_y, bool deterministic_flag,
    T *density_map_tensor, const int *sorted_node_map) {
  if (deterministic_flag)  // deterministic implementation using unsigned long
                           // as fixed point number
  {
    // total die area
    double diearea = (xh - xl) * (yh - yl);
    int integer_bits = max((int)ceil(log2(diearea)) + 1, 32);
    int fraction_bits = max(64 - integer_bits, 0);
    // Use 1ULL: fraction_bits can reach 32, and shifting 1UL by 32 is
    // undefined behavior on platforms where unsigned long is 32-bit (LLP64).
    unsigned long long int scale_factor = (1ULL << fraction_bits);
    int num_bins = num_bins_x * num_bins_y;
    unsigned long long int *scaled_density_map_tensor = NULL;
    allocateCUDA(scaled_density_map_tensor, num_bins, unsigned long long int);

    AtomicAddCUDA<unsigned long long int> atomic_add_op(scale_factor);

    int thread_count = 512;
    // Convert any pre-existing contents to fixed point before accumulating.
    hipLaunchKernelGGL(( copyScaleArray), dim3((num_bins + thread_count - 1) / thread_count), dim3(thread_count), 0, 0, 
        scaled_density_map_tensor, density_map_tensor, scale_factor, num_bins);
    computeTriangleDensityMapCallKernel<T, decltype(atomic_add_op)>(
        x_tensor, y_tensor, node_size_x_clamped_tensor,
        node_size_y_clamped_tensor, offset_x_tensor, offset_y_tensor,
        ratio_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes,
        num_bins_x, num_bins_y, num_impacted_bins_x, num_impacted_bins_y, xl,
        yl, xh, yh, bin_size_x, bin_size_y, atomic_add_op,
        scaled_density_map_tensor, sorted_node_map);
    // Convert the fixed-point accumulator back to floating point.
    hipLaunchKernelGGL(( copyScaleArray), dim3((num_bins + thread_count - 1) / thread_count), dim3(thread_count), 0, 0, 
        density_map_tensor, scaled_density_map_tensor, T(1.0 / scale_factor),
        num_bins);

    destroyCUDA(scaled_density_map_tensor);
  } else {
    AtomicAddCUDA<T> atomic_add_op;
    computeTriangleDensityMapCallKernel<T, decltype(atomic_add_op)>(
        x_tensor, y_tensor, node_size_x_clamped_tensor,
        node_size_y_clamped_tensor, offset_x_tensor, offset_y_tensor,
        ratio_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes,
        num_bins_x, num_bins_y, num_impacted_bins_x, num_impacted_bins_y, xl,
        yl, xh, yh, bin_size_x, bin_size_y, atomic_add_op, density_map_tensor,
        sorted_node_map);
  }

  return 0;
}

/// @brief Launch helper for the exact density map; uses the cell-by-cell
/// kernel (one thread per node).
template <typename T, typename AtomicOp>
int computeExactDensityMapCallKernel(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor,
    const T *node_size_y_tensor, const T *bin_center_x_tensor,
    const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x,
    const int num_bins_y, const int num_impacted_bins_x,
    const int num_impacted_bins_y, const T xl, const T yl, const T xh,
    const T yh, const T bin_size_x, const T bin_size_y, bool fixed_node_flag,
    AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor) {
  int thread_count = 512;
  // int block_count = (num_nodes * num_impacted_bins_x * num_impacted_bins_y
  //                    - 1 + thread_count) / thread_count;
  // dreamplaceAssert(block_count >= 0); // avoid overflow
  int block_count = (num_nodes - 1 + thread_count) / thread_count;

  hipLaunchKernelGGL(( computeExactDensityMapCellByCell), dim3(block_count), dim3(thread_count), 0, 0, 
      x_tensor, y_tensor, node_size_x_tensor, node_size_y_tensor,
      bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x,
      num_bins_y, xl, yl, xh, yh, bin_size_x, bin_size_y, num_impacted_bins_x,
      num_impacted_bins_y, fixed_node_flag, atomic_add_op, density_map_tensor);

  return 0;
}

/// @brief Entry point for the exact density map (typically fixed nodes).
/// Same deterministic fixed-point scheme as the triangle launcher.
///
/// @return 0 on success.
template <typename T>
int computeExactDensityMapCudaLauncher(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor,
    const T *node_size_y_tensor, const T *bin_center_x_tensor,
    const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x,
    const int num_bins_y, const int num_impacted_bins_x,
    const int num_impacted_bins_y, const T xl, const T yl, const T xh,
    const T yh, const T bin_size_x, const T bin_size_y, bool fixed_node_flag,
    bool deterministic_flag, T *density_map_tensor) {
  if (deterministic_flag)  // deterministic implementation using unsigned long
                           // as fixed point number
  {
    // total die area
    double diearea = (xh - xl) * (yh - yl);
    int integer_bits = max((int)ceil(log2(diearea)) + 1, 32);
    int fraction_bits = max(64 - integer_bits, 0);
    // Use 1ULL: fraction_bits can reach 32, and shifting 1UL by 32 is
    // undefined behavior on platforms where unsigned long is 32-bit (LLP64).
    unsigned long long int scale_factor = (1ULL << fraction_bits);
    // Yibo: usually exact is only invoked once, so I put the message here
    // If it prints too many message, comment it out
    dreamplacePrint(kDEBUG,
                    "deterministic mode: integer %d bits, fraction %d bits, "
                    "scale factor %llu\n",
                    integer_bits, fraction_bits, scale_factor);
    int num_bins = num_bins_x * num_bins_y;
    unsigned long long int *scaled_density_map_tensor = NULL;
    allocateCUDA(scaled_density_map_tensor, num_bins, unsigned long long int);

    AtomicAddCUDA<unsigned long long int> atomic_add_op(scale_factor);

    int thread_count = 512;
    hipLaunchKernelGGL(( copyScaleArray), dim3((num_bins + thread_count - 1) / thread_count), dim3(thread_count), 0, 0, 
        scaled_density_map_tensor, density_map_tensor, scale_factor, num_bins);
    computeExactDensityMapCallKernel<T, decltype(atomic_add_op)>(
        x_tensor, y_tensor, node_size_x_tensor, node_size_y_tensor,
        bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x,
        num_bins_y, num_impacted_bins_x, num_impacted_bins_y, xl, yl, xh, yh,
        bin_size_x, bin_size_y, fixed_node_flag, atomic_add_op,
        scaled_density_map_tensor);
    hipLaunchKernelGGL(( copyScaleArray), dim3((num_bins + thread_count - 1) / thread_count), dim3(thread_count), 0, 0, 
        density_map_tensor, scaled_density_map_tensor, T(1.0 / scale_factor),
        num_bins);

    destroyCUDA(scaled_density_map_tensor);
  } else {
    AtomicAddCUDA<T> atomic_add_op;
    computeExactDensityMapCallKernel<T, decltype(atomic_add_op)>(
        x_tensor, y_tensor, node_size_x_tensor, node_size_y_tensor,
        bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x,
        num_bins_y, num_impacted_bins_x, num_impacted_bins_y, xl, yl, xh, yh,
        bin_size_x, bin_size_y, fixed_node_flag, atomic_add_op,
        density_map_tensor);
  }

  return 0;
}

#define REGISTER_KERNEL_LAUNCHER(T)                                           \
  template int computeTriangleDensityMapCudaLauncher<T>(                      \
      const T *x_tensor, const T *y_tensor,                                   \
      const T *node_size_x_clamped_tensor,                                    \
      const T *node_size_y_clamped_tensor, const T *offset_x_tensor,          \
      const T *offset_y_tensor, const T *ratio_tensor,                        \
      const T *bin_center_x_tensor, const T *bin_center_y_tensor,             \
      const int num_nodes, const int num_bins_x, const int num_bins_y,        \
      const int num_impacted_bins_x, const int num_impacted_bins_y,           \
      const T xl, const T yl, const T xh, const T yh, const T bin_size_x,     \
      const T bin_size_y, bool deterministic_flag, T *density_map_tensor,     \
      const int *sorted_node_map);                                            \
                                                                              \
  template int computeExactDensityMapCudaLauncher<T>(                         \
      const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor,      \
      const T *node_size_y_tensor, const T *bin_center_x_tensor,              \
      const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x,\
      const int num_bins_y, const int num_impacted_bins_x,                    \
      const int num_impacted_bins_y, const T xl, const T yl, const T xh,      \
      const T yh, const T bin_size_x, const T bin_size_y,                     \
      bool fixed_node_flag, bool deterministic_flag, T *density_map_tensor);

REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);

DREAMPLACE_END_NAMESPACE
32cfd75917d26b10fb73a1e688dd6568b689974d.cu
/**
 * @file electric_density_map_cuda_kernel.cu
 * @author Yibo Lin
 * @date Aug 2018
 */
#include <float.h>

#include <cstdint>
#include <math.h>
#include <stdio.h>

#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
// local dependency
#include "electric_potential/src/density_function.h"

DREAMPLACE_BEGIN_NAMESPACE

/// define triangle_density_function
template <typename T>
inline __device__ DEFINE_TRIANGLE_DENSITY_FUNCTION(T);
/// define exact_density_function
template <typename T>
inline __device__ DEFINE_EXACT_DENSITY_FUNCTION(T);

/// @brief Accumulate the triangle (stretched) density of each movable node
/// into the bin density map.
///
/// Thread layout: each node is handled by one (blockIdx.x, threadIdx.z) slot
/// (the node index is blockIdx.x * blockDim.z + threadIdx.z), while the
/// (threadIdx.y, threadIdx.x) lanes of the block stride over the node's
/// impacted bins in x and y respectively.
///
/// @param density_map_tensor output map of num_bins_x * num_bins_y entries,
///        written through atomic_add_op so concurrent updates are safe;
///        must be initialized by the caller.
/// @param sorted_node_map optional indirection array; can be NULL if nodes
///        are not sorted.
template <typename T, typename AtomicOp>
__global__ void __launch_bounds__(1024, 8) computeTriangleDensityMap(
    const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor,
    const T *node_size_y_clamped_tensor, const T *offset_x_tensor,
    const T *offset_y_tensor, const T *ratio_tensor,
    const T *bin_center_x_tensor, const T *bin_center_y_tensor,
    const int num_nodes, const int num_bins_x, const int num_bins_y,
    const T xl, const T yl, const T xh, const T yh, const T half_bin_size_x,
    const T half_bin_size_y, const T bin_size_x, const T bin_size_y,
    const T inv_bin_size_x, const T inv_bin_size_y, AtomicOp atomic_add_op,
    typename AtomicOp::type *density_map_tensor,
    const int *sorted_node_map  ///< can be NULL if not sorted
) {
  int index = blockIdx.x * blockDim.z + threadIdx.z;
  if (index < num_nodes) {
    int i = (sorted_node_map) ? sorted_node_map[index] : index;

    // use stretched node size
    T node_size_x = node_size_x_clamped_tensor[i];
    T node_size_y = node_size_y_clamped_tensor[i];
    T node_x = x_tensor[i] + offset_x_tensor[i];
    T node_y = y_tensor[i] + offset_y_tensor[i];
    T ratio = ratio_tensor[i];

    // Bin range overlapped by the node; upper bounds are exclusive and both
    // ends are clamped into [0, num_bins).
    int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
    int bin_index_xh =
        int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1;  // exclusive
    bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
    bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);

    int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
    int bin_index_yh =
        int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1;  // exclusive
    bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
    bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);

    // update density potential map
    // Lanes stride by blockDim.y over x-bins and blockDim.x over y-bins, so
    // several lanes cooperate on one node's bin rectangle.
    for (int k = bin_index_xl + threadIdx.y; k < bin_index_xh;
         k += blockDim.y) {
      T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x);
      T px_by_ratio = px * ratio;

      for (int h = bin_index_yl + threadIdx.x; h < bin_index_yh;
           h += blockDim.x) {
        T py =
            triangle_density_function(node_y, node_size_y, yl, h, bin_size_y);
        T area = px_by_ratio * py;

        atomic_add_op(&density_map_tensor[k * num_bins_y + h], area);
      }
    }
  }
}

/// @brief An unrolled way to compute the density map.
/// Currently it is not as efficient as computeTriangleDensityMap,
/// it has the potential to be better.
/// It is not used for now.
template <typename T> __global__ void computeTriangleDensityMapUnroll( const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, const int num_bins_y, const T xl, const T yl, const T xh, const T yh, const T half_bin_size_x, const T half_bin_size_y, const T bin_size_x, const T bin_size_y, const T inv_bin_size_x, const T inv_bin_size_y, T *density_map_tensor, const int *sorted_node_map ///< can be NULL if not sorted ) { int index = blockIdx.x * blockDim.y + threadIdx.y; if (index < num_nodes) { int i = (sorted_node_map) ? sorted_node_map[index] : index; T node_size_x = node_size_x_clamped_tensor[i]; T node_size_y = node_size_y_clamped_tensor[i]; T node_x = x_tensor[i] + offset_x_tensor[i]; T node_y = y_tensor[i] + offset_y_tensor[i]; T ratio = ratio_tensor[i]; int bin_index_xl = int((node_x - xl) * inv_bin_size_x); int bin_index_xh = int(((node_x + node_size_x - xl) * inv_bin_size_x)); // inclusive bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x - 1); int bin_index_yl = int((node_y - yl) * inv_bin_size_y); int bin_index_yh = int(((node_y + node_size_y - yl) * inv_bin_size_y)); // inclusive bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y - 1); // update density potential map int k, h; int cond = ((bin_index_xl == bin_index_xh) << 1) | (bin_index_yl == bin_index_yh); switch (cond) { case 0: { T px_c = bin_size_x; T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; T py_c = bin_size_y; T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl); T area_xc_yl = px_c * py_l * ratio; T area_xc_yc = px_c * py_c * ratio; T area_xc_yh = px_c * py_h * ratio; k = 
bin_index_xl; if (threadIdx.x == 0) { T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; T area_xl_yl = px_l * py_l * ratio; T area_xl_yc = px_l * py_c * ratio; T area_xl_yh = px_l * py_h * ratio; h = bin_index_yl; atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl_yl); for (++h; h < bin_index_yh; ++h) { atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl_yc); } atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl_yh); k += blockDim.x; } for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) { h = bin_index_yl; atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc_yl); for (++h; h < bin_index_yh; ++h) { atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc_yc); } atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc_yh); } if (k == bin_index_xh) { T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl); T area_xh_yl = px_h * py_l * ratio; T area_xh_yc = px_h * py_c * ratio; T area_xh_yh = px_h * py_h * ratio; h = bin_index_yl; atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh_yl); for (++h; h < bin_index_yh; ++h) { atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh_yc); } atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh_yh); } return; } case 1: { T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; h = bin_index_yl; k = bin_index_xl; if (threadIdx.x == 0) { T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; T area_xl = px_l * py * ratio; atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xl); k += blockDim.x; } T px_c = bin_size_x; T area_xc = px_c * py * ratio; for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) { atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xc); } if (k == bin_index_xh) { T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl); T area_xh = px_h * py * ratio; atomicAdd(&density_map_tensor[k * num_bins_y + h], area_xh); } return; } case 2: { T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; k = 
bin_index_xl; h = bin_index_yl; if (threadIdx.x == 0) { T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; T area_yl = px * py_l * ratio; atomicAdd(&density_map_tensor[k * num_bins_y + h], area_yl); h += blockDim.x; } T py_c = bin_size_y; T area_yc = px * py_c * ratio; for (h += threadIdx.x; h < bin_index_yh; h += blockDim.x) { atomicAdd(&density_map_tensor[k * num_bins_y + h], area_yc); } if (h == bin_index_yh) { T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl); T area_yh = px * py_h * ratio; atomicAdd(&density_map_tensor[k * num_bins_y + h], area_yh); } return; } case 3: { if (threadIdx.x == 0) { T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; T area = px * py * ratio; k = bin_index_xl; h = bin_index_yl; atomicAdd(&density_map_tensor[k * num_bins_y + h], area); } return; } default: assert(0); } } } template <typename T, typename AtomicOp> __global__ void computeTriangleDensityMapSimpleLikeCPU( const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, const int num_bins_y, const T xl, const T yl, const T xh, const T yh, const T bin_size_x, const T bin_size_y, // T* density_map_tensor AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor) { // density_map_tensor should be initialized outside T inv_bin_size_x = 1.0 / bin_size_x; T inv_bin_size_y = 1.0 / bin_size_y; // int num_bins = num_bins_x * num_bins_y; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < num_nodes) { // use stretched node size T node_size_x = node_size_x_clamped_tensor[i]; T node_size_y = node_size_y_clamped_tensor[i]; T node_x = x_tensor[i] + offset_x_tensor[i]; T node_y = y_tensor[i] + offset_y_tensor[i]; T ratio = ratio_tensor[i]; int bin_index_xl 
= int((node_x - xl) * inv_bin_size_x); int bin_index_xh = int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x); int bin_index_yl = int((node_y - yl) * inv_bin_size_y); int bin_index_yh = int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y); // update density potential map for (int k = bin_index_xl; k < bin_index_xh; ++k) { T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x); T px_by_ratio = px * ratio; for (int h = bin_index_yl; h < bin_index_yh; ++h) { T py = triangle_density_function(node_y, node_size_y, yl, h, bin_size_y); T area = px_by_ratio * py; atomic_add_op(&density_map_tensor[k * num_bins_y + h], area); } } } } template <typename T, typename AtomicOp> __global__ void computeExactDensityMap( const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor, const T *node_size_y_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, const int num_bins_y, const T xl, const T yl, const T xh, const T yh, const T bin_size_x, const T bin_size_y, const int num_impacted_bins_x, const int num_impacted_bins_y, bool fixed_node_flag, AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; int64_t bound = int64_t(num_nodes) * num_impacted_bins_x * num_impacted_bins_y; // rank-one update density map if (i < bound) { int node_id = i / (num_impacted_bins_x * num_impacted_bins_y); int residual_index = i - node_id * num_impacted_bins_x * num_impacted_bins_y; T bxl = x_tensor[node_id]; T byl = y_tensor[node_id]; T bxh = bxl + node_size_x_tensor[node_id]; T byh = byl + node_size_y_tensor[node_id]; // x direction int bin_index_xl = int((bxl 
- xl) / bin_size_x); bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); int k = bin_index_xl + int(residual_index / num_impacted_bins_y); if (k + 1 > num_bins_x) { return; } // y direction int bin_index_yl = int((byl - yl) / bin_size_y); bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); int h = bin_index_yl + (residual_index % num_impacted_bins_y); if (h + 1 > num_bins_y) { return; } T px = exact_density_function(bxl, bxh - bxl, bin_center_x_tensor[k], bin_size_x, xl, xh, fixed_node_flag); T py = exact_density_function(byl, byh - byl, bin_center_y_tensor[h], bin_size_y, yl, yh, fixed_node_flag); // still area atomic_add_op(&density_map_tensor[k * num_bins_y + h], px * py); } } /// @brief Compute exact density map using cell-by-cell parallelization strategy template <typename T, typename AtomicOp> __global__ void computeExactDensityMapCellByCell( const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor, const T *node_size_y_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, const int num_bins_y, const T xl, const T yl, const T xh, const T yh, const T bin_size_x, const T bin_size_y, const int num_impacted_bins_x, const int num_impacted_bins_y, bool fixed_node_flag, AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor) { auto box2bin = [&](T bxl, T byl, T bxh, T byh) { // x direction int bin_index_xl = int((bxl - xl) / bin_size_x); int bin_index_xh = int(ceil((bxh - xl) / bin_size_x)) + 1; // exclusive bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x); // y direction int bin_index_yl = int((byl - yl) / bin_size_y); int bin_index_yh = int(ceil((byh - yl) / bin_size_y)) + 1; // exclusive bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y); for (int k = bin_index_xl; k < bin_index_xh; ++k) { T px = 
exact_density_function(bxl, bxh - bxl, bin_center_x_tensor[k], bin_size_x, xl, xh, fixed_node_flag); for (int h = bin_index_yl; h < bin_index_yh; ++h) { T py = exact_density_function(byl, byh - byl, bin_center_y_tensor[h], bin_size_y, yl, yh, fixed_node_flag); // still area atomic_add_op(&density_map_tensor[k * num_bins_y + h], px * py); } } }; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < num_nodes) { T bxl = x_tensor[i]; T byl = y_tensor[i]; T bxh = bxl + node_size_x_tensor[i]; T byh = byl + node_size_y_tensor[i]; box2bin(bxl, byl, bxh, byh); } } template <typename T, typename AtomicOp> int computeTriangleDensityMapCallKernel( const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, int num_nodes, const int num_bins_x, const int num_bins_y, int num_impacted_bins_x, int num_impacted_bins_y, const T xl, const T yl, const T xh, const T yh, const T bin_size_x, const T bin_size_y, AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor, const int *sorted_node_map) { int thread_count = 64; dim3 blockSize(2, 2, thread_count); int block_count = (num_nodes - 1 + thread_count) / thread_count; computeTriangleDensityMap<<<block_count, blockSize>>>( x_tensor, y_tensor, node_size_x_clamped_tensor, node_size_y_clamped_tensor, offset_x_tensor, offset_y_tensor, ratio_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x, num_bins_y, xl, yl, xh, yh, bin_size_x / 2, bin_size_y / 2, bin_size_x, bin_size_y, 1 / bin_size_x, 1 / bin_size_y, atomic_add_op, density_map_tensor, sorted_node_map); // computeTriangleDensityMapSimpleLikeCPU<<<block_count, thread_count>>>( // x_tensor, y_tensor, // node_size_x_clamped_tensor, node_size_y_clamped_tensor, // offset_x_tensor, offset_y_tensor, // ratio_tensor, // bin_center_x_tensor, bin_center_y_tensor, // num_nodes, // 
num_bins_x, num_bins_y, // xl, yl, xh, yh, // bin_size_x, bin_size_y, // atomic_add_op, // density_map_tensor // ); return 0; } template <typename T> int computeTriangleDensityMapCudaLauncher( const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, int num_nodes, const int num_bins_x, const int num_bins_y, int num_impacted_bins_x, int num_impacted_bins_y, const T xl, const T yl, const T xh, const T yh, const T bin_size_x, const T bin_size_y, bool deterministic_flag, T *density_map_tensor, const int *sorted_node_map) { if (deterministic_flag) // deterministic implementation using unsigned long // as fixed point number { // total die area double diearea = (xh - xl) * (yh - yl); int integer_bits = max((int)ceil(log2(diearea)) + 1, 32); int fraction_bits = max(64 - integer_bits, 0); unsigned long long int scale_factor = (1UL << fraction_bits); int num_bins = num_bins_x * num_bins_y; unsigned long long int *scaled_density_map_tensor = NULL; allocateCUDA(scaled_density_map_tensor, num_bins, unsigned long long int); AtomicAddCUDA<unsigned long long int> atomic_add_op(scale_factor); int thread_count = 512; copyScaleArray<<<(num_bins + thread_count - 1) / thread_count, thread_count>>>( scaled_density_map_tensor, density_map_tensor, scale_factor, num_bins); computeTriangleDensityMapCallKernel<T, decltype(atomic_add_op)>( x_tensor, y_tensor, node_size_x_clamped_tensor, node_size_y_clamped_tensor, offset_x_tensor, offset_y_tensor, ratio_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x, num_bins_y, num_impacted_bins_x, num_impacted_bins_y, xl, yl, xh, yh, bin_size_x, bin_size_y, atomic_add_op, scaled_density_map_tensor, sorted_node_map); copyScaleArray<<<(num_bins + thread_count - 1) / thread_count, thread_count>>>(density_map_tensor, scaled_density_map_tensor, T(1.0 
/ scale_factor), num_bins); destroyCUDA(scaled_density_map_tensor); } else { AtomicAddCUDA<T> atomic_add_op; computeTriangleDensityMapCallKernel<T, decltype(atomic_add_op)>( x_tensor, y_tensor, node_size_x_clamped_tensor, node_size_y_clamped_tensor, offset_x_tensor, offset_y_tensor, ratio_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x, num_bins_y, num_impacted_bins_x, num_impacted_bins_y, xl, yl, xh, yh, bin_size_x, bin_size_y, atomic_add_op, density_map_tensor, sorted_node_map); } return 0; } template <typename T, typename AtomicOp> int computeExactDensityMapCallKernel( const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor, const T *node_size_y_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, const int num_bins_y, const int num_impacted_bins_x, const int num_impacted_bins_y, const T xl, const T yl, const T xh, const T yh, const T bin_size_x, const T bin_size_y, bool fixed_node_flag, AtomicOp atomic_add_op, typename AtomicOp::type *density_map_tensor) { int thread_count = 512; //int block_count = (num_nodes * num_impacted_bins_x * num_impacted_bins_y - 1 + thread_count) / thread_count; // dreamplaceAssert(block_count >= 0); // avoid overflow int block_count = (num_nodes - 1 + thread_count) / thread_count; computeExactDensityMapCellByCell<<<block_count, thread_count>>>( x_tensor, y_tensor, node_size_x_tensor, node_size_y_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x, num_bins_y, xl, yl, xh, yh, bin_size_x, bin_size_y, num_impacted_bins_x, num_impacted_bins_y, fixed_node_flag, atomic_add_op, density_map_tensor); return 0; } template <typename T> int computeExactDensityMapCudaLauncher( const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor, const T *node_size_y_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, const int num_bins_y, const int num_impacted_bins_x, const int 
num_impacted_bins_y, const T xl, const T yl, const T xh, const T yh, const T bin_size_x, const T bin_size_y, bool fixed_node_flag, bool deterministic_flag, T *density_map_tensor) { if (deterministic_flag) // deterministic implementation using unsigned long // as fixed point number { // total die area double diearea = (xh - xl) * (yh - yl); int integer_bits = max((int)ceil(log2(diearea)) + 1, 32); int fraction_bits = max(64 - integer_bits, 0); unsigned long long int scale_factor = (1UL << fraction_bits); // Yibo: usually exact is only invoked once, so I put the message here // If it prints too many message, comment it out dreamplacePrint(kDEBUG, "deterministic mode: integer %d bits, fraction %d bits, " "scale factor %llu\n", integer_bits, fraction_bits, scale_factor); int num_bins = num_bins_x * num_bins_y; unsigned long long int *scaled_density_map_tensor = NULL; allocateCUDA(scaled_density_map_tensor, num_bins, unsigned long long int); AtomicAddCUDA<unsigned long long int> atomic_add_op(scale_factor); int thread_count = 512; copyScaleArray<<<(num_bins + thread_count - 1) / thread_count, thread_count>>>( scaled_density_map_tensor, density_map_tensor, scale_factor, num_bins); computeExactDensityMapCallKernel<T, decltype(atomic_add_op)>( x_tensor, y_tensor, node_size_x_tensor, node_size_y_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x, num_bins_y, num_impacted_bins_x, num_impacted_bins_y, xl, yl, xh, yh, bin_size_x, bin_size_y, fixed_node_flag, atomic_add_op, scaled_density_map_tensor); copyScaleArray<<<(num_bins + thread_count - 1) / thread_count, thread_count>>>(density_map_tensor, scaled_density_map_tensor, T(1.0 / scale_factor), num_bins); destroyCUDA(scaled_density_map_tensor); } else { AtomicAddCUDA<T> atomic_add_op; computeExactDensityMapCallKernel<T, decltype(atomic_add_op)>( x_tensor, y_tensor, node_size_x_tensor, node_size_y_tensor, bin_center_x_tensor, bin_center_y_tensor, num_nodes, num_bins_x, num_bins_y, num_impacted_bins_x, 
num_impacted_bins_y, xl, yl, xh, yh, bin_size_x, bin_size_y, fixed_node_flag, atomic_add_op, density_map_tensor); } return 0; } #define REGISTER_KERNEL_LAUNCHER(T) \ template int computeTriangleDensityMapCudaLauncher<T>( \ const T *x_tensor, const T *y_tensor, \ const T *node_size_x_clamped_tensor, \ const T *node_size_y_clamped_tensor, const T *offset_x_tensor, \ const T *offset_y_tensor, const T *ratio_tensor, \ const T *bin_center_x_tensor, const T *bin_center_y_tensor, \ const int num_nodes, const int num_bins_x, const int num_bins_y, \ const int num_impacted_bins_x, const int num_impacted_bins_y, \ const T xl, const T yl, const T xh, const T yh, const T bin_size_x, \ const T bin_size_y, bool deterministic_flag, T *density_map_tensor, \ const int *sorted_node_map); \ \ template int computeExactDensityMapCudaLauncher<T>( \ const T *x_tensor, const T *y_tensor, const T *node_size_x_tensor, \ const T *node_size_y_tensor, const T *bin_center_x_tensor, \ const T *bin_center_y_tensor, const int num_nodes, const int num_bins_x, \ const int num_bins_y, const int num_impacted_bins_x, \ const int num_impacted_bins_y, const T xl, const T yl, const T xh, \ const T yh, const T bin_size_x, const T bin_size_y, \ bool fixed_node_flag, bool deterministic_flag, T *density_map_tensor); REGISTER_KERNEL_LAUNCHER(float); REGISTER_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
62ac90cec3a6afd3a7c8e7c5c32b14724d625ce9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" #include "paddle/fluid/operators/math/prelu.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { PReluPlugin *CreatePreluPluginDeserialize(const void *buffer, size_t length) { return new PReluPlugin(buffer, length); } REGISTER_TRT_PLUGIN("prelu_plugin", CreatePreluPluginDeserialize); int PReluPlugin::initialize() { hipMalloc(&p_gpu_weight_, sizeof(float) * weight_.size()); hipMemcpy(p_gpu_weight_, weight_.data(), weight_.size() * sizeof(float), hipMemcpyHostToDevice); return 0; } nvinfer1::Dims PReluPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const &input_dims = inputDims[0]; nvinfer1::Dims output_dims = input_dims; return output_dims; } int PReluPlugin::enqueue(int batch_size, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { // input dims is CHW. 
const auto &input_dims = this->getInputDims(0); const float *input = reinterpret_cast<const float *>(inputs[0]); // const float *alpha = reinterpret_cast<const float *>(alpha_.get().values); const float *alpha = p_gpu_weight_; float *output = reinterpret_cast<float **>(outputs)[0]; int numel = 1; for (int i = 0; i < input_dims.nbDims; i++) { numel *= input_dims.d[i]; } if (mode_ == "channel") { operators::math::PreluChannelWiseDirectCUDAFunctor<float> prelu_channel_wise; prelu_channel_wise(stream, input, alpha, output, input_dims.d[0], input_dims.d[1], numel); } else if (mode_ == "element") { operators::math::PreluElementWiseDirectCUDAFunctor<float> prelu_element_wise; prelu_element_wise(stream, input, alpha, output, input_dims.d[0], numel); } else { operators::math::PreluScalarDirectCUDAFunctor<float> prelu_scalar; prelu_scalar(stream, input, alpha, output, numel); } return hipGetLastError() != hipSuccess; } #if IS_TRT_VERSION_GE(6000) void PReluPluginDynamic::terminate() { if (p_gpu_weight_) { hipFree(p_gpu_weight_); } } int PReluPluginDynamic::initialize() { hipMalloc(&p_gpu_weight_, sizeof(float) * weight_.size()); hipMemcpy(p_gpu_weight_, weight_.data(), weight_.size() * sizeof(float), hipMemcpyHostToDevice); return 0; } size_t PReluPluginDynamic::getSerializationSize() const { return 0; } void PReluPluginDynamic::serialize(void *buffer) const {} nvinfer1::DimsExprs PReluPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { return inputs[0]; } bool PReluPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + 
nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); return ((in_out[pos].type == nvinfer1::DataType::kFLOAT) && in_out[pos].format == nvinfer1::PluginFormat::kNCHW); } nvinfer1::DataType PReluPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The PRelu Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int PReluPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) { auto input_dims = input_desc[0].dims; const float *alpha = p_gpu_weight_; const float *input = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); int numel = 1; for (int i = 0; i < input_dims.nbDims; i++) { numel *= input_dims.d[i]; } if (mode_ == "channel") { operators::math::PreluChannelWiseDirectCUDAFunctor<float> prelu_channel_wise; prelu_channel_wise(stream, input, alpha, output, input_dims.d[0], input_dims.d[1], numel); } else if (mode_ == "element") { operators::math::PreluElementWiseDirectCUDAFunctor<float> prelu_element_wise; prelu_element_wise(stream, input, alpha, output, input_dims.d[0], numel); } else { operators::math::PreluScalarDirectCUDAFunctor<float> prelu_scalar; prelu_scalar(stream, input, alpha, output, numel); } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
62ac90cec3a6afd3a7c8e7c5c32b14724d625ce9.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" #include "paddle/fluid/operators/math/prelu.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { PReluPlugin *CreatePreluPluginDeserialize(const void *buffer, size_t length) { return new PReluPlugin(buffer, length); } REGISTER_TRT_PLUGIN("prelu_plugin", CreatePreluPluginDeserialize); int PReluPlugin::initialize() { cudaMalloc(&p_gpu_weight_, sizeof(float) * weight_.size()); cudaMemcpy(p_gpu_weight_, weight_.data(), weight_.size() * sizeof(float), cudaMemcpyHostToDevice); return 0; } nvinfer1::Dims PReluPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const &input_dims = inputDims[0]; nvinfer1::Dims output_dims = input_dims; return output_dims; } int PReluPlugin::enqueue(int batch_size, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { // input dims is CHW. 
const auto &input_dims = this->getInputDims(0); const float *input = reinterpret_cast<const float *>(inputs[0]); // const float *alpha = reinterpret_cast<const float *>(alpha_.get().values); const float *alpha = p_gpu_weight_; float *output = reinterpret_cast<float **>(outputs)[0]; int numel = 1; for (int i = 0; i < input_dims.nbDims; i++) { numel *= input_dims.d[i]; } if (mode_ == "channel") { operators::math::PreluChannelWiseDirectCUDAFunctor<float> prelu_channel_wise; prelu_channel_wise(stream, input, alpha, output, input_dims.d[0], input_dims.d[1], numel); } else if (mode_ == "element") { operators::math::PreluElementWiseDirectCUDAFunctor<float> prelu_element_wise; prelu_element_wise(stream, input, alpha, output, input_dims.d[0], numel); } else { operators::math::PreluScalarDirectCUDAFunctor<float> prelu_scalar; prelu_scalar(stream, input, alpha, output, numel); } return cudaGetLastError() != cudaSuccess; } #if IS_TRT_VERSION_GE(6000) void PReluPluginDynamic::terminate() { if (p_gpu_weight_) { cudaFree(p_gpu_weight_); } } int PReluPluginDynamic::initialize() { cudaMalloc(&p_gpu_weight_, sizeof(float) * weight_.size()); cudaMemcpy(p_gpu_weight_, weight_.data(), weight_.size() * sizeof(float), cudaMemcpyHostToDevice); return 0; } size_t PReluPluginDynamic::getSerializationSize() const { return 0; } void PReluPluginDynamic::serialize(void *buffer) const {} nvinfer1::DimsExprs PReluPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { return inputs[0]; } bool PReluPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, 
nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); return ((in_out[pos].type == nvinfer1::DataType::kFLOAT) && in_out[pos].format == nvinfer1::PluginFormat::kNCHW); } nvinfer1::DataType PReluPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The PRelu Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int PReluPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) { auto input_dims = input_desc[0].dims; const float *alpha = p_gpu_weight_; const float *input = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); int numel = 1; for (int i = 0; i < input_dims.nbDims; i++) { numel *= input_dims.d[i]; } if (mode_ == "channel") { operators::math::PreluChannelWiseDirectCUDAFunctor<float> prelu_channel_wise; prelu_channel_wise(stream, input, alpha, output, input_dims.d[0], input_dims.d[1], numel); } else if (mode_ == "element") { operators::math::PreluElementWiseDirectCUDAFunctor<float> prelu_element_wise; prelu_element_wise(stream, input, alpha, output, input_dims.d[0], numel); } else { operators::math::PreluScalarDirectCUDAFunctor<float> prelu_scalar; prelu_scalar(stream, input, alpha, output, numel); } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
c3fea54a0d5a2daf235fbbe09321ad7a7cf87380.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <cmath> #include <algorithm> #include <hip/hip_runtime.h> #include <THH/THH.h> #define BLOCK_SIZE 32 #define BLOCK_CHANNELS (1024 / (BLOCK_SIZE * BLOCK_SIZE)) using std::max; using std::min; using std::floor; using std::ceil; // TODO remove this code #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) inline void __cudaSafeCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = hipDeviceSynchronize(); if( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } namespace strided { /************************ updateOutput ************************/ // Divides x by y (y > 0), rounds towards minus infinity __device__ inline int divFloor(const int x, const int y) { return x >= 0 ? x / y : (x - y + 1) / y; } // Divides x by y (y > 0), rounds towards minus infinity, returns positive remainder __device__ inline int modFloor(const int x, const int y) { return x >= 0 ? 
x % y : (y + x % y); } __global__ void forwardNoNormReplicateKernel( const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *const xMin, const float *const xMax, const float *const yMin, const float *const yMax, const int strideH, const int strideW) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; outData += id; // outData now points to our output pixel const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int y = id % wOut; id /= wOut; const int x = id % hOut; id /= hOut; const int windowIdx = id % nWindows; id /= nWindows; // `id` is now is now the current global input plane number intData += id * intDataStrideChannel; const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane; const int & batchIdx = id; if (batchIdx < batchSize) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int t = max(0, min(x*strideH+(int) ceil(xMin[globalWindowIdx]) , h-1) ); const int b = max(1, min(x*strideH+(int)floor(xMax[globalWindowIdx])+1, h ) ); const int l = max(0, min(y*strideW+(int) ceil(yMin[globalWindowIdx]) , w-1) ); const int r = max(1, min(y*strideW+(int)floor(yMax[globalWindowIdx])+1, w ) ); float outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; *outData = outValue; } } __global__ void forwardNoNormReplicateFracKernel( const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *const xMin, const float *const xMax, const float *const yMin, const float *const yMax, const float *inData, const int inDataStrideRow, const int inDataStrideChannel, const int strideH, const int strideW) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; outData += id; // outData now points to our output pixel const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int y = id % wOut; id /= wOut; const int x = id % hOut; id /= hOut; const int windowIdx = id % nWindows; id /= nWindows; // `id` is now is now the current global input plane number intData += id * intDataStrideChannel; inData += id * inDataStrideChannel; const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane; const int & batchIdx = id; if (batchIdx < batchSize) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int xMinCurr = (int)ceil(xMin[globalWindowIdx]); const float xMinCurrFrac = (float)xMinCurr - xMin[globalWindowIdx]; const int yMinCurr = (int)ceil(yMin[globalWindowIdx]); const float yMinCurrFrac = (float)yMinCurr - yMin[globalWindowIdx]; const float xMaxCurrFrac = xMax[globalWindowIdx] - floor(xMax[globalWindowIdx]); const int xMaxCurr = (int)floor(xMax[globalWindowIdx]) + 1; const float yMaxCurrFrac = yMax[globalWindowIdx] - floor(yMax[globalWindowIdx]); const int yMaxCurr = (int)floor(yMax[globalWindowIdx]) + 1; const int t = max(0, min(x*strideH+xMinCurr, h-1) ); const int b = max(1, min(x*strideH+xMaxCurr, h) ); const int l = max(0, min(y*strideW+yMinCurr, w-1) ); const int r = max(1, min(y*strideW+yMaxCurr, w) ); double outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; // TODO: tAdv, bAdv, lAdv, rAdv // -- xMax border outValue += ( intData[max(1,min(x*strideH+xMaxCurr+1,h))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(1,min(x*strideH+xMaxCurr+1,h))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] + intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] ) * xMaxCurrFrac; // -- yMax border outValue += ( intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(1,min(y*strideW+yMaxCurr+1,w))] - intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr+1,w))] + intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] ) * yMaxCurrFrac; // -- xMin border outValue += ( intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(0,min(x*strideH+xMinCurr-1,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + 
max(0,min(y*strideW+yMinCurr,w-1))] + intData[max(0,min(x*strideH+xMinCurr-1,h-1))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] ) * xMinCurrFrac; // -- yMin border outValue += ( intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] - intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(0,min(y*strideW+yMinCurr-1,w-1))] - intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] + intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(0,min(y*strideW+yMinCurr-1,w-1))] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x*strideH+xMaxCurr > h-1 or y*strideW+yMaxCurr > w-1 or x*strideH+xMaxCurr <= 0 or y*strideW+yMaxCurr <= 0) ? 0 : inData[(x*strideH+xMaxCurr)*inDataStrideRow + (y*strideW+yMaxCurr)]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x*strideH+xMinCurr-1 >= h-1 or y*strideW+yMaxCurr > w-1 or x*strideH+xMinCurr-1 < 0 or y*strideW+yMaxCurr <= 0) ? 0 : inData[(x*strideH+xMinCurr-1)*inDataStrideRow + (y*strideW+yMaxCurr)]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x*strideH+xMaxCurr > h-1 or y*strideW+yMinCurr-1 >= w-1 or x*strideH+xMaxCurr <= 0 or y*strideW+yMinCurr-1 < 0) ? 0 : inData[(x*strideH+xMaxCurr)*inDataStrideRow + (y*strideW+yMinCurr-1)]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x*strideH+xMinCurr-1 >= h-1 or y*strideW+yMinCurr-1 >= w-1 or x*strideH+xMinCurr-1 < 0 or y*strideW+yMinCurr-1 < 0) ? 
0 : inData[(x*strideH+xMinCurr-1)*inDataStrideRow + (y*strideW+yMinCurr-1)]); *outData = outValue; } } void forwardNoNormReplicateCuda(THCState *state, const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *yMax, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int NUM_THREADS = BLOCK_SIZE * BLOCK_SIZE; const int threadsNeeded = batchSize * nInputPlane * nWindows * hOut * wOut; const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS; hipLaunchKernelGGL(( forwardNoNormReplicateKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state), intData, intDataStrideChannel, outData, batchSize, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, strideH, strideW); THCudaCheck(hipGetLastError()); } void forwardNoNormReplicateFracCuda(THCState *state, const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int inDataStrideChannel, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int NUM_THREADS = BLOCK_SIZE * BLOCK_SIZE; const int threadsNeeded = batchSize * nInputPlane * nWindows * hOut * wOut; const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS; hipLaunchKernelGGL(( forwardNoNormReplicateFracKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state), intData, intDataStrideChannel, outData, batchSize, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, inData, inDataStrideRow, inDataStrideChannel, strideH, strideW); 
THCudaCheck(hipGetLastError()); } /************************ updateGradInput ************************/ __global__ void updateGradInputReplicatePlanewiseKernel( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const int strideH, const int strideW) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { xMinCurr = (int)ceil(-xMax[windowIdx]); yMinCurr = (int)ceil(-yMax[windowIdx]); xMaxCurr = (int)floor(-xMin[windowIdx]) + 1; yMaxCurr = (int)floor(-yMin[windowIdx]) + 1; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? 
w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(divFloor(x+xMinCurr + strideH - 1, strideH) , hOut) ); const int b = max(0, min(divFloor(x+xMaxCurr - 1 , strideH) + 1, hOut) ); const int l = max(0, min(divFloor(y+yMinCurr + strideW - 1, strideW) , wOut) ); const int r = max(0, min(divFloor(y+yMaxCurr - 1 , strideW) + 1, wOut) ); outValue += gradOutputIntData[b*(wOut+1) + r]; outValue -= gradOutputIntData[t*(wOut+1) + r]; outValue -= gradOutputIntData[b*(wOut+1) + l]; outValue += gradOutputIntData[t*(wOut+1) + l]; // go to the next channel gradOutputIntData += (hOut+1)*(wOut+1); } gradInputData[x*w + y] = outValue; } } __global__ void updateGradInputReplicatePlanewiseFracKernel( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const float *gradOutputData, const int gradOutputStrideRow, const int gradOutputStrideChannel, const int strideH, const int strideW) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { xMinCurr = (int)ceil(-xMax[windowIdx]); yMinCurr = (int)ceil(-yMax[windowIdx]); const float xMinCurrFrac = (float)xMinCurr + xMax[windowIdx]; const float yMinCurrFrac = (float)yMinCurr + yMax[windowIdx]; xMaxCurr = (int)floor(-xMin[windowIdx]) + 1; yMaxCurr = 
(int)floor(-yMin[windowIdx]) + 1; const float xMaxCurrFrac = -xMin[windowIdx] + 1 - xMaxCurr; const float yMaxCurrFrac = -yMin[windowIdx] + 1 - yMaxCurr; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(divFloor(x+xMinCurr + strideH - 1, strideH) , hOut) ); const int b = max(0, min(divFloor(x+xMaxCurr - 1 , strideH) + 1, hOut) ); const int l = max(0, min(divFloor(y+yMinCurr + strideW - 1, strideW) , wOut) ); const int r = max(0, min(divFloor(y+yMaxCurr - 1 , strideW) + 1, wOut) ); const int tAdv = modFloor(x+xMinCurr-1, strideH) == 0 and x+xMinCurr-1 < h ? max(0, min(t-1, hOut)) : t; const int bAdv = modFloor(x+xMaxCurr , strideH) == 0 and x+xMaxCurr >= 0 ? max(0, min(b+1, hOut)) : b; const int lAdv = modFloor(y+yMinCurr-1, strideW) == 0 and y+yMinCurr-1 < w ? max(0, min(l-1, wOut)) : l; const int rAdv = modFloor(y+yMaxCurr , strideW) == 0 and y+yMaxCurr >= 0 ? 
max(0, min(r+1, wOut)) : r; // TODO: 1D grid outValue += gradOutputIntData[b*(wOut+1) + r]; outValue -= gradOutputIntData[t*(wOut+1) + r]; outValue -= gradOutputIntData[b*(wOut+1) + l]; outValue += gradOutputIntData[t*(wOut+1) + l]; // -- xMax border outValue += ( gradOutputIntData[bAdv*(wOut+1) + r] - gradOutputIntData[b *(wOut+1) + r] - gradOutputIntData[bAdv*(wOut+1) + l] + gradOutputIntData[b *(wOut+1) + l] ) * xMaxCurrFrac; // -- yMax border outValue += ( gradOutputIntData[b*(wOut+1) + rAdv] - gradOutputIntData[b*(wOut+1) + r ] - gradOutputIntData[t*(wOut+1) + rAdv] + gradOutputIntData[t*(wOut+1) + r ] ) * yMaxCurrFrac; // -- xMin border outValue += ( gradOutputIntData[t *(wOut+1) + r] - gradOutputIntData[tAdv*(wOut+1) + r] - gradOutputIntData[t *(wOut+1) + l] + gradOutputIntData[tAdv*(wOut+1) + l] ) * xMinCurrFrac; // -- yMin border outValue += ( gradOutputIntData[b*(wOut+1) + l ] - gradOutputIntData[b*(wOut+1) + lAdv] - gradOutputIntData[t*(wOut+1) + l ] + gradOutputIntData[t*(wOut+1) + lAdv] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x+xMaxCurr > h-1 or y+yMaxCurr > w-1 or x+xMaxCurr < 0 or y+yMaxCurr < 0 or b == bAdv or r == rAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + r]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMaxCurr > w-1 or x+xMinCurr-1 < 0 or y+yMaxCurr < 0 or t == tAdv or r == rAdv) ? 0 : gradOutputData[tAdv*gradOutputStrideRow + r]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x+xMaxCurr > h-1 or y+yMinCurr-1 > w-1 or x+xMaxCurr < 0 or y+yMinCurr-1 < 0 or b == bAdv or l == lAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + lAdv]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMinCurr-1 > w-1 or x+xMinCurr-1 < 0 or y+yMinCurr-1 < 0 or t == tAdv or l == lAdv) ? 
0 : gradOutputData[tAdv*gradOutputStrideRow + lAdv]); // go to the next channel gradOutputIntData += (hOut+1)*(wOut+1); gradOutputData += gradOutputStrideChannel; } gradInputData[x*w + y] = outValue; } } void updateGradInputReplicatePlanewiseCuda( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const int strideH, const int strideW) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( updateGradInputReplicatePlanewiseKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, strideH, strideW); } void updateGradInputReplicatePlanewiseFracCuda( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float *xMin, const float *xMax, const float *yMin, float *yMax, const float *gradOutputData, const int gradOutputStrideRow, const int gradOutputStrideChannel, const int strideH, const int strideW) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( updateGradInputReplicatePlanewiseFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, gradOutputData, gradOutputStrideRow, gradOutputStrideChannel, strideH, strideW); } /************************ accGradParameters ************************/ __global__ void xMaxDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = 
(h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; // const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const float xMinFrac = xMinInt-xMin[windowIdx]+1; const int yMinInt = (int)ceil(yMin[windowIdx]-1); const float yMinFrac = yMinInt-yMin[windowIdx]+1; const int xMaxInt = (int)floor(xMax[windowIdx]); // const float xMaxFrac = xMax[windowIdx]-xMaxInt; const int yMaxInt = (int)floor(yMax[windowIdx]); const float yMaxFrac = yMax[windowIdx]-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += blCorner * (y+yMinInt >= w ? 
1.0f : yMinFrac); delta += intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void xMinDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const float xMinFrac = xMinInt-xMin[windowIdx]+1; const int yMinInt = (int)ceil(yMin[windowIdx]-1); const float yMinFrac = yMinInt-yMin[windowIdx]+1; // const int xMaxInt = (int)floor(xMax[windowIdx]); // const float xMaxFrac = xMax[windowIdx]-xMaxInt; const int yMaxInt = (int)floor(yMax[windowIdx]); const float yMaxFrac = yMax[windowIdx]-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 
0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac); delta += intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[xOut*wOut + yOut] *= -delta; } } __global__ void yMaxDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const float xMinFrac = xMinInt-xMin[windowIdx]+1; // const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const float yMinFrac = yMinInt-yMin[windowIdx]+1; const int xMaxInt = (int)floor(xMax[windowIdx]); const float xMaxFrac = xMax[windowIdx]-xMaxInt; const int yMaxInt = (int)floor(yMax[windowIdx]); // const float yMaxFrac = yMax[windowIdx]-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 
? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void yMinDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const float xMinFrac = xMinInt-xMin[windowIdx]+1; const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const 
float yMinFrac = yMinInt-yMin[windowIdx]+1; const int xMaxInt = (int)floor(xMax[windowIdx]); const float xMaxFrac = xMax[windowIdx]-xMaxInt; // const int yMaxInt = (int)floor(yMax[windowIdx]); // const float yMaxFrac = yMax[windowIdx]-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[xOut*wOut + yOut] *= -delta; } } void backwardReplicateFracCuda( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * hOut * wOut + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( xMaxDeltaIntegralReplicateFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, 
tmpArray + 0*nWindows*hOut*wOut, nWindows, h, w, xMax, yMin, yMax, inData, inDataStrideRow, strideH, strideW); hipLaunchKernelGGL(( xMinDeltaIntegralReplicateFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 1*nWindows*hOut*wOut, nWindows, h, w, xMin, yMin, yMax, inData, inDataStrideRow, strideH, strideW); hipLaunchKernelGGL(( yMaxDeltaIntegralReplicateFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 2*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMax, inData, inDataStrideRow, strideH, strideW); hipLaunchKernelGGL(( yMinDeltaIntegralReplicateFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 3*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMin, inData, inDataStrideRow, strideH, strideW); } __global__ void xMaxDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; // const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + 
max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void xMinDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[xOut*wOut + yOut] *= -delta; } } __global__ void yMaxDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; 
if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void yMinDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); // const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt-1, w-1))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + 
max(0,min(y+yMinInt-1, w-1))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[xOut*wOut + yOut] *= -delta; } } void backwardReplicateCuda( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * hOut * wOut + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( xMaxDeltaIntegralReplicateKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 0*nWindows*hOut*wOut, nWindows, h, w, xMax, yMin, yMax, strideH, strideW); hipLaunchKernelGGL(( xMinDeltaIntegralReplicateKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 1*nWindows*hOut*wOut, nWindows, h, w, xMin, yMin, yMax, strideH, strideW); hipLaunchKernelGGL(( yMaxDeltaIntegralReplicateKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 2*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMax, strideH, strideW); hipLaunchKernelGGL(( yMinDeltaIntegralReplicateKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 3*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMin, strideH, strideW); } } // namespace
c3fea54a0d5a2daf235fbbe09321ad7a7cf87380.cu
#include <stdio.h> #include <cmath> #include <algorithm> #include <cuda_runtime.h> #include <THC/THC.h> #define BLOCK_SIZE 32 #define BLOCK_CHANNELS (1024 / (BLOCK_SIZE * BLOCK_SIZE)) using std::max; using std::min; using std::floor; using std::ceil; // TODO remove this code #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) inline void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = cudaDeviceSynchronize(); if( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } namespace strided { /************************ updateOutput ************************/ // Divides x by y (y > 0), rounds towards minus infinity __device__ inline int divFloor(const int x, const int y) { return x >= 0 ? x / y : (x - y + 1) / y; } // Divides x by y (y > 0), rounds towards minus infinity, returns positive remainder __device__ inline int modFloor(const int x, const int y) { return x >= 0 ? 
x % y : (y + x % y); } __global__ void forwardNoNormReplicateKernel( const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *const xMin, const float *const xMax, const float *const yMin, const float *const yMax, const int strideH, const int strideW) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; outData += id; // outData now points to our output pixel const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int y = id % wOut; id /= wOut; const int x = id % hOut; id /= hOut; const int windowIdx = id % nWindows; id /= nWindows; // `id` is now is now the current global input plane number intData += id * intDataStrideChannel; const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane; const int & batchIdx = id; if (batchIdx < batchSize) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int t = max(0, min(x*strideH+(int) ceil(xMin[globalWindowIdx]) , h-1) ); const int b = max(1, min(x*strideH+(int)floor(xMax[globalWindowIdx])+1, h ) ); const int l = max(0, min(y*strideW+(int) ceil(yMin[globalWindowIdx]) , w-1) ); const int r = max(1, min(y*strideW+(int)floor(yMax[globalWindowIdx])+1, w ) ); float outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; *outData = outValue; } } __global__ void forwardNoNormReplicateFracKernel( const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *const xMin, const float *const xMax, const float *const yMin, const float *const yMax, const float *inData, const int inDataStrideRow, const int inDataStrideChannel, const int strideH, const int strideW) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; outData += id; // outData now points to our output pixel const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int y = id % wOut; id /= wOut; const int x = id % hOut; id /= hOut; const int windowIdx = id % nWindows; id /= nWindows; // `id` is now is now the current global input plane number intData += id * intDataStrideChannel; inData += id * inDataStrideChannel; const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane; const int & batchIdx = id; if (batchIdx < batchSize) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int xMinCurr = (int)ceil(xMin[globalWindowIdx]); const float xMinCurrFrac = (float)xMinCurr - xMin[globalWindowIdx]; const int yMinCurr = (int)ceil(yMin[globalWindowIdx]); const float yMinCurrFrac = (float)yMinCurr - yMin[globalWindowIdx]; const float xMaxCurrFrac = xMax[globalWindowIdx] - floor(xMax[globalWindowIdx]); const int xMaxCurr = (int)floor(xMax[globalWindowIdx]) + 1; const float yMaxCurrFrac = yMax[globalWindowIdx] - floor(yMax[globalWindowIdx]); const int yMaxCurr = (int)floor(yMax[globalWindowIdx]) + 1; const int t = max(0, min(x*strideH+xMinCurr, h-1) ); const int b = max(1, min(x*strideH+xMaxCurr, h) ); const int l = max(0, min(y*strideW+yMinCurr, w-1) ); const int r = max(1, min(y*strideW+yMaxCurr, w) ); double outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; // TODO: tAdv, bAdv, lAdv, rAdv // -- xMax border outValue += ( intData[max(1,min(x*strideH+xMaxCurr+1,h))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(1,min(x*strideH+xMaxCurr+1,h))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] + intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] ) * xMaxCurrFrac; // -- yMax border outValue += ( intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(1,min(y*strideW+yMaxCurr+1,w))] - intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr+1,w))] + intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] ) * yMaxCurrFrac; // -- xMin border outValue += ( intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(0,min(x*strideH+xMinCurr-1,h-1))*(w+1) + max(1,min(y*strideW+yMaxCurr,w))] - intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + 
max(0,min(y*strideW+yMinCurr,w-1))] + intData[max(0,min(x*strideH+xMinCurr-1,h-1))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] ) * xMinCurrFrac; // -- yMin border outValue += ( intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] - intData[max(1,min(x*strideH+xMaxCurr,h))*(w+1) + max(0,min(y*strideW+yMinCurr-1,w-1))] - intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(0,min(y*strideW+yMinCurr,w-1))] + intData[max(0,min(x*strideH+xMinCurr,h-1))*(w+1) + max(0,min(y*strideW+yMinCurr-1,w-1))] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x*strideH+xMaxCurr > h-1 or y*strideW+yMaxCurr > w-1 or x*strideH+xMaxCurr <= 0 or y*strideW+yMaxCurr <= 0) ? 0 : inData[(x*strideH+xMaxCurr)*inDataStrideRow + (y*strideW+yMaxCurr)]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x*strideH+xMinCurr-1 >= h-1 or y*strideW+yMaxCurr > w-1 or x*strideH+xMinCurr-1 < 0 or y*strideW+yMaxCurr <= 0) ? 0 : inData[(x*strideH+xMinCurr-1)*inDataStrideRow + (y*strideW+yMaxCurr)]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x*strideH+xMaxCurr > h-1 or y*strideW+yMinCurr-1 >= w-1 or x*strideH+xMaxCurr <= 0 or y*strideW+yMinCurr-1 < 0) ? 0 : inData[(x*strideH+xMaxCurr)*inDataStrideRow + (y*strideW+yMinCurr-1)]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x*strideH+xMinCurr-1 >= h-1 or y*strideW+yMinCurr-1 >= w-1 or x*strideH+xMinCurr-1 < 0 or y*strideW+yMinCurr-1 < 0) ? 
0 : inData[(x*strideH+xMinCurr-1)*inDataStrideRow + (y*strideW+yMinCurr-1)]); *outData = outValue; } } void forwardNoNormReplicateCuda(THCState *state, const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *yMax, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int NUM_THREADS = BLOCK_SIZE * BLOCK_SIZE; const int threadsNeeded = batchSize * nInputPlane * nWindows * hOut * wOut; const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS; forwardNoNormReplicateKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> ( intData, intDataStrideChannel, outData, batchSize, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, strideH, strideW); THCudaCheck(cudaGetLastError()); } void forwardNoNormReplicateFracCuda(THCState *state, const float *intData, const int intDataStrideChannel, float *outData, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int inDataStrideChannel, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; const int NUM_THREADS = BLOCK_SIZE * BLOCK_SIZE; const int threadsNeeded = batchSize * nInputPlane * nWindows * hOut * wOut; const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS; forwardNoNormReplicateFracKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> ( intData, intDataStrideChannel, outData, batchSize, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, inData, inDataStrideRow, inDataStrideChannel, strideH, strideW); THCudaCheck(cudaGetLastError()); } /************************ 
updateGradInput ************************/ __global__ void updateGradInputReplicatePlanewiseKernel( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const int strideH, const int strideW) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { xMinCurr = (int)ceil(-xMax[windowIdx]); yMinCurr = (int)ceil(-yMax[windowIdx]); xMaxCurr = (int)floor(-xMin[windowIdx]) + 1; yMaxCurr = (int)floor(-yMin[windowIdx]) + 1; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? 
w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(divFloor(x+xMinCurr + strideH - 1, strideH) , hOut) ); const int b = max(0, min(divFloor(x+xMaxCurr - 1 , strideH) + 1, hOut) ); const int l = max(0, min(divFloor(y+yMinCurr + strideW - 1, strideW) , wOut) ); const int r = max(0, min(divFloor(y+yMaxCurr - 1 , strideW) + 1, wOut) ); outValue += gradOutputIntData[b*(wOut+1) + r]; outValue -= gradOutputIntData[t*(wOut+1) + r]; outValue -= gradOutputIntData[b*(wOut+1) + l]; outValue += gradOutputIntData[t*(wOut+1) + l]; // go to the next channel gradOutputIntData += (hOut+1)*(wOut+1); } gradInputData[x*w + y] = outValue; } } __global__ void updateGradInputReplicatePlanewiseFracKernel( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const float *gradOutputData, const int gradOutputStrideRow, const int gradOutputStrideChannel, const int strideH, const int strideW) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { xMinCurr = (int)ceil(-xMax[windowIdx]); yMinCurr = (int)ceil(-yMax[windowIdx]); const float xMinCurrFrac = (float)xMinCurr + xMax[windowIdx]; const float yMinCurrFrac = (float)yMinCurr + yMax[windowIdx]; xMaxCurr = (int)floor(-xMin[windowIdx]) + 1; yMaxCurr = 
(int)floor(-yMin[windowIdx]) + 1; const float xMaxCurrFrac = -xMin[windowIdx] + 1 - xMaxCurr; const float yMaxCurrFrac = -yMin[windowIdx] + 1 - yMaxCurr; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(divFloor(x+xMinCurr + strideH - 1, strideH) , hOut) ); const int b = max(0, min(divFloor(x+xMaxCurr - 1 , strideH) + 1, hOut) ); const int l = max(0, min(divFloor(y+yMinCurr + strideW - 1, strideW) , wOut) ); const int r = max(0, min(divFloor(y+yMaxCurr - 1 , strideW) + 1, wOut) ); const int tAdv = modFloor(x+xMinCurr-1, strideH) == 0 and x+xMinCurr-1 < h ? max(0, min(t-1, hOut)) : t; const int bAdv = modFloor(x+xMaxCurr , strideH) == 0 and x+xMaxCurr >= 0 ? max(0, min(b+1, hOut)) : b; const int lAdv = modFloor(y+yMinCurr-1, strideW) == 0 and y+yMinCurr-1 < w ? max(0, min(l-1, wOut)) : l; const int rAdv = modFloor(y+yMaxCurr , strideW) == 0 and y+yMaxCurr >= 0 ? 
max(0, min(r+1, wOut)) : r; // TODO: 1D grid outValue += gradOutputIntData[b*(wOut+1) + r]; outValue -= gradOutputIntData[t*(wOut+1) + r]; outValue -= gradOutputIntData[b*(wOut+1) + l]; outValue += gradOutputIntData[t*(wOut+1) + l]; // -- xMax border outValue += ( gradOutputIntData[bAdv*(wOut+1) + r] - gradOutputIntData[b *(wOut+1) + r] - gradOutputIntData[bAdv*(wOut+1) + l] + gradOutputIntData[b *(wOut+1) + l] ) * xMaxCurrFrac; // -- yMax border outValue += ( gradOutputIntData[b*(wOut+1) + rAdv] - gradOutputIntData[b*(wOut+1) + r ] - gradOutputIntData[t*(wOut+1) + rAdv] + gradOutputIntData[t*(wOut+1) + r ] ) * yMaxCurrFrac; // -- xMin border outValue += ( gradOutputIntData[t *(wOut+1) + r] - gradOutputIntData[tAdv*(wOut+1) + r] - gradOutputIntData[t *(wOut+1) + l] + gradOutputIntData[tAdv*(wOut+1) + l] ) * xMinCurrFrac; // -- yMin border outValue += ( gradOutputIntData[b*(wOut+1) + l ] - gradOutputIntData[b*(wOut+1) + lAdv] - gradOutputIntData[t*(wOut+1) + l ] + gradOutputIntData[t*(wOut+1) + lAdv] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x+xMaxCurr > h-1 or y+yMaxCurr > w-1 or x+xMaxCurr < 0 or y+yMaxCurr < 0 or b == bAdv or r == rAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + r]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMaxCurr > w-1 or x+xMinCurr-1 < 0 or y+yMaxCurr < 0 or t == tAdv or r == rAdv) ? 0 : gradOutputData[tAdv*gradOutputStrideRow + r]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x+xMaxCurr > h-1 or y+yMinCurr-1 > w-1 or x+xMaxCurr < 0 or y+yMinCurr-1 < 0 or b == bAdv or l == lAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + lAdv]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMinCurr-1 > w-1 or x+xMinCurr-1 < 0 or y+yMinCurr-1 < 0 or t == tAdv or l == lAdv) ? 
0 : gradOutputData[tAdv*gradOutputStrideRow + lAdv]); // go to the next channel gradOutputIntData += (hOut+1)*(wOut+1); gradOutputData += gradOutputStrideChannel; } gradInputData[x*w + y] = outValue; } } void updateGradInputReplicatePlanewiseCuda( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const int strideH, const int strideW) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); updateGradInputReplicatePlanewiseKernel <<<dimGrid, dimBlock>>> ( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, strideH, strideW); } void updateGradInputReplicatePlanewiseFracCuda( const float *gradOutputIntData, float * const gradInputData, const int h, const int w, const int nWindows, const float *xMin, const float *xMax, const float *yMin, float *yMax, const float *gradOutputData, const int gradOutputStrideRow, const int gradOutputStrideChannel, const int strideH, const int strideW) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); updateGradInputReplicatePlanewiseFracKernel <<<dimGrid, dimBlock>>> ( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, gradOutputData, gradOutputStrideRow, gradOutputStrideChannel, strideH, strideW); } /************************ accGradParameters ************************/ __global__ void xMaxDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / 
strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; // const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const float xMinFrac = xMinInt-xMin[windowIdx]+1; const int yMinInt = (int)ceil(yMin[windowIdx]-1); const float yMinFrac = yMinInt-yMin[windowIdx]+1; const int xMaxInt = (int)floor(xMax[windowIdx]); // const float xMaxFrac = xMax[windowIdx]-xMaxInt; const int yMaxInt = (int)floor(yMax[windowIdx]); const float yMaxFrac = yMax[windowIdx]-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += blCorner * (y+yMinInt >= w ? 
1.0f : yMinFrac); delta += intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void xMinDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const float xMinFrac = xMinInt-xMin[windowIdx]+1; const int yMinInt = (int)ceil(yMin[windowIdx]-1); const float yMinFrac = yMinInt-yMin[windowIdx]+1; // const int xMaxInt = (int)floor(xMax[windowIdx]); // const float xMaxFrac = xMax[windowIdx]-xMaxInt; const int yMaxInt = (int)floor(yMax[windowIdx]); const float yMaxFrac = yMax[windowIdx]-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 
0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac); delta += intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[xOut*wOut + yOut] *= -delta; } } __global__ void yMaxDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const float xMinFrac = xMinInt-xMin[windowIdx]+1; // const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const float yMinFrac = yMinInt-yMin[windowIdx]+1; const int xMaxInt = (int)floor(xMax[windowIdx]); const float xMaxFrac = xMax[windowIdx]-xMaxInt; const int yMaxInt = (int)floor(yMax[windowIdx]); // const float yMaxFrac = yMax[windowIdx]-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 
? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void yMinDeltaIntegralReplicateFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const float xMinFrac = xMinInt-xMin[windowIdx]+1; const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const 
float yMinFrac = yMinInt-yMin[windowIdx]+1; const int xMaxInt = (int)floor(xMax[windowIdx]); const float xMaxFrac = xMax[windowIdx]-xMaxInt; // const int yMaxInt = (int)floor(yMax[windowIdx]); // const float yMaxFrac = yMax[windowIdx]-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[xOut*wOut + yOut] *= -delta; } } void backwardReplicateFracCuda( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const float *inData, const int inDataStrideRow, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * hOut * wOut + dimBlock.x - 1) / dimBlock.x); xMaxDeltaIntegralReplicateFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 0*nWindows*hOut*wOut, 
nWindows, h, w, xMax, yMin, yMax, inData, inDataStrideRow, strideH, strideW); xMinDeltaIntegralReplicateFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 1*nWindows*hOut*wOut, nWindows, h, w, xMin, yMin, yMax, inData, inDataStrideRow, strideH, strideW); yMaxDeltaIntegralReplicateFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 2*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMax, inData, inDataStrideRow, strideH, strideW); yMinDeltaIntegralReplicateFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 3*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMin, inData, inDataStrideRow, strideH, strideW); } __global__ void xMaxDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; // const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void 
xMinDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[xOut*wOut + yOut] *= -delta; } } __global__ void yMaxDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; 
tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[xOut*wOut + yOut] = delta; } } __global__ void yMinDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const int strideH, const int strideW) { // TODO: use block dim instead const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int yOut = id % wOut; id /= wOut; // 0-indexed const int xOut = id % hOut; id /= hOut; // 0-indexed const int & windowIdx = id; if (windowIdx < nWindows and xOut < hOut and yOut < wOut) { const int x = xOut*strideH + 1; const int y = yOut*strideW + 1; tmpArray += windowIdx * hOut * wOut; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); // const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt-1, w-1))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt-1, w-1))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[xOut*wOut + yOut] *= -delta; } } void 
backwardReplicateCuda( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float * const xMin, const float * const xMax, const float * const yMin, const float * const yMax, const int strideH, const int strideW) { const int hOut = (h + strideH - 1) / strideH; const int wOut = (w + strideW - 1) / strideW; dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * hOut * wOut + dimBlock.x - 1) / dimBlock.x); xMaxDeltaIntegralReplicateKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 0*nWindows*hOut*wOut, nWindows, h, w, xMax, yMin, yMax, strideH, strideW); xMinDeltaIntegralReplicateKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 1*nWindows*hOut*wOut, nWindows, h, w, xMin, yMin, yMax, strideH, strideW); yMaxDeltaIntegralReplicateKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 2*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMax, strideH, strideW); yMinDeltaIntegralReplicateKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 3*nWindows*hOut*wOut, nWindows, h, w, xMin, xMax, yMin, strideH, strideW); } } // namespace
d650e8ad446f9a633b58b4cf8171961c02a0a6cb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <hip/hip_runtime.h> #include <rocblas.h> void FPRINTF(FILE*, int N, double, double*); double EVALUATE_ERROR(int, int, double*); __global__ void INITIALIZE(int N, double dx, double* rho_even, double *rho_odd, double* field_even, double* field_odd, double* field_analytic, double* rho_analytic) { int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N; int idx_eo = idx_x/2 + idx_y*(N/2); double x = idx_x*dx; double y = idx_y*dx; field_analytic[idx] = x*(1.-x)*y*(1.-y)*exp(x-y); if (((idx_x%2)+(idx_y%2))%2==0) { if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1) { field_even[idx_eo] = 0.0; rho_even[idx_eo] = (2.*x*(y-1)*(y-2.*x+x*y+2)*exp(x-y))*dx*dx; // Notice that rho has been times by dx^2!! } else { field_even[idx_eo] = field_analytic[idx]; rho_even[idx_eo] = 0.0; } rho_analytic[idx] = rho_even[idx_eo]; // Notice that rho has been times by dx^2!! } else { if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1) { field_odd[idx_eo] = 0.0; rho_odd[idx_eo] = (2.*x*(y-1)*(y-2.*x+x*y+2)*exp(x-y))*dx*dx; // Notice that rho has been times by dx^2!! } else { field_odd[idx_eo] = field_analytic[idx]; rho_odd[idx_eo] = 0.0; } rho_analytic[idx] = rho_odd[idx_eo]; // Notice that rho has been times by dx^2!! 
} } __global__ void EVALUATE_ERROR_BLOCK_EVEN(int N, double* rho, double* field_self, double* field_neighbor, double* error_block) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + N/2*idx_y; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } else { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } } else sm[idx_sm] = 0.0; __syncthreads(); for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2) { if (idx_sm<shift) sm[idx_sm] += sm[idx_sm+shift]; __syncthreads(); } if (idx_sm==0) error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0]; // printf("%d\t%.4f\n", blockIdx.x+gridDim.x*blockIdx.y, sm[0]); } __global__ void EVALUATE_ERROR_BLOCK_ODD(int N, double* rho, double* field_self, double* field_neighbor, double* error_block) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + N/2*idx_y; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } else { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = 
pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } } else sm[idx_sm] = 0.0; __syncthreads(); for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2) { if (idx_sm<shift) sm[idx_sm] += sm[idx_sm+shift]; __syncthreads(); } if (idx_sm==0) error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0]; // printf("%d\t%.4f\n", blockIdx.x+gridDim.x*blockIdx.y, sm[0]); } __global__ void LAPLACIAN_EVEN(int N, double dx, double photon_mass, double* p_self, double* p_neighbor, double* A_p) { int idx_x = threadIdx.x + blockDim.x*blockIdx.x; int idx_y = threadIdx.y + blockDim.y*blockIdx.y; int idx = idx_x + N/2*idx_y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } else { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } } else A_p[idx] = 0.0; } __global__ void LAPLACIAN_ODD(int N, double dx, double photon_mass, double* p_self, double* p_neighbor, double* A_p) { int idx_x = threadIdx.x + blockDim.x*blockIdx.x; int idx_y = threadIdx.y + blockDim.y*blockIdx.y; int idx = idx_x + N/2*idx_y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } else { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } } else A_p[idx] = 0.0; } __global__ void 
DAXPY(int N, double c, double* A, double* B) { int idx_x = threadIdx.x + blockDim.x*blockIdx.x; int idx_y = threadIdx.y + blockDim.y*blockIdx.y; int idx = idx_x + N*idx_y; A[idx] = c*A[idx] + B[idx]; } int main(void) { int N, N_block, display_interval, tpb_x, tpb_y, bpg_x, bpg_y, shift, shift_block; float preparation_time, computation_time, total_time; double photon_mass, omega, dx, criteria; double alpha, beta; long iter, iter_max; double *field, *rho, *r, *p, *A_p, *field_analytic, *rho_analytic, *error_block; size_t size_lattice, size_sm; hipEvent_t start, stop; FILE* output_field, *output_rho; printf("Solve the Poission problem using CG by GPU with pre-conditioned.\n\n"); printf("Enter the latttice size (N,N) (N must be divisible by 2)."); scanf("%d", &N); if (N%2!=0) { printf("N is not divisible by 2! Exit!\n"); exit(1); } printf("The lattice size is (%d,%d).\n", N, N); printf("Set the photon mass.\n"); scanf("%lf", &photon_mass); printf("The photon mass is %.4e .\n", photon_mass); printf("Set the value of omega.\n"); scanf("%lf", &omega); printf("The value of omega is %.4e .\n", omega); printf("Set the maximum iteration times.\n"); scanf("%ld", &iter_max); printf("The maximum iteration times is %ld .\n", iter_max); printf("Set the stopping criteria.\n"); scanf("%lf", &criteria); printf("The stopping criteria is %.4e .\n", criteria); printf("Set the display interval during iterations.\n"); scanf("%d", &display_interval); printf("The display interval is set to be %d .\n", display_interval); printf("Set the GPU threads per block (tx,ty). (N/2 must be divisible by tx and N must be divisible by ty)\n"); scanf("%d %d", &tpb_x, &tpb_y); if ((N/2)%tpb_x!=0) { printf("N/2 is not divisible by tx! Exit!\n"); return EXIT_FAILURE; } else if (N%tpb_y!=0) { printf("N is not divisible by ty! 
Exit!\n"); return EXIT_FAILURE; } else { printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y); printf("The block per grid will be set automatically."); bpg_x = (N/2)/tpb_x; bpg_y = N/tpb_y; printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y); } printf("\n"); printf("Start Preparation...\n"); N_block = 2*bpg_x*bpg_y; shift = (N/2)*N; shift_block = bpg_x*bpg_y; dx = 1./(N-1); size_lattice = N*N*sizeof(double); size_sm = tpb_x*tpb_y*sizeof(double); output_field = fopen("analytical_field_distribution_CG_pre.txt","w"); output_rho = fopen("charge_distribution_CG_pre.txt","w"); hipSetDevice(0); hipEventCreate(&start); hipEventCreate(&stop); dim3 tpb(tpb_x,tpb_y); dim3 bpg(bpg_x,bpg_y); dim3 bpg_init(2*bpg_x,bpg_y); cublasMath_t mode = CUBLAS_TENSOR_OP_MATH; hipblasPointerMode_t mode_pt = HIPBLAS_POINTER_MODE_HOST; hipblasHandle_t handle; hipblasCreate(&handle); cublasSetMathMode(handle, mode); hipblasSetPointerMode(handle, mode_pt); hipEventRecord(start,0); hipMallocManaged(&field, size_lattice); hipMallocManaged(&r, size_lattice); hipMallocManaged(&p, size_lattice); hipMallocManaged(&A_p, size_lattice); hipMallocManaged(&rho, size_lattice); hipMallocManaged(&error_block, N_block*sizeof(double)); hipMallocManaged(&field_analytic, 2*size_lattice); hipMallocManaged(&rho_analytic, 2*size_lattice); hipLaunchKernelGGL(( INITIALIZE), dim3(bpg_init),dim3(tpb), 0, 0, N, dx, rho, rho+shift, field, field+shift, field_analytic, rho_analytic); hipLaunchKernelGGL(( EVALUATE_ERROR_BLOCK_EVEN), dim3(bpg),dim3(tpb),size_sm, 0, N, rho, field, field+shift, error_block); hipLaunchKernelGGL(( EVALUATE_ERROR_BLOCK_ODD), dim3(bpg),dim3(tpb),size_sm, 0, N, rho+shift, field+shift, field, error_block+shift_block); double norm; hipblasDdot(handle, N*N, rho, 1, rho, 1, &norm); norm = sqrt(norm); hipDeviceSynchronize(); hipMemcpy(r, rho, size_lattice, hipMemcpyDeviceToDevice); hipMemcpy(p, rho, size_lattice, hipMemcpyDeviceToDevice); FPRINTF(output_field, N, 1., 
field_analytic); FPRINTF(output_rho, N, pow(dx,-2.), rho_analytic); hipEventRecord(start,0); printf("Preparation ends.\n"); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&preparation_time, start, stop); printf("Total preparation time is %.4f ms.\n\n", preparation_time); hipEventRecord(start,0); double error = EVALUATE_ERROR(N, N_block, error_block); double temp; printf("Starts computation with error = %.8e...\n", sqrt(error)/norm); iter = 0; while (sqrt(error)/norm>criteria&&iter<iter_max) { hipLaunchKernelGGL(( LAPLACIAN_EVEN), dim3(bpg),dim3(tpb), 0, 0, N, dx, photon_mass, p, p+shift, A_p); hipLaunchKernelGGL(( LAPLACIAN_ODD), dim3(bpg),dim3(tpb), 0, 0, N, dx, photon_mass, p+shift, p, A_p+shift); hipblasDdot(handle, N*N, p, 1, A_p, 1, &temp); alpha = error/temp; temp = -alpha; hipblasDaxpy(handle, N*N, &temp, A_p, 1, r, 1); hipblasDaxpy(handle, N*N, &alpha, p, 1, field, 1); hipblasDdot(handle, N*N, r, 1, r, 1, &temp); beta = temp/error; // printf("%.4f\t%.4f\n", alpha, beta); hipLaunchKernelGGL(( DAXPY), dim3(bpg_init),dim3(tpb), 0, 0, N, beta, p, r); error = temp; iter += 1; if (iter%display_interval==0) printf("Iteration = %ld , error = %.8e .\n", iter, sqrt(error)/norm); } output_field = fopen("simulated_field_distribution_GPU_CG.txt","w"); FPRINTF(output_field, N, 1., field); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&computation_time, start, stop); printf("Computation time is %.4f ms.\n", computation_time); total_time = preparation_time + computation_time; printf("Total iteration is %ld ; total time is %.4f ms.\n", iter, total_time); hipFree(field); hipFree(r); hipFree(p); hipFree(A_p); hipFree(field_analytic); hipFree(rho_analytic); hipFree(rho); hipFree(error_block); hipblasDestroy(handle); fclose(output_field); fclose(output_rho); return EXIT_SUCCESS; } double EVALUATE_ERROR(int N, int N_block, double* error_block) { double error = 0.0; for (int i=0; i<N_block; i++) error += error_block[i]; return 
error; } void FPRINTF(FILE *output_file, int N, double scale, double *array) { for (int j=0; j<N; j++) { for (int i=0; i<N; i++) fprintf(output_file, "%.8e\t", scale*array[i+j*N]); fprintf(output_file, "\n"); } }
d650e8ad446f9a633b58b4cf8171961c02a0a6cb.cu
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <cuda_runtime.h> #include <cublas_v2.h> void FPRINTF(FILE*, int N, double, double*); double EVALUATE_ERROR(int, int, double*); __global__ void INITIALIZE(int N, double dx, double* rho_even, double *rho_odd, double* field_even, double* field_odd, double* field_analytic, double* rho_analytic) { int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N; int idx_eo = idx_x/2 + idx_y*(N/2); double x = idx_x*dx; double y = idx_y*dx; field_analytic[idx] = x*(1.-x)*y*(1.-y)*exp(x-y); if (((idx_x%2)+(idx_y%2))%2==0) { if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1) { field_even[idx_eo] = 0.0; rho_even[idx_eo] = (2.*x*(y-1)*(y-2.*x+x*y+2)*exp(x-y))*dx*dx; // Notice that rho has been times by dx^2!! } else { field_even[idx_eo] = field_analytic[idx]; rho_even[idx_eo] = 0.0; } rho_analytic[idx] = rho_even[idx_eo]; // Notice that rho has been times by dx^2!! } else { if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1) { field_odd[idx_eo] = 0.0; rho_odd[idx_eo] = (2.*x*(y-1)*(y-2.*x+x*y+2)*exp(x-y))*dx*dx; // Notice that rho has been times by dx^2!! } else { field_odd[idx_eo] = field_analytic[idx]; rho_odd[idx_eo] = 0.0; } rho_analytic[idx] = rho_odd[idx_eo]; // Notice that rho has been times by dx^2!! 
} } __global__ void EVALUATE_ERROR_BLOCK_EVEN(int N, double* rho, double* field_self, double* field_neighbor, double* error_block) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + N/2*idx_y; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } else { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } } else sm[idx_sm] = 0.0; __syncthreads(); for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2) { if (idx_sm<shift) sm[idx_sm] += sm[idx_sm+shift]; __syncthreads(); } if (idx_sm==0) error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0]; // printf("%d\t%.4f\n", blockIdx.x+gridDim.x*blockIdx.y, sm[0]); } __global__ void EVALUATE_ERROR_BLOCK_ODD(int N, double* rho, double* field_self, double* field_neighbor, double* error_block) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + N/2*idx_y; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } else { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; sm[idx_sm] = 
pow((field_neighbor[L]+field_neighbor[R]+field_neighbor[U]+field_neighbor[D]-4.*field_self[idx])-rho[idx], 2.); } else sm[idx_sm] = 0.0; } } else sm[idx_sm] = 0.0; __syncthreads(); for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2) { if (idx_sm<shift) sm[idx_sm] += sm[idx_sm+shift]; __syncthreads(); } if (idx_sm==0) error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0]; // printf("%d\t%.4f\n", blockIdx.x+gridDim.x*blockIdx.y, sm[0]); } __global__ void LAPLACIAN_EVEN(int N, double dx, double photon_mass, double* p_self, double* p_neighbor, double* A_p) { int idx_x = threadIdx.x + blockDim.x*blockIdx.x; int idx_y = threadIdx.y + blockDim.y*blockIdx.y; int idx = idx_x + N/2*idx_y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } else { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } } else A_p[idx] = 0.0; } __global__ void LAPLACIAN_ODD(int N, double dx, double photon_mass, double* p_self, double* p_neighbor, double* A_p) { int idx_x = threadIdx.x + blockDim.x*blockIdx.x; int idx_y = threadIdx.y + blockDim.y*blockIdx.y; int idx = idx_x + N/2*idx_y; if (idx_y!=0&&idx_y!=N-1) { if (idx_y%2==0) { if (idx_x!=N/2-1) { int L = idx; int R = idx + 1; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } else { if (idx_x!=0) { int L = idx - 1; int R = idx; int U = idx + N/2; int D = idx - N/2; A_p[idx] = (p_neighbor[L]+p_neighbor[R]+p_neighbor[U]+p_neighbor[D]-(4.-pow(photon_mass*dx,2.))*p_self[idx]); } else A_p[idx] = 0.0; } } else A_p[idx] = 0.0; } __global__ void 
DAXPY(int N, double c, double* A, double* B) { int idx_x = threadIdx.x + blockDim.x*blockIdx.x; int idx_y = threadIdx.y + blockDim.y*blockIdx.y; int idx = idx_x + N*idx_y; A[idx] = c*A[idx] + B[idx]; } int main(void) { int N, N_block, display_interval, tpb_x, tpb_y, bpg_x, bpg_y, shift, shift_block; float preparation_time, computation_time, total_time; double photon_mass, omega, dx, criteria; double alpha, beta; long iter, iter_max; double *field, *rho, *r, *p, *A_p, *field_analytic, *rho_analytic, *error_block; size_t size_lattice, size_sm; cudaEvent_t start, stop; FILE* output_field, *output_rho; printf("Solve the Poission problem using CG by GPU with pre-conditioned.\n\n"); printf("Enter the latttice size (N,N) (N must be divisible by 2)."); scanf("%d", &N); if (N%2!=0) { printf("N is not divisible by 2! Exit!\n"); exit(1); } printf("The lattice size is (%d,%d).\n", N, N); printf("Set the photon mass.\n"); scanf("%lf", &photon_mass); printf("The photon mass is %.4e .\n", photon_mass); printf("Set the value of omega.\n"); scanf("%lf", &omega); printf("The value of omega is %.4e .\n", omega); printf("Set the maximum iteration times.\n"); scanf("%ld", &iter_max); printf("The maximum iteration times is %ld .\n", iter_max); printf("Set the stopping criteria.\n"); scanf("%lf", &criteria); printf("The stopping criteria is %.4e .\n", criteria); printf("Set the display interval during iterations.\n"); scanf("%d", &display_interval); printf("The display interval is set to be %d .\n", display_interval); printf("Set the GPU threads per block (tx,ty). (N/2 must be divisible by tx and N must be divisible by ty)\n"); scanf("%d %d", &tpb_x, &tpb_y); if ((N/2)%tpb_x!=0) { printf("N/2 is not divisible by tx! Exit!\n"); return EXIT_FAILURE; } else if (N%tpb_y!=0) { printf("N is not divisible by ty! 
Exit!\n"); return EXIT_FAILURE; } else { printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y); printf("The block per grid will be set automatically."); bpg_x = (N/2)/tpb_x; bpg_y = N/tpb_y; printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y); } printf("\n"); printf("Start Preparation...\n"); N_block = 2*bpg_x*bpg_y; shift = (N/2)*N; shift_block = bpg_x*bpg_y; dx = 1./(N-1); size_lattice = N*N*sizeof(double); size_sm = tpb_x*tpb_y*sizeof(double); output_field = fopen("analytical_field_distribution_CG_pre.txt","w"); output_rho = fopen("charge_distribution_CG_pre.txt","w"); cudaSetDevice(0); cudaEventCreate(&start); cudaEventCreate(&stop); dim3 tpb(tpb_x,tpb_y); dim3 bpg(bpg_x,bpg_y); dim3 bpg_init(2*bpg_x,bpg_y); cublasMath_t mode = CUBLAS_TENSOR_OP_MATH; cublasPointerMode_t mode_pt = CUBLAS_POINTER_MODE_HOST; cublasHandle_t handle; cublasCreate(&handle); cublasSetMathMode(handle, mode); cublasSetPointerMode(handle, mode_pt); cudaEventRecord(start,0); cudaMallocManaged(&field, size_lattice); cudaMallocManaged(&r, size_lattice); cudaMallocManaged(&p, size_lattice); cudaMallocManaged(&A_p, size_lattice); cudaMallocManaged(&rho, size_lattice); cudaMallocManaged(&error_block, N_block*sizeof(double)); cudaMallocManaged(&field_analytic, 2*size_lattice); cudaMallocManaged(&rho_analytic, 2*size_lattice); INITIALIZE<<<bpg_init,tpb>>>(N, dx, rho, rho+shift, field, field+shift, field_analytic, rho_analytic); EVALUATE_ERROR_BLOCK_EVEN<<<bpg,tpb,size_sm>>>(N, rho, field, field+shift, error_block); EVALUATE_ERROR_BLOCK_ODD<<<bpg,tpb,size_sm>>>(N, rho+shift, field+shift, field, error_block+shift_block); double norm; cublasDdot(handle, N*N, rho, 1, rho, 1, &norm); norm = sqrt(norm); cudaDeviceSynchronize(); cudaMemcpy(r, rho, size_lattice, cudaMemcpyDeviceToDevice); cudaMemcpy(p, rho, size_lattice, cudaMemcpyDeviceToDevice); FPRINTF(output_field, N, 1., field_analytic); FPRINTF(output_rho, N, pow(dx,-2.), rho_analytic); cudaEventRecord(start,0); 
printf("Preparation ends.\n"); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&preparation_time, start, stop); printf("Total preparation time is %.4f ms.\n\n", preparation_time); cudaEventRecord(start,0); double error = EVALUATE_ERROR(N, N_block, error_block); double temp; printf("Starts computation with error = %.8e...\n", sqrt(error)/norm); iter = 0; while (sqrt(error)/norm>criteria&&iter<iter_max) { LAPLACIAN_EVEN<<<bpg,tpb>>>(N, dx, photon_mass, p, p+shift, A_p); LAPLACIAN_ODD<<<bpg,tpb>>>(N, dx, photon_mass, p+shift, p, A_p+shift); cublasDdot(handle, N*N, p, 1, A_p, 1, &temp); alpha = error/temp; temp = -alpha; cublasDaxpy(handle, N*N, &temp, A_p, 1, r, 1); cublasDaxpy(handle, N*N, &alpha, p, 1, field, 1); cublasDdot(handle, N*N, r, 1, r, 1, &temp); beta = temp/error; // printf("%.4f\t%.4f\n", alpha, beta); DAXPY<<<bpg_init,tpb>>>(N, beta, p, r); error = temp; iter += 1; if (iter%display_interval==0) printf("Iteration = %ld , error = %.8e .\n", iter, sqrt(error)/norm); } output_field = fopen("simulated_field_distribution_GPU_CG.txt","w"); FPRINTF(output_field, N, 1., field); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&computation_time, start, stop); printf("Computation time is %.4f ms.\n", computation_time); total_time = preparation_time + computation_time; printf("Total iteration is %ld ; total time is %.4f ms.\n", iter, total_time); cudaFree(field); cudaFree(r); cudaFree(p); cudaFree(A_p); cudaFree(field_analytic); cudaFree(rho_analytic); cudaFree(rho); cudaFree(error_block); cublasDestroy(handle); fclose(output_field); fclose(output_rho); return EXIT_SUCCESS; } double EVALUATE_ERROR(int N, int N_block, double* error_block) { double error = 0.0; for (int i=0; i<N_block; i++) error += error_block[i]; return error; } void FPRINTF(FILE *output_file, int N, double scale, double *array) { for (int j=0; j<N; j++) { for (int i=0; i<N; i++) fprintf(output_file, "%.8e\t", scale*array[i+j*N]); 
fprintf(output_file, "\n"); } }
1d877869b72b36477328e243669bc580bef09741.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. 
Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include <malloc.h> #include <algorithm> #include "utils.h" __global__ void init_histogram(unsigned int *histogram) { int tid = blockIdx.x * blockDim.x + threadIdx.x; histogram[tid] = 0; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum * -> parallel reduce */ // iterate over block_sizes // parallel min, cuda_calls min_logLum = max_logLum = d_logLuminance[0]; // serial implementation for (size_t i = 1; i < numRows * numCols; i++) { min_logLum = min(min_logLum, d_logLuminance[i]); max_logLum = max(max_logLum, d_logLuminance[i]); } /* 2) subtract them to find the range */ float lumRange = max_logLum - min_logLum; /* //TODO 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins * -> atomic add / alternatives */ unsigned int *histogram = (unsigned int*) calloc(numBins, sizeof(unsigned int)); // serial implementation size_t blockSize = 512; hipLaunchKernelGGL(( init_histogram), dim3((numBins + blockSize - 1)/blockSize), dim3(blockSize), 0, 0, histogram); size_t bin; for (size_t i = 0; i < numCols * numRows; i++) { bin = min(numBins-1, (d_logLuminance[i] - min_logLum) / lumRange * numBins); histogram[bin]++; } /* 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // serial implementation d_cdf[0] = 0; for (size_t i = 1; i < numCols * numRows; i++) { d_cdf[i] = d_cdf[i-1] + histogram[i-1]; } }
1d877869b72b36477328e243669bc580bef09741.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. 
We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include <malloc.h> #include <algorithm> #include "utils.h" __global__ void init_histogram(unsigned int *histogram) { int tid = blockIdx.x * blockDim.x + threadIdx.x; histogram[tid] = 0; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum * -> parallel reduce */ // iterate over block_sizes // parallel min, cuda_calls min_logLum = max_logLum = d_logLuminance[0]; // serial implementation for (size_t i = 1; i < numRows * numCols; i++) { min_logLum = min(min_logLum, d_logLuminance[i]); max_logLum = max(max_logLum, d_logLuminance[i]); } /* 2) subtract them to find the range */ float lumRange = max_logLum - min_logLum; /* //TODO 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins * -> atomic add / alternatives */ unsigned int *histogram = (unsigned int*) calloc(numBins, sizeof(unsigned int)); // serial implementation size_t blockSize = 512; init_histogram<<<(numBins + blockSize - 1)/blockSize, blockSize>>>(histogram); size_t bin; for (size_t i = 0; i < numCols * numRows; i++) { bin = min(numBins-1, (d_logLuminance[i] - min_logLum) / lumRange * numBins); histogram[bin]++; } /* 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // serial implementation d_cdf[0] = 0; for (size_t i = 1; i < numCols * numRows; i++) { d_cdf[i] = d_cdf[i-1] + histogram[i-1]; } }
bcb9c29ae284814916b0aabb42df384104dc0f0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <needle.h> #include <stdio.h> #define SDATA( index) CUT_BANK_CHECKER(sdata, index) __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } __global__ void needle_cuda_shared_1( int* referrence, int* matrix_cuda, int* matrix_cuda_out, int cols, int penalty, int i, int block_width) { int bx = blockIdx.x; int tx = threadIdx.x; int b_index_x = bx; int b_index_y = i - 1 - bx; int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 ); int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols ); int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1]; __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE]; if (tx == 0) temp[tx][0] = matrix_cuda[index_nw]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) ref[ty][tx] = referrence[index + cols * ty]; __syncthreads(); temp[tx + 1][0] = matrix_cuda[index_w + cols * tx]; __syncthreads(); temp[0][tx + 1] = matrix_cuda[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) matrix_cuda[index + ty * cols] = temp[ty+1][tx+1]; } __global__ 
void needle_cuda_shared_2( int* referrence, int* matrix_cuda, int* matrix_cuda_out, int cols, int penalty, int i, int block_width) { int bx = blockIdx.x; int tx = threadIdx.x; int b_index_x = bx + block_width - i ; int b_index_y = block_width - bx -1; int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 ); int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols ); int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1]; __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) ref[ty][tx] = referrence[index + cols * ty]; __syncthreads(); if (tx == 0) temp[tx][0] = matrix_cuda[index_nw]; temp[tx + 1][0] = matrix_cuda[index_w + cols * tx]; __syncthreads(); temp[0][tx + 1] = matrix_cuda[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) matrix_cuda[index + ty * cols] = temp[ty+1][tx+1]; }
bcb9c29ae284814916b0aabb42df384104dc0f0f.cu
#include <needle.h> #include <stdio.h> #define SDATA( index) CUT_BANK_CHECKER(sdata, index) __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } __global__ void needle_cuda_shared_1( int* referrence, int* matrix_cuda, int* matrix_cuda_out, int cols, int penalty, int i, int block_width) { int bx = blockIdx.x; int tx = threadIdx.x; int b_index_x = bx; int b_index_y = i - 1 - bx; int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 ); int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols ); int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1]; __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE]; if (tx == 0) temp[tx][0] = matrix_cuda[index_nw]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) ref[ty][tx] = referrence[index + cols * ty]; __syncthreads(); temp[tx + 1][0] = matrix_cuda[index_w + cols * tx]; __syncthreads(); temp[0][tx + 1] = matrix_cuda[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) matrix_cuda[index + ty * cols] = temp[ty+1][tx+1]; } __global__ void needle_cuda_shared_2( int* referrence, int* matrix_cuda, int* matrix_cuda_out, int 
cols, int penalty, int i, int block_width) { int bx = blockIdx.x; int tx = threadIdx.x; int b_index_x = bx + block_width - i ; int b_index_y = block_width - bx -1; int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 ); int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols ); int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1]; __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) ref[ty][tx] = referrence[index + cols * ty]; __syncthreads(); if (tx == 0) temp[tx][0] = matrix_cuda[index_nw]; temp[tx + 1][0] = matrix_cuda[index_w + cols * tx]; __syncthreads(); temp[0][tx + 1] = matrix_cuda[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) matrix_cuda[index + ty * cols] = temp[ty+1][tx+1]; }
560bf513c83b07cad6e69121d7962d48589d340c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kFloor.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; hipMalloc(&mat, XSIZE*YSIZE); float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); unsigned int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kFloor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,target,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kFloor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,target,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kFloor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,target,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' 
<< endl; } }}
560bf513c83b07cad6e69121d7962d48589d340c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kFloor.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; cudaMalloc(&mat, XSIZE*YSIZE); float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); unsigned int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kFloor<<<gridBlock,threadBlock>>>(mat,target,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kFloor<<<gridBlock,threadBlock>>>(mat,target,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kFloor<<<gridBlock,threadBlock>>>(mat,target,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
effd21fbb4bafef5303f2befbae454658c2547f7.hip
// !!! This is a file automatically generated by hipify!!! /* coding=utf-8 * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include <ATen/hip/HIPContext.h> #include <torch/extension.h> #include "scaled_masked_softmax.h" #include "type_shim.h" namespace multihead_attn { namespace fused_softmax { namespace scaled_softmax { torch::Tensor fwd_cuda( torch::Tensor const& input, float scale_factor) { // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] const int batches = input.size(0); const int attn_heads = input.size(1); const int query_seq_len = input.size(2); const int key_seq_len = input.size(3); TORCH_INTERNAL_ASSERT(key_seq_len <= 16384); TORCH_INTERNAL_ASSERT(query_seq_len > 1); // Output auto act_options = input.options().requires_grad(false); torch::Tensor softmax_results = torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); // Softmax Intermediate Result Ptr void* input_ptr = static_cast<void*>(input.data_ptr()); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); DISPATCH_HALF_AND_BFLOAT( input.scalar_type(), "dispatch_scaled_softmax_forward", dispatch_scaled_softmax_forward<scalar_t, scalar_t, float>( reinterpret_cast<scalar_t*>(softmax_results_ptr), reinterpret_cast<const scalar_t*>(input_ptr), 
scale_factor, query_seq_len, key_seq_len, batches, attn_heads); ); return softmax_results; } torch::Tensor bwd_cuda( torch::Tensor const& output_grads_, torch::Tensor const& softmax_results_, float scale_factor) { auto output_grads = output_grads_.contiguous(); auto softmax_results = softmax_results_.contiguous(); //output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] const int batches = output_grads.size(0); const int attn_heads = output_grads.size(1); const int query_seq_len = output_grads.size(2); const int key_seq_len = output_grads.size(3); void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr()); //Softmax Grad DISPATCH_HALF_AND_BFLOAT( output_grads_.scalar_type(), "dispatch_scaled_masked_softmax_backward", dispatch_scaled_masked_softmax_backward<scalar_t, scalar_t, float>( reinterpret_cast<scalar_t*>(output_grads_ptr), reinterpret_cast<scalar_t*>(output_grads_ptr), reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()), scale_factor, query_seq_len, key_seq_len, batches, attn_heads); ); //backward pass is completely in-place return output_grads; } } } }
effd21fbb4bafef5303f2befbae454658c2547f7.cu
/* coding=utf-8 * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include "scaled_masked_softmax.h" #include "type_shim.h" namespace multihead_attn { namespace fused_softmax { namespace scaled_softmax { torch::Tensor fwd_cuda( torch::Tensor const& input, float scale_factor) { // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] const int batches = input.size(0); const int attn_heads = input.size(1); const int query_seq_len = input.size(2); const int key_seq_len = input.size(3); TORCH_INTERNAL_ASSERT(key_seq_len <= 16384); TORCH_INTERNAL_ASSERT(query_seq_len > 1); // Output auto act_options = input.options().requires_grad(false); torch::Tensor softmax_results = torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); // Softmax Intermediate Result Ptr void* input_ptr = static_cast<void*>(input.data_ptr()); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); DISPATCH_HALF_AND_BFLOAT( input.scalar_type(), "dispatch_scaled_softmax_forward", dispatch_scaled_softmax_forward<scalar_t, scalar_t, float>( reinterpret_cast<scalar_t*>(softmax_results_ptr), reinterpret_cast<const scalar_t*>(input_ptr), scale_factor, query_seq_len, key_seq_len, batches, attn_heads); ); return 
softmax_results; } torch::Tensor bwd_cuda( torch::Tensor const& output_grads_, torch::Tensor const& softmax_results_, float scale_factor) { auto output_grads = output_grads_.contiguous(); auto softmax_results = softmax_results_.contiguous(); //output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] const int batches = output_grads.size(0); const int attn_heads = output_grads.size(1); const int query_seq_len = output_grads.size(2); const int key_seq_len = output_grads.size(3); void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr()); //Softmax Grad DISPATCH_HALF_AND_BFLOAT( output_grads_.scalar_type(), "dispatch_scaled_masked_softmax_backward", dispatch_scaled_masked_softmax_backward<scalar_t, scalar_t, float>( reinterpret_cast<scalar_t*>(output_grads_ptr), reinterpret_cast<scalar_t*>(output_grads_ptr), reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()), scale_factor, query_seq_len, key_seq_len, batches, attn_heads); ); //backward pass is completely in-place return output_grads; } } } }
1f99429b03e9959dde69ed21e11efeb3e7ec4935.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. // I will try to reduce it. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned P0 = 0x777; unsigned P = 0; if (img[img_index]) { P |= P0; } if (col + 1 < img.cols) { if (img[img_index + 1]) { P |= (P0 << 1); } if (row + 1 < img.rows && img[img_index + img.step + 1]) { P |= (P0 << 5); } } if (row + 1 < img.rows) { if (img[img_index + img.step]) { P |= (P0 << 4); } } if (col == 0) { P &= 0xEEEE; } if (col + 1 >= img.cols) { P &= 0x3333; } else if (col + 2 >= img.cols) { P &= 0x7777; } if (row == 0) { P &= 0xFFF0; } if (row + 1 >= img.rows) { P &= 0xFF; } else if (row + 2 >= img.rows) { P &= 0xFFF; } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors unsigned char conn_bitmask = 0; if (P > 0) { labels[labels_index] = labels_index + 1; if (HasBit(P, 0) && img[img_index - img.step - 1]) { SetBit(conn_bitmask, 0); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { SetBit(conn_bitmask, 1); } 
if (HasBit(P, 3) && img[img_index + 2 - img.step]) { SetBit(conn_bitmask, 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 8) && img[img_index + img.step - 1])) { SetBit(conn_bitmask, 3); } if ((HasBit(P, 7) && img[img_index + 2]) || (HasBit(P, 11) && img[img_index + img.step + 2])) { SetBit(conn_bitmask, 4); } if (HasBit(P, 12) && img[img_index + 2 * img.step - 1]) { SetBit(conn_bitmask, 5); } if ((HasBit(P, 13) && img[img_index + 2 * img.step]) || (HasBit(P, 14) && img[img_index + 2 * img.step + 1])) { SetBit(conn_bitmask, 6); } if (HasBit(P, 15) && img[img_index + 2 * img.step + 2]) { SetBit(conn_bitmask, 7); } } else { labels[labels_index] = 0; } // Connection bitmask is stored in the north-east int of every block // If columns are odd, in the last column, it's stored in the south-west of every block instead // If columns are odd and rows are odd, it's stored in *last_pixel if (col + 1 < labels.cols) labels[labels_index + 1] = conn_bitmask; else if (row + 1 < labels.rows) labels[labels_index + (labels.step / labels.elem_size)] = conn_bitmask; else *last_pixel = conn_bitmask; } } __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned char neighbours, unsigned label, unsigned labels_index) { unsigned int min = label; if (HasBit(neighbours, 0)) { min = MinLabel(min, labels.data[labels_index - 2 * (labels.step / labels.elem_size) - 2]); } if (HasBit(neighbours, 1)) { min = MinLabel(min, labels.data[labels_index - 2 * (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 2)) { min = MinLabel(min, labels.data[labels_index - 2 * (labels.step / labels.elem_size) + 2]); } if (HasBit(neighbours, 3)) { min = MinLabel(min, labels.data[labels_index - 2]); } if (HasBit(neighbours, 4)) { min = MinLabel(min, labels.data[labels_index + 2]); } if (HasBit(neighbours, 5)) { min = MinLabel(min, labels.data[labels_index + 2 * 
(labels.step / labels.elem_size) - 2]); } if (HasBit(neighbours, 6)) { min = MinLabel(min, labels.data[labels_index + 2 * (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 7)) { min = MinLabel(min, labels.data[labels_index + 2 * (labels.step / labels.elem_size) + 2]); } return min; } __global__ void Scan(cuda::PtrStepSzi labels, unsigned char *changes, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char neighbours; if (col + 1 < labels.cols) neighbours = labels[labels_index + 1]; else if (row + 1 < labels.rows) neighbours = labels[labels_index + (labels.step / labels.elem_size)]; else neighbours = *last_pixel; unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } __global__ void Analyze(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { // Performances are the same as the paper variant unsigned index = labels_index; while (label - 1 != index) { index = label - 1; label = labels[index]; } labels[labels_index] = label; } } } __global__ void FinalLabeling(cuda::PtrStepSzi labels, const cuda::PtrStepSzb img) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned img_index = row * (img.step / img.elem_size) + col; if (row < labels.rows 
&& col < labels.cols) { unsigned int label = labels[labels_index]; if (img[img_index]) {} // labels[labels_index] = label; else { labels[labels_index] = 0; } if (col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (row + 1 < labels.rows) { if (img[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class BE_LIGHT : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; char changes_; unsigned char *d_changes_; bool d_changed_alloc_ = false; unsigned char *last_pixel_; public: BE_LIGHT() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); if (d_img_.rows == 1) { if (d_img_.cols == 1) { d_img_.convertTo(d_img_labels_, CV_32SC1); return; } else if (d_img_.cols % 2) { hipMalloc(&d_changes_, sizeof(unsigned char) * 2); d_changed_alloc_ = true; last_pixel_ = d_changes_ + 1; } else { hipMalloc(&d_changes_, sizeof(unsigned char)); d_changed_alloc_ = true; } } else if (d_img_.cols == 1) { if (d_img_.rows % 2) { hipMalloc(&d_changes_, sizeof(unsigned char) * 2); d_changed_alloc_ = true; last_pixel_ = d_changes_ + 1; } else { hipMalloc(&d_changes_, sizeof(unsigned char)); d_changed_alloc_ = true; } } else { d_changes_ = d_img_labels_.data + d_img_labels_.step; last_pixel_ = d_img_labels_.data + d_img_labels_.step + sizeof(unsigned int); } grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_, last_pixel_); //cuda::GpuMat d_expanded_connections; 
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes_ = 0; hipMemcpy(d_changes_, &changes_, sizeof(unsigned char), hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes_, last_pixel_); hipMemcpy(&changes_, d_changes_, sizeof(unsigned char), hipMemcpyDeviceToHost); if (!changes_) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } //Mat1i block_info_final; //d_img_labels_.download(block_info_final); /*if ((img_.rows % 2) && (img_.cols % 2)) LastPixel << <1, 1 >> > (d_img_labels_, last_pixel_); */ FinalLabeling << <grid_size_, block_size_ >> >(d_img_labels_, d_img_); // d_img_labels_.download(img_labels_); if (d_changed_alloc_) hipFree(d_changes_); } private: bool Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); if (d_img_.rows == 1) { if (d_img_.cols == 1) { d_img_.convertTo(d_img_labels_, CV_32SC1); return true; } else if (d_img_.cols % 2) { hipMalloc(&d_changes_, sizeof(unsigned char) * 2); last_pixel_ = d_changes_ + 1; } else { hipMalloc(&d_changes_, sizeof(unsigned char)); } } else if (d_img_.cols == 1) { if (d_img_.rows % 2) { hipMalloc(&d_changes_, sizeof(unsigned char) * 2); last_pixel_ = d_changes_ + 1; } else { hipMalloc(&d_changes_, sizeof(unsigned char)); } } else { d_changes_ = d_img_labels_.data + d_img_labels_.step; last_pixel_ = d_img_labels_.data + d_img_labels_.step + sizeof(unsigned int); } return false; } void Dealloc() { if (d_changed_alloc_) hipFree(d_changes_); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void 
AllScans() { last_pixel_ = d_changes_ + 1; grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_, last_pixel_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes_ = 0; hipMemcpy(d_changes_, &changes_, sizeof(unsigned char), hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes_, last_pixel_); hipMemcpy(&changes_, d_changes_, sizeof(unsigned char), hipMemcpyDeviceToHost); if (!changes_) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_img_labels_, d_img_); hipDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); bool done = Alloc(); perf_.stop(); double alloc_timing = perf_.last(); if (!done) { perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); } perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BE_LIGHT);
1f99429b03e9959dde69ed21e11efeb3e7ec4935.cu
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. // I will try to reduce it. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned P0 = 0x777; unsigned P = 0; if (img[img_index]) { P |= P0; } if (col + 1 < img.cols) { if (img[img_index + 1]) { P |= (P0 << 1); } if (row + 1 < img.rows && img[img_index + img.step + 1]) { P |= (P0 << 5); } } if (row + 1 < img.rows) { if (img[img_index + img.step]) { P |= (P0 << 4); } } if (col == 0) { P &= 0xEEEE; } if (col + 1 >= img.cols) { P &= 0x3333; } else if (col + 2 >= img.cols) { P &= 0x7777; } if (row == 0) { P &= 0xFFF0; } if (row + 1 >= img.rows) { P &= 0xFF; } else if (row + 2 >= img.rows) { P &= 0xFFF; } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors unsigned char conn_bitmask = 0; if (P > 0) { labels[labels_index] = labels_index + 1; if (HasBit(P, 0) && img[img_index - img.step - 1]) { SetBit(conn_bitmask, 0); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { SetBit(conn_bitmask, 1); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { 
SetBit(conn_bitmask, 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 8) && img[img_index + img.step - 1])) { SetBit(conn_bitmask, 3); } if ((HasBit(P, 7) && img[img_index + 2]) || (HasBit(P, 11) && img[img_index + img.step + 2])) { SetBit(conn_bitmask, 4); } if (HasBit(P, 12) && img[img_index + 2 * img.step - 1]) { SetBit(conn_bitmask, 5); } if ((HasBit(P, 13) && img[img_index + 2 * img.step]) || (HasBit(P, 14) && img[img_index + 2 * img.step + 1])) { SetBit(conn_bitmask, 6); } if (HasBit(P, 15) && img[img_index + 2 * img.step + 2]) { SetBit(conn_bitmask, 7); } } else { labels[labels_index] = 0; } // Connection bitmask is stored in the north-east int of every block // If columns are odd, in the last column, it's stored in the south-west of every block instead // If columns are odd and rows are odd, it's stored in *last_pixel if (col + 1 < labels.cols) labels[labels_index + 1] = conn_bitmask; else if (row + 1 < labels.rows) labels[labels_index + (labels.step / labels.elem_size)] = conn_bitmask; else *last_pixel = conn_bitmask; } } __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned char neighbours, unsigned label, unsigned labels_index) { unsigned int min = label; if (HasBit(neighbours, 0)) { min = MinLabel(min, labels.data[labels_index - 2 * (labels.step / labels.elem_size) - 2]); } if (HasBit(neighbours, 1)) { min = MinLabel(min, labels.data[labels_index - 2 * (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 2)) { min = MinLabel(min, labels.data[labels_index - 2 * (labels.step / labels.elem_size) + 2]); } if (HasBit(neighbours, 3)) { min = MinLabel(min, labels.data[labels_index - 2]); } if (HasBit(neighbours, 4)) { min = MinLabel(min, labels.data[labels_index + 2]); } if (HasBit(neighbours, 5)) { min = MinLabel(min, labels.data[labels_index + 2 * (labels.step / labels.elem_size) - 2]); } if (HasBit(neighbours, 
6)) { min = MinLabel(min, labels.data[labels_index + 2 * (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 7)) { min = MinLabel(min, labels.data[labels_index + 2 * (labels.step / labels.elem_size) + 2]); } return min; } __global__ void Scan(cuda::PtrStepSzi labels, unsigned char *changes, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char neighbours; if (col + 1 < labels.cols) neighbours = labels[labels_index + 1]; else if (row + 1 < labels.rows) neighbours = labels[labels_index + (labels.step / labels.elem_size)]; else neighbours = *last_pixel; unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } __global__ void Analyze(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { // Performances are the same as the paper variant unsigned index = labels_index; while (label - 1 != index) { index = label - 1; label = labels[index]; } labels[labels_index] = label; } } } __global__ void FinalLabeling(cuda::PtrStepSzi labels, const cuda::PtrStepSzb img) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned img_index = row * (img.step / img.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned int label = 
labels[labels_index]; if (img[img_index]) {} // labels[labels_index] = label; else { labels[labels_index] = 0; } if (col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (row + 1 < labels.rows) { if (img[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class BE_LIGHT : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; char changes_; unsigned char *d_changes_; bool d_changed_alloc_ = false; unsigned char *last_pixel_; public: BE_LIGHT() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); if (d_img_.rows == 1) { if (d_img_.cols == 1) { d_img_.convertTo(d_img_labels_, CV_32SC1); return; } else if (d_img_.cols % 2) { cudaMalloc(&d_changes_, sizeof(unsigned char) * 2); d_changed_alloc_ = true; last_pixel_ = d_changes_ + 1; } else { cudaMalloc(&d_changes_, sizeof(unsigned char)); d_changed_alloc_ = true; } } else if (d_img_.cols == 1) { if (d_img_.rows % 2) { cudaMalloc(&d_changes_, sizeof(unsigned char) * 2); d_changed_alloc_ = true; last_pixel_ = d_changes_ + 1; } else { cudaMalloc(&d_changes_, sizeof(unsigned char)); d_changed_alloc_ = true; } } else { d_changes_ = d_img_labels_.data + d_img_labels_.step; last_pixel_ = d_img_labels_.data + d_img_labels_.step + sizeof(unsigned int); } grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_, last_pixel_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 
3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes_ = 0; cudaMemcpy(d_changes_, &changes_, sizeof(unsigned char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes_, last_pixel_); cudaMemcpy(&changes_, d_changes_, sizeof(unsigned char), cudaMemcpyDeviceToHost); if (!changes_) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } //Mat1i block_info_final; //d_img_labels_.download(block_info_final); /*if ((img_.rows % 2) && (img_.cols % 2)) LastPixel << <1, 1 >> > (d_img_labels_, last_pixel_); */ FinalLabeling << <grid_size_, block_size_ >> >(d_img_labels_, d_img_); // d_img_labels_.download(img_labels_); if (d_changed_alloc_) cudaFree(d_changes_); } private: bool Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); if (d_img_.rows == 1) { if (d_img_.cols == 1) { d_img_.convertTo(d_img_labels_, CV_32SC1); return true; } else if (d_img_.cols % 2) { cudaMalloc(&d_changes_, sizeof(unsigned char) * 2); last_pixel_ = d_changes_ + 1; } else { cudaMalloc(&d_changes_, sizeof(unsigned char)); } } else if (d_img_.cols == 1) { if (d_img_.rows % 2) { cudaMalloc(&d_changes_, sizeof(unsigned char) * 2); last_pixel_ = d_changes_ + 1; } else { cudaMalloc(&d_changes_, sizeof(unsigned char)); } } else { d_changes_ = d_img_labels_.data + d_img_labels_.step; last_pixel_ = d_img_labels_.data + d_img_labels_.step + sizeof(unsigned int); } return false; } void Dealloc() { if (d_changed_alloc_) cudaFree(d_changes_); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { last_pixel_ = d_changes_ + 1; 
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_, last_pixel_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes_ = 0; cudaMemcpy(d_changes_, &changes_, sizeof(unsigned char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes_, last_pixel_); cudaMemcpy(&changes_, d_changes_, sizeof(unsigned char), cudaMemcpyDeviceToHost); if (!changes_) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_img_labels_, d_img_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); bool done = Alloc(); perf_.stop(); double alloc_timing = perf_.last(); if (!done) { perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); } perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BE_LIGHT);
fec785c0ebe8fea4d854e5362be21ceda29e1455.hip
// !!! This is a file automatically generated by hipify!!! #include "pscan.h" #include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <iostream> #include <hip/hip_runtime.h> #include "cuda_utils.h" static const int THREADS_PER_BLOCK = 128; static const int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; __host__ int nextPowerOfTwo(int x) { int power = 1; while (power < x) { power *= 2; } return power; } __global__ void prescan_small_kernel(int *input, int *output, int n, int pow2) { extern __shared__ int buffer[]; const int threadID = threadIdx.x; if (threadID < n) { buffer[2 * threadID] = input[2 * threadID]; buffer[2 * threadID + 1] = input[2 * threadID + 1]; } else { buffer[2 * threadID] = 0.0; buffer[2 * threadID + 1] = 0.0; } int offset = 1; for (int d = pow2 >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += buffer[ai]; } offset *= 2; } if (threadID == 0) { buffer[pow2 - 1] = 0; } for (int d = 1; d < pow2; d *= 2) { offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); if (threadID < n) { output[2 * threadID] = buffer[2 * threadID]; output[2 * threadID + 1] = buffer[2 * threadID + 1]; } } __global__ void prescan_large_kernel(int *input, int *output, int n, int *sums) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * n; extern __shared__ int buffer[]; buffer[2 * threadID] = input[blockOffset + (2 * threadID)]; buffer[2 * threadID + 1] = input[blockOffset + (2 * threadID + 1)]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += 
buffer[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) { sums[blockID] = buffer[n - 1]; buffer[n - 1] = 0; } for (int d = 1; d < n; d *= 2) { offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); output[blockOffset + (2 * threadID)] = buffer[2 * threadID]; output[blockOffset + (2 * threadID + 1)] = buffer[2 * threadID + 1]; } __global__ void add(int *output, int length, int *n) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; } __global__ void add(int *output, int length, int *n1, int *n2) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n1[blockID] + n2[blockID]; } void prescan_small(int *d_in, int *d_out, int n, int dev_id = 0, hipStream_t stream = 0) { const int pow2 = nextPowerOfTwo(n); hipLaunchKernelGGL(( prescan_small_kernel), dim3(1), dim3((n + 1) / 2), 2 * pow2 * sizeof(int), stream, d_in, d_out, n, pow2); CUDA_CHECK_ERRORS(); } void prescan_large(int *d_in, int *d_out, int n, int dev_id = 0, hipStream_t stream = 0) { const int blocks = (n + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK; const int sharedSize = ELEMENTS_PER_BLOCK * sizeof(int); torch::Tensor d_sums = torch::zeros({ blocks }, torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, dev_id)); torch::Tensor d_incr = torch::zeros({ blocks }, torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, dev_id)); hipLaunchKernelGGL(( prescan_large_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 2 * sharedSize, stream, d_in, d_out, ELEMENTS_PER_BLOCK, (int*)d_sums.data_ptr()); const int sumThreadsNeeded = (blocks + 1) / 2; if (sumThreadsNeeded > THREADS_PER_BLOCK) { 
prescan_large((int*)d_sums.data_ptr(), (int*)d_incr.data_ptr(), blocks, dev_id, stream); } else { prescan_small((int*)d_sums.data_ptr(), (int*)d_incr.data_ptr(), blocks, dev_id, stream); } hipLaunchKernelGGL(( add), dim3(blocks), dim3(ELEMENTS_PER_BLOCK), 0, stream, d_out, ELEMENTS_PER_BLOCK, (int*)d_incr.data_ptr()); CUDA_CHECK_ERRORS(); } void prescan(int *d_in, int *d_out, int size, int dev_id, hipStream_t stream) { const size_t residue = size % ELEMENTS_PER_BLOCK; if (size < ELEMENTS_PER_BLOCK) { prescan_small(d_in, d_out, size, dev_id, stream); } else if (residue == 0) { prescan_large(d_in, d_out, size, dev_id, stream); } else { const size_t tail = size - residue; prescan_large(d_in, d_out, tail, dev_id, stream); prescan_small(&d_in[tail], &d_out[tail], residue, dev_id, stream); hipLaunchKernelGGL(( add), dim3(1), dim3(residue), 0, stream, &d_out[tail], residue, &d_in[tail - 1], &d_out[tail - 1]); } CUDA_CHECK_ERRORS(); }
fec785c0ebe8fea4d854e5362be21ceda29e1455.cu
#include "pscan.h" #include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <iostream> #include <cuda_runtime.h> #include "cuda_utils.h" static const int THREADS_PER_BLOCK = 128; static const int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; __host__ int nextPowerOfTwo(int x) { int power = 1; while (power < x) { power *= 2; } return power; } __global__ void prescan_small_kernel(int *input, int *output, int n, int pow2) { extern __shared__ int buffer[]; const int threadID = threadIdx.x; if (threadID < n) { buffer[2 * threadID] = input[2 * threadID]; buffer[2 * threadID + 1] = input[2 * threadID + 1]; } else { buffer[2 * threadID] = 0.0; buffer[2 * threadID + 1] = 0.0; } int offset = 1; for (int d = pow2 >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += buffer[ai]; } offset *= 2; } if (threadID == 0) { buffer[pow2 - 1] = 0; } for (int d = 1; d < pow2; d *= 2) { offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); if (threadID < n) { output[2 * threadID] = buffer[2 * threadID]; output[2 * threadID + 1] = buffer[2 * threadID + 1]; } } __global__ void prescan_large_kernel(int *input, int *output, int n, int *sums) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * n; extern __shared__ int buffer[]; buffer[2 * threadID] = input[blockOffset + (2 * threadID)]; buffer[2 * threadID + 1] = input[blockOffset + (2 * threadID + 1)]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += buffer[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) 
{ sums[blockID] = buffer[n - 1]; buffer[n - 1] = 0; } for (int d = 1; d < n; d *= 2) { offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); output[blockOffset + (2 * threadID)] = buffer[2 * threadID]; output[blockOffset + (2 * threadID + 1)] = buffer[2 * threadID + 1]; } __global__ void add(int *output, int length, int *n) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; } __global__ void add(int *output, int length, int *n1, int *n2) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n1[blockID] + n2[blockID]; } void prescan_small(int *d_in, int *d_out, int n, int dev_id = 0, cudaStream_t stream = 0) { const int pow2 = nextPowerOfTwo(n); prescan_small_kernel<<<1, (n + 1) / 2, 2 * pow2 * sizeof(int), stream>>>(d_in, d_out, n, pow2); CUDA_CHECK_ERRORS(); } void prescan_large(int *d_in, int *d_out, int n, int dev_id = 0, cudaStream_t stream = 0) { const int blocks = (n + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK; const int sharedSize = ELEMENTS_PER_BLOCK * sizeof(int); torch::Tensor d_sums = torch::zeros({ blocks }, torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, dev_id)); torch::Tensor d_incr = torch::zeros({ blocks }, torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, dev_id)); prescan_large_kernel<<<blocks, THREADS_PER_BLOCK, 2 * sharedSize, stream>>>( d_in, d_out, ELEMENTS_PER_BLOCK, (int*)d_sums.data_ptr()); const int sumThreadsNeeded = (blocks + 1) / 2; if (sumThreadsNeeded > THREADS_PER_BLOCK) { prescan_large((int*)d_sums.data_ptr(), (int*)d_incr.data_ptr(), blocks, dev_id, stream); } else { prescan_small((int*)d_sums.data_ptr(), 
(int*)d_incr.data_ptr(), blocks, dev_id, stream); } add<<<blocks, ELEMENTS_PER_BLOCK, 0, stream>>>(d_out, ELEMENTS_PER_BLOCK, (int*)d_incr.data_ptr()); CUDA_CHECK_ERRORS(); } void prescan(int *d_in, int *d_out, int size, int dev_id, cudaStream_t stream) { const size_t residue = size % ELEMENTS_PER_BLOCK; if (size < ELEMENTS_PER_BLOCK) { prescan_small(d_in, d_out, size, dev_id, stream); } else if (residue == 0) { prescan_large(d_in, d_out, size, dev_id, stream); } else { const size_t tail = size - residue; prescan_large(d_in, d_out, tail, dev_id, stream); prescan_small(&d_in[tail], &d_out[tail], residue, dev_id, stream); add<<<1, residue, 0, stream>>>(&d_out[tail], residue, &d_in[tail - 1], &d_out[tail - 1]); } CUDA_CHECK_ERRORS(); }
f77008956679638b45d17d0026e042d4a299491a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #define true 1 #define false 0 __device__ int min_distance(int dist[], int spt_set[], int n) { int min = INT_MAX, min_index; for (int v = 0; v < n; v++) { if (spt_set[v] == false && dist[v] <= min){ min = dist[v], min_index = v; } } return min_index; } __device__ void dijkstra(int* graph, int* res, int src, int n) { // inisiasi ukuran matriks maksimal yang dibutuhkan sesuai testcase int dist[3000]; int spt_set[3000]; //init distance and spt_set for (int i = 0; i < n; i++) { dist[i] = INT_MAX; spt_set[i] = false; } // init distance dengan 0 semua dist[src] = 0; for (int count = 0; count < n - 1; count++) { int u = min_distance(dist, spt_set, n); spt_set[u] = true; for (int v = 0; v < n; v++) { if (!spt_set[v] && graph[u*n+v] && dist[u] != INT_MAX && dist[u] + graph[u*n+v] < dist[v]) { dist[v] = dist[u] + graph[u*n+v]; } } } for (int i = 0; i < n; i++) { res[src*n + i] = dist[i]; } } // random matriks dengan nim __host__ void random_matriks(int* host_matrix, int num_nodes) { srand(13517074); // init distance for (int i = 0; i < num_nodes; i++) { for (int j = i; j < num_nodes; j++) { if (i == j) { host_matrix[i*num_nodes + j] = 0; } else { host_matrix[i*num_nodes + j] = rand() % 100; host_matrix[j*num_nodes + i] = host_matrix[i*num_nodes + j]; } } } } __host__ void print_matriks(int* host_matrix, int num_nodes) { for (int i = 0; i < num_nodes; i++) { for (int j = 0; j < num_nodes; j++) { printf("%d\t", host_matrix[i*num_nodes + j]); } printf("\n"); } } __global__ void solution (int* graph, int* result, int nodes_count) { // init source for check matriks /* gridDim: variabel yang berisi dimensi dari grid. * blockIdx: variabel yang berisi index block di mana thread ini berada. * blockDim: variabel yang berisi dimensi dari block. * threadIdx: variabel yang berisi index thread di dalam block. 
(untuk membedakan thread yang berada di block yang berbeda, gunakan blockIdx).*/ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < nodes_count) { dijkstra(graph, result, i, nodes_count); } } int main(int argc, char *argv[]) { int nodes_count = strtol(argv[2], NULL, 10); int num_thread = atoi(argv[1]); size_t size = nodes_count*nodes_count*sizeof(int); //cuda variabel for calculate time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // built in vaaribels int threads_per_block = num_thread; int blocks_in_grid = (nodes_count / threads_per_block) + 1; // Allocate memory on host int *host_matrix, *host_result_matrix; host_result_matrix = (int*)malloc(size); host_matrix = (int*)malloc(size); // Allocate memory on device int *device_matrix, *device_result_matrix; // check when the matrix allocation with the given size, it should not give an error hipError_t err = hipMalloc(&device_matrix, size); if(err != hipSuccess) { printf("Error Device Matrix: %s\n", hipGetErrorString(err)); } err = hipMalloc(&device_result_matrix, size); if(err != hipSuccess) { printf("Error Device Result Matrix: %s\n", hipGetErrorString(err)); } // Random matrix random_matriks(host_matrix, nodes_count); // Copy data from host to device hipMemcpy(device_matrix, host_matrix, size, hipMemcpyHostToDevice); // start calucate and time hipEventRecord(start); // run solution to find dijkstra hipLaunchKernelGGL(( solution), dim3(blocks_in_grid), dim3(threads_per_block) , 0, 0, device_matrix, device_result_matrix, nodes_count); hipError_t errAsync = hipDeviceSynchronize(); if(errAsync != hipSuccess) { printf("Error Async: %s\n", hipGetErrorString(errAsync)); } // Copy data from device to host hipMemcpy(host_result_matrix, device_result_matrix, size, hipMemcpyDeviceToHost); // stop hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); // hasil matriks dan waktu kalkuklasi nya printf("\n"); printf("~=== Result 
Matrix ===~\n"); print_matriks(host_result_matrix, nodes_count); printf("\ntime execution: %f microsecond(s)\n", milliseconds*1000); // free host memory free(host_matrix); free(host_result_matrix); // free device memory hipFree(device_matrix); hipFree(device_result_matrix); return 0; }
f77008956679638b45d17d0026e042d4a299491a.cu
#include <limits.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #define true 1 #define false 0 __device__ int min_distance(int dist[], int spt_set[], int n) { int min = INT_MAX, min_index; for (int v = 0; v < n; v++) { if (spt_set[v] == false && dist[v] <= min){ min = dist[v], min_index = v; } } return min_index; } __device__ void dijkstra(int* graph, int* res, int src, int n) { // inisiasi ukuran matriks maksimal yang dibutuhkan sesuai testcase int dist[3000]; int spt_set[3000]; //init distance and spt_set for (int i = 0; i < n; i++) { dist[i] = INT_MAX; spt_set[i] = false; } // init distance dengan 0 semua dist[src] = 0; for (int count = 0; count < n - 1; count++) { int u = min_distance(dist, spt_set, n); spt_set[u] = true; for (int v = 0; v < n; v++) { if (!spt_set[v] && graph[u*n+v] && dist[u] != INT_MAX && dist[u] + graph[u*n+v] < dist[v]) { dist[v] = dist[u] + graph[u*n+v]; } } } for (int i = 0; i < n; i++) { res[src*n + i] = dist[i]; } } // random matriks dengan nim __host__ void random_matriks(int* host_matrix, int num_nodes) { srand(13517074); // init distance for (int i = 0; i < num_nodes; i++) { for (int j = i; j < num_nodes; j++) { if (i == j) { host_matrix[i*num_nodes + j] = 0; } else { host_matrix[i*num_nodes + j] = rand() % 100; host_matrix[j*num_nodes + i] = host_matrix[i*num_nodes + j]; } } } } __host__ void print_matriks(int* host_matrix, int num_nodes) { for (int i = 0; i < num_nodes; i++) { for (int j = 0; j < num_nodes; j++) { printf("%d\t", host_matrix[i*num_nodes + j]); } printf("\n"); } } __global__ void solution (int* graph, int* result, int nodes_count) { // init source for check matriks /* gridDim: variabel yang berisi dimensi dari grid. * blockIdx: variabel yang berisi index block di mana thread ini berada. * blockDim: variabel yang berisi dimensi dari block. * threadIdx: variabel yang berisi index thread di dalam block. 
(untuk membedakan thread yang berada di block yang berbeda, gunakan blockIdx).*/ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < nodes_count) { dijkstra(graph, result, i, nodes_count); } } int main(int argc, char *argv[]) { int nodes_count = strtol(argv[2], NULL, 10); int num_thread = atoi(argv[1]); size_t size = nodes_count*nodes_count*sizeof(int); //cuda variabel for calculate time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // built in vaaribels int threads_per_block = num_thread; int blocks_in_grid = (nodes_count / threads_per_block) + 1; // Allocate memory on host int *host_matrix, *host_result_matrix; host_result_matrix = (int*)malloc(size); host_matrix = (int*)malloc(size); // Allocate memory on device int *device_matrix, *device_result_matrix; // check when the matrix allocation with the given size, it should not give an error cudaError_t err = cudaMalloc(&device_matrix, size); if(err != cudaSuccess) { printf("Error Device Matrix: %s\n", cudaGetErrorString(err)); } err = cudaMalloc(&device_result_matrix, size); if(err != cudaSuccess) { printf("Error Device Result Matrix: %s\n", cudaGetErrorString(err)); } // Random matrix random_matriks(host_matrix, nodes_count); // Copy data from host to device cudaMemcpy(device_matrix, host_matrix, size, cudaMemcpyHostToDevice); // start calucate and time cudaEventRecord(start); // run solution to find dijkstra solution<<< blocks_in_grid, threads_per_block >>>(device_matrix, device_result_matrix, nodes_count); cudaError_t errAsync = cudaDeviceSynchronize(); if(errAsync != cudaSuccess) { printf("Error Async: %s\n", cudaGetErrorString(errAsync)); } // Copy data from device to host cudaMemcpy(host_result_matrix, device_result_matrix, size, cudaMemcpyDeviceToHost); // stop cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); // hasil matriks dan waktu kalkuklasi nya printf("\n"); printf("~=== Result Matrix ===~\n"); 
print_matriks(host_result_matrix, nodes_count); printf("\ntime execution: %f microsecond(s)\n", milliseconds*1000); // free host memory free(host_matrix); free(host_result_matrix); // free device memory cudaFree(device_matrix); cudaFree(device_result_matrix); return 0; }
1645c1e5ea1222de9652b8e5c14a3dc5195c2f80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel3(dtype *g_idata, dtype *g_odata, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if (i < n/2) { scratch[threadIdx.x] = g_idata[i] + g_idata[i + n/2]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = 1, temp = blockDim.x/2; s < blockDim.x; s = s << 1, temp = temp/2) { if(threadIdx.x < (blockDim.x / (2*s))) { scratch[threadIdx.x] += scratch[threadIdx.x + temp]; } __syncthreads (); } if(threadIdx.x == 0) { g_odata[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure 
*/ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_3, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 3; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype), hipMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); hipDeviceSynchronize (); stopwatch_start (timer); /* execute kernel */ hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s); s = (s + threads * 2 - 1) / (threads * 2); } hipDeviceSynchronize (); t_kernel_3 = stopwatch_stop (timer); fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3); double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, 
sizeof (dtype), hipMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
1645c1e5ea1222de9652b8e5c14a3dc5195c2f80.cu
#include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel3(dtype *g_idata, dtype *g_odata, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if (i < n/2) { scratch[threadIdx.x] = g_idata[i] + g_idata[i + n/2]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = 1, temp = blockDim.x/2; s < blockDim.x; s = s << 1, temp = temp/2) { if(threadIdx.x < (blockDim.x / (2*s))) { scratch[threadIdx.x] += scratch[threadIdx.x + temp]; } __syncthreads (); } if(threadIdx.x == 0) { g_odata[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct 
stopwatch_t* timer = NULL; long double t_kernel_3, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 3; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype), cudaMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ kernel3 <<<gb, tb>>> (d_idata, d_odata, N); cudaThreadSynchronize (); stopwatch_start (timer); /* execute kernel */ kernel3 <<<gb, tb>>> (d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); kernel3 <<<gb, tb>>> (d_odata, d_odata, s); s = (s + threads * 2 - 1) / (threads * 2); } cudaThreadSynchronize (); t_kernel_3 = stopwatch_stop (timer); fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3); double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype), cudaMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ 
stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
011b3e8bbe55faaf652286647a582a57746eb55e.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* * GPU */ // System includes #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include <helper_cuda.h> #include <helper_functions.h> /* addOne +1 num :1 */ __device__ void addOne(int &num) { num++; } /* gpuTestSiukwanAddOne a_gpu[i] * b_gpu[i]c_gpu[i] a_gpu : b_gpu : c_gpu :x */ __global__ void gpuTestSiukwanAddOne(int *const a_gpu, int *const b_gpu, int *const c_gpu) { // write data to global memory const unsigned int tid = (blockIdx.x*blockDim.x) + threadIdx.x; c_gpu[tid] = a_gpu[tid] * b_gpu[tid]; for (int i = 0; i < 10240; i++) addOne(c_gpu[tid]); } /* gpuTestSiukwan a_gpu[i] * b_gpu[i]c_gpu[i], a_gpu : b_gpu : c_gpu :x */ __global__ void gpuTestSiukwan(int *const a_gpu, int *const b_gpu, int *const c_gpu) { // write data to global memory const unsigned int tid = (blockIdx.x*blockDim.x) + threadIdx.x; c_gpu[tid] = a_gpu[tid] * b_gpu[tid]; for (int i = 0; i < 10240; i++) c_gpu[tid]++; } // extern "C" void runCUDA(int blocks, int threads, int *const a_gpu, int *const b_gpu, int *const c_gpu) { gpuTestSiukwan << < blocks, threads >> >(a_gpu, b_gpu, c_gpu); } // extern "C" void runCUDA_AddOne(int blocks, int threads, int *const a_gpu, int *const b_gpu, int *const c_gpu) { gpuTestSiukwanAddOne << < blocks, threads >> >(a_gpu, 
b_gpu, c_gpu); }
011b3e8bbe55faaf652286647a582a57746eb55e.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* * 关于智力拼图问题的GPU解法 */ // System includes #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include <helper_cuda.h> #include <helper_functions.h> /* 函数名 :addOne 函数功能:进行+1操作 输入参数: num :需要加1的数 */ __device__ void addOne(int &num) { num++; } /* 函数名 :gpuTestSiukwanAddOne 函数功能:计算a_gpu[i] * b_gpu[i],并把结果存放到c_gpu[i]中,实现设备端的函数调用 输入参数: a_gpu :被填充的矩阵 b_gpu :用来填充的图形 c_gpu :被填充矩阵的第一个空格位置x */ __global__ void gpuTestSiukwanAddOne(int *const a_gpu, int *const b_gpu, int *const c_gpu) { // write data to global memory const unsigned int tid = (blockIdx.x*blockDim.x) + threadIdx.x; c_gpu[tid] = a_gpu[tid] * b_gpu[tid]; for (int i = 0; i < 10240; i++) addOne(c_gpu[tid]); } /* 函数名 :gpuTestSiukwan 函数功能:计算a_gpu[i] * b_gpu[i],并把结果存放到c_gpu[i]中, 输入参数: a_gpu :被填充的矩阵 b_gpu :用来填充的图形 c_gpu :被填充矩阵的第一个空格位置x */ __global__ void gpuTestSiukwan(int *const a_gpu, int *const b_gpu, int *const c_gpu) { // write data to global memory const unsigned int tid = (blockIdx.x*blockDim.x) + threadIdx.x; c_gpu[tid] = a_gpu[tid] * b_gpu[tid]; for (int i = 0; i < 10240; i++) c_gpu[tid]++; } //接口函数 extern "C" void runCUDA(int blocks, int threads, int *const a_gpu, int *const b_gpu, int *const c_gpu) { gpuTestSiukwan << < blocks, threads >> >(a_gpu, b_gpu, c_gpu); } //接口函数 extern "C" void runCUDA_AddOne(int blocks, int threads, int *const 
a_gpu, int *const b_gpu, int *const c_gpu) { gpuTestSiukwanAddOne << < blocks, threads >> >(a_gpu, b_gpu, c_gpu); }
7756b5f0d21806806a237cf611bb0dd58367fb3b.hip
// !!! This is a file automatically generated by hipify!!! #include "gpubase.cuh" #include <stdexcept> // runtime_error #include <thrust/sort.h> using namespace thrust; template<class F> auto execute(F f, Policy_t p) { switch (p) { case pol_thrust_par: return f(thrust::hip::par); default: throw std::runtime_error("Policy not supported!"); } } void read(const std::vector<nid_t>& v_host, device_vector<nid_t>& v_dev) { v_dev = v_host; } void write(const device_vector<nid_t>& v_dev, std::vector<nid_t>& v_host) { v_host.resize(v_dev.size()); thrust::copy(v_dev.cbegin(), v_dev.cend(), v_host.begin()); } void sort(Policy_t pol, vec<nid_t>& v) { execute( [&](auto &pol) { return sort(pol, v.begin(), v.end()); }, pol); }
7756b5f0d21806806a237cf611bb0dd58367fb3b.cu
#include "gpubase.cuh" #include <stdexcept> // runtime_error #include <thrust/sort.h> using namespace thrust; template<class F> auto execute(F f, Policy_t p) { switch (p) { case pol_thrust_par: return f(thrust::cuda::par); default: throw std::runtime_error("Policy not supported!"); } } void read(const std::vector<nid_t>& v_host, device_vector<nid_t>& v_dev) { v_dev = v_host; } void write(const device_vector<nid_t>& v_dev, std::vector<nid_t>& v_host) { v_host.resize(v_dev.size()); thrust::copy(v_dev.cbegin(), v_dev.cend(), v_host.begin()); } void sort(Policy_t pol, vec<nid_t>& v) { execute( [&](auto &pol) { return sort(pol, v.begin(), v.end()); }, pol); }
e7ca2cfb7e49b24c307441fa38a964e7aa111f44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _BOXFILTER_KERNEL_H_ #define _BOXFILTER_KERNEL_H_ #include <shrUtils.h> #include <cutil_inline.h> #include <cutil_math.h> texture<float, 2> tex; texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex; hipArray* d_array, *d_tempArray; /* Perform a fast box filter using the sliding window method. As the kernel moves from left to right, we add in the contribution of the new sample on the right, and subtract the value of the exiting sample on the left. This only requires 2 adds and a mul per output value, independent of the filter radius. The box filter is separable, so to perform a 2D box filter we perform the filter in the x direction, followed by the same filter in the y direction. Applying multiple iterations of the box filter converges towards a Gaussian blur. Using CUDA, rows or columns of the image are processed in parallel. This version duplicates edge pixels. Note that the x (row) pass suffers from uncoalesced global memory reads, since each thread is reading from a different row. For this reason it is better to use texture lookups for the x pass. The y (column) pass is perfectly coalesced. Parameters id - pointer to input data in global memory od - pointer to output data in global memory w - image width h - image height r - filter radius e.g. 
for r = 2, w = 8: 0 1 2 3 4 5 6 7 x - - - x - - - - x - - - - x - - - - x - - - - x - - - - x - - - x */ // process row __device__ void d_boxfilter_x(float *id, float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); float t; // do left edge t = id[0] * r; for (int x = 0; x < (r + 1); x++) { t += id[x]; } od[0] = t * scale; for(int x = 1; x < (r + 1); x++) { t += id[x + r]; t -= id[0]; od[x] = t * scale; } // main loop for(int x = (r + 1); x < w - r; x++) { t += id[x + r]; t -= id[x - r - 1]; od[x] = t * scale; } // do right edge for (int x = w - r; x < w; x++) { t += id[w - 1]; t -= id[x - r - 1]; od[x] = t * scale; } } // process column __device__ void d_boxfilter_y(float *id, float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); float t; // do left edge t = id[0] * r; for (int y = 0; y < (r + 1); y++) { t += id[y * w]; } od[0] = t * scale; for(int y = 1; y < (r + 1); y++) { t += id[(y + r) * w]; t -= id[0]; od[y * w] = t * scale; } // main loop for(int y = (r + 1); y < (h - r); y++) { t += id[(y + r) * w]; t -= id[((y - r) * w) - w]; od[y * w] = t * scale; } // do right edge for (int y = h - r; y < h; y++) { t += id[(h-1) * w]; t -= id[((y - r) * w) - w]; od[y * w] = t * scale; } } __global__ void d_boxfilter_x_global(float *id, float *od, int w, int h, int r) { unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; d_boxfilter_x(&id[y * w], &od[y * w], w, h, r); } __global__ void d_boxfilter_y_global(float *id, float *od, int w, int h, int r) { unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; d_boxfilter_y(&id[x], &od[x], w, h, r); } // texture version // texture fetches automatically clamp to edge of image __global__ void d_boxfilter_x_tex(float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; float t = 0.0f; for(int x =- r; x <= r; x++) { t += tex2D(tex, x, y); } od[y * w] = t * scale; for(int x = 1; x 
< w; x++) { t += tex2D(tex, x + r, y); t -= tex2D(tex, x - r - 1, y); od[y * w + x] = t * scale; } } __global__ void d_boxfilter_y_tex(float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; float t = 0.0f; for(int y = -r; y <= r; y++) { t += tex2D(tex, x, y); } od[x] = t * scale; for(int y = 1; y < h; y++) { t += tex2D(tex, x, y + r); t -= tex2D(tex, x, y - r - 1); od[y * w + x] = t * scale; } } // RGBA version // reads from 32-bit uint array holding 8-bit RGBA // convert floating point rgba color to 32-bit integer __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f); } __device__ float4 rgbaIntToFloat(uint c) { float4 rgba; rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f; rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f; rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f; rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f; return rgba; } // row pass using texture lookups __global__ void d_boxfilter_rgba_x(uint *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; // as long as address is always less than height, we do work if (y < h) { float4 t = make_float4(0.0f); for(int x = -r; x <= r; x++) { t += tex2D(rgbaTex, x, y); } od[y * w] = rgbaFloatToInt(t * scale); for(int x = 1; x < w; x++) { t += tex2D(rgbaTex, x + r, y); t -= tex2D(rgbaTex, x - r - 1, y); od[y * w + x] = rgbaFloatToInt(t * scale); } } } // column pass using coalesced global memory reads __global__ void d_boxfilter_rgba_y(uint *id, uint *od, int w, int h, int r) { unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; id = &id[x]; od = &od[x]; 
float scale = 1.0f / (float)((r << 1) + 1); float4 t; // do left edge t = rgbaIntToFloat(id[0]) * r; for (int y = 0; y < (r + 1); y++) { t += rgbaIntToFloat(id[y*w]); } od[0] = rgbaFloatToInt(t * scale); for(int y = 1; y < (r + 1); y++) { t += rgbaIntToFloat(id[(y + r) * w]); t -= rgbaIntToFloat(id[0]); od[y * w] = rgbaFloatToInt(t * scale); } // main loop for(int y = (r + 1); y < (h - r); y++) { t += rgbaIntToFloat(id[(y + r) * w]); t -= rgbaIntToFloat(id[((y - r) * w) - w]); od[y * w] = rgbaFloatToInt(t * scale); } // do right edge for (int y = h - r; y < h; y++) { t += rgbaIntToFloat(id[(h - 1) * w]); t -= rgbaIntToFloat(id[((y - r) * w) - w]); od[y * w] = rgbaFloatToInt(t * scale); } } extern "C" void initTexture(int width, int height, void *pImage) { int size = width * height * sizeof(unsigned int); // copy image data to array hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned); cutilSafeCall( hipMallocArray ( &d_array, &channelDesc, width, height )); cutilSafeCall( hipMemcpyToArray( d_array, 0, 0, pImage, size, hipMemcpyHostToDevice)); cutilSafeCall( hipMallocArray ( &d_tempArray, &channelDesc, width, height )); // set texture parameters tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; tex.filterMode = hipFilterModePoint; tex.normalized = true; // Bind the array to the texture cutilSafeCall( hipBindTextureToArray(tex, d_array, channelDesc) ); } extern "C" void freeTextures() { cutilSafeCall(hipFreeArray(d_array)); cutilSafeCall(hipFreeArray(d_tempArray)); } /* Perform 2D box filter on image using CUDA Parameters: d_src - pointer to input image in device memory d_temp - pointer to temporary storage in device memory d_dest - pointer to destination image in device memory width - image width height - image height radius - filter radius iterations - number of iterations */ extern "C" double boxFilter(float *d_src, float *d_temp, float *d_dest, int width, int height, int radius, int 
iterations, int nthreads) { // var for kernel timing double dKernelTime = 0.0; // sync host and start computation timer cutilSafeCall( hipDeviceSynchronize() ); shrDeltaT(0); cutilSafeCall( hipBindTextureToArray(tex, d_array) ); for(int i=0; i<iterations; i++) { // use texture for horizontal pass hipLaunchKernelGGL(( d_boxfilter_x_tex), dim3(height / nthreads), dim3(nthreads), 0 , 0, d_temp, width, height, radius); hipLaunchKernelGGL(( d_boxfilter_y_global), dim3(width / nthreads), dim3(nthreads), 0 , 0, d_temp, d_dest, width, height, radius); // sync host and stop computation timer cutilSafeCall( hipDeviceSynchronize() ); dKernelTime += shrDeltaT(0); if (iterations > 1) { // copy result back from global memory to array cutilSafeCall( hipMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float), hipMemcpyDeviceToDevice)); cutilSafeCall( hipBindTextureToArray(tex, d_tempArray) ); } } return (dKernelTime/(double)iterations); } // RGBA version extern "C" double boxFilterRGBA(uint *d_src, uint *d_temp, uint *d_dest, int width, int height, int radius, int iterations, int nthreads) { cutilSafeCall( hipBindTextureToArray(rgbaTex, d_array) ); // var for kernel computation timing double dKernelTime; for(int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; cutilSafeCall(hipDeviceSynchronize()); shrDeltaT(0); // use texture for horizontal pass hipLaunchKernelGGL(( d_boxfilter_rgba_x), dim3(height / nthreads), dim3(nthreads), 0 , 0, d_temp, width, height, radius); hipLaunchKernelGGL(( d_boxfilter_rgba_y), dim3(width / nthreads), dim3(nthreads), 0 , 0, d_temp, d_dest, width, height, radius); // sync host and stop computation timer cutilSafeCall( hipDeviceSynchronize() ); dKernelTime += shrDeltaT(0); if (iterations > 1) { // copy result back from global memory to array cutilSafeCall( hipMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float), hipMemcpyDeviceToDevice)); cutilSafeCall( 
hipBindTextureToArray(rgbaTex, d_tempArray) ); } } return (dKernelTime/(double)iterations); } #endif // #ifndef _BOXFILTER_KERNEL_H_
e7ca2cfb7e49b24c307441fa38a964e7aa111f44.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _BOXFILTER_KERNEL_H_ #define _BOXFILTER_KERNEL_H_ #include <shrUtils.h> #include <cutil_inline.h> #include <cutil_math.h> texture<float, 2> tex; texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex; cudaArray* d_array, *d_tempArray; /* Perform a fast box filter using the sliding window method. As the kernel moves from left to right, we add in the contribution of the new sample on the right, and subtract the value of the exiting sample on the left. This only requires 2 adds and a mul per output value, independent of the filter radius. The box filter is separable, so to perform a 2D box filter we perform the filter in the x direction, followed by the same filter in the y direction. Applying multiple iterations of the box filter converges towards a Gaussian blur. Using CUDA, rows or columns of the image are processed in parallel. This version duplicates edge pixels. Note that the x (row) pass suffers from uncoalesced global memory reads, since each thread is reading from a different row. For this reason it is better to use texture lookups for the x pass. The y (column) pass is perfectly coalesced. Parameters id - pointer to input data in global memory od - pointer to output data in global memory w - image width h - image height r - filter radius e.g. 
for r = 2, w = 8: 0 1 2 3 4 5 6 7 x - - - x - - - - x - - - - x - - - - x - - - - x - - - - x - - - x */ // process row __device__ void d_boxfilter_x(float *id, float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); float t; // do left edge t = id[0] * r; for (int x = 0; x < (r + 1); x++) { t += id[x]; } od[0] = t * scale; for(int x = 1; x < (r + 1); x++) { t += id[x + r]; t -= id[0]; od[x] = t * scale; } // main loop for(int x = (r + 1); x < w - r; x++) { t += id[x + r]; t -= id[x - r - 1]; od[x] = t * scale; } // do right edge for (int x = w - r; x < w; x++) { t += id[w - 1]; t -= id[x - r - 1]; od[x] = t * scale; } } // process column __device__ void d_boxfilter_y(float *id, float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); float t; // do left edge t = id[0] * r; for (int y = 0; y < (r + 1); y++) { t += id[y * w]; } od[0] = t * scale; for(int y = 1; y < (r + 1); y++) { t += id[(y + r) * w]; t -= id[0]; od[y * w] = t * scale; } // main loop for(int y = (r + 1); y < (h - r); y++) { t += id[(y + r) * w]; t -= id[((y - r) * w) - w]; od[y * w] = t * scale; } // do right edge for (int y = h - r; y < h; y++) { t += id[(h-1) * w]; t -= id[((y - r) * w) - w]; od[y * w] = t * scale; } } __global__ void d_boxfilter_x_global(float *id, float *od, int w, int h, int r) { unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; d_boxfilter_x(&id[y * w], &od[y * w], w, h, r); } __global__ void d_boxfilter_y_global(float *id, float *od, int w, int h, int r) { unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; d_boxfilter_y(&id[x], &od[x], w, h, r); } // texture version // texture fetches automatically clamp to edge of image __global__ void d_boxfilter_x_tex(float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; float t = 0.0f; for(int x =- r; x <= r; x++) { t += tex2D(tex, x, y); } od[y * w] = t * scale; for(int x = 1; x 
< w; x++) { t += tex2D(tex, x + r, y); t -= tex2D(tex, x - r - 1, y); od[y * w + x] = t * scale; } } __global__ void d_boxfilter_y_tex(float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; float t = 0.0f; for(int y = -r; y <= r; y++) { t += tex2D(tex, x, y); } od[x] = t * scale; for(int y = 1; y < h; y++) { t += tex2D(tex, x, y + r); t -= tex2D(tex, x, y - r - 1); od[y * w + x] = t * scale; } } // RGBA version // reads from 32-bit uint array holding 8-bit RGBA // convert floating point rgba color to 32-bit integer __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f); } __device__ float4 rgbaIntToFloat(uint c) { float4 rgba; rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f; rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f; rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f; rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f; return rgba; } // row pass using texture lookups __global__ void d_boxfilter_rgba_x(uint *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; // as long as address is always less than height, we do work if (y < h) { float4 t = make_float4(0.0f); for(int x = -r; x <= r; x++) { t += tex2D(rgbaTex, x, y); } od[y * w] = rgbaFloatToInt(t * scale); for(int x = 1; x < w; x++) { t += tex2D(rgbaTex, x + r, y); t -= tex2D(rgbaTex, x - r - 1, y); od[y * w + x] = rgbaFloatToInt(t * scale); } } } // column pass using coalesced global memory reads __global__ void d_boxfilter_rgba_y(uint *id, uint *od, int w, int h, int r) { unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; id = &id[x]; od = &od[x]; 
float scale = 1.0f / (float)((r << 1) + 1); float4 t; // do left edge t = rgbaIntToFloat(id[0]) * r; for (int y = 0; y < (r + 1); y++) { t += rgbaIntToFloat(id[y*w]); } od[0] = rgbaFloatToInt(t * scale); for(int y = 1; y < (r + 1); y++) { t += rgbaIntToFloat(id[(y + r) * w]); t -= rgbaIntToFloat(id[0]); od[y * w] = rgbaFloatToInt(t * scale); } // main loop for(int y = (r + 1); y < (h - r); y++) { t += rgbaIntToFloat(id[(y + r) * w]); t -= rgbaIntToFloat(id[((y - r) * w) - w]); od[y * w] = rgbaFloatToInt(t * scale); } // do right edge for (int y = h - r; y < h; y++) { t += rgbaIntToFloat(id[(h - 1) * w]); t -= rgbaIntToFloat(id[((y - r) * w) - w]); od[y * w] = rgbaFloatToInt(t * scale); } } extern "C" void initTexture(int width, int height, void *pImage) { int size = width * height * sizeof(unsigned int); // copy image data to array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned); cutilSafeCall( cudaMallocArray ( &d_array, &channelDesc, width, height )); cutilSafeCall( cudaMemcpyToArray( d_array, 0, 0, pImage, size, cudaMemcpyHostToDevice)); cutilSafeCall( cudaMallocArray ( &d_tempArray, &channelDesc, width, height )); // set texture parameters tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; tex.filterMode = cudaFilterModePoint; tex.normalized = true; // Bind the array to the texture cutilSafeCall( cudaBindTextureToArray(tex, d_array, channelDesc) ); } extern "C" void freeTextures() { cutilSafeCall(cudaFreeArray(d_array)); cutilSafeCall(cudaFreeArray(d_tempArray)); } /* Perform 2D box filter on image using CUDA Parameters: d_src - pointer to input image in device memory d_temp - pointer to temporary storage in device memory d_dest - pointer to destination image in device memory width - image width height - image height radius - filter radius iterations - number of iterations */ extern "C" double boxFilter(float *d_src, float *d_temp, float *d_dest, int width, int height, int 
radius, int iterations, int nthreads) { // var for kernel timing double dKernelTime = 0.0; // sync host and start computation timer cutilSafeCall( cudaThreadSynchronize() ); shrDeltaT(0); cutilSafeCall( cudaBindTextureToArray(tex, d_array) ); for(int i=0; i<iterations; i++) { // use texture for horizontal pass d_boxfilter_x_tex<<< height / nthreads, nthreads, 0 >>>( d_temp, width, height, radius); d_boxfilter_y_global<<< width / nthreads, nthreads, 0 >>>( d_temp, d_dest, width, height, radius); // sync host and stop computation timer cutilSafeCall( cudaThreadSynchronize() ); dKernelTime += shrDeltaT(0); if (iterations > 1) { // copy result back from global memory to array cutilSafeCall( cudaMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float), cudaMemcpyDeviceToDevice)); cutilSafeCall( cudaBindTextureToArray(tex, d_tempArray) ); } } return (dKernelTime/(double)iterations); } // RGBA version extern "C" double boxFilterRGBA(uint *d_src, uint *d_temp, uint *d_dest, int width, int height, int radius, int iterations, int nthreads) { cutilSafeCall( cudaBindTextureToArray(rgbaTex, d_array) ); // var for kernel computation timing double dKernelTime; for(int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; cutilSafeCall(cudaThreadSynchronize()); shrDeltaT(0); // use texture for horizontal pass d_boxfilter_rgba_x<<< height / nthreads, nthreads, 0 >>>( d_temp, width, height, radius); d_boxfilter_rgba_y<<< width / nthreads, nthreads, 0 >>>( d_temp, d_dest, width, height, radius); // sync host and stop computation timer cutilSafeCall( cudaThreadSynchronize() ); dKernelTime += shrDeltaT(0); if (iterations > 1) { // copy result back from global memory to array cutilSafeCall( cudaMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float), cudaMemcpyDeviceToDevice)); cutilSafeCall( cudaBindTextureToArray(rgbaTex, d_tempArray) ); } } return (dKernelTime/(double)iterations); } #endif // #ifndef 
_BOXFILTER_KERNEL_H_
ec6e9f87eb3bd0cb6ef249345fb37c1cc3d0f09b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernel.h" #include "common/plugin.h" #include "hip/hip_fp16.h" #include <array> inline __device__ __half minus_fb(const __half& a, const __half& b) { #if __CUDA_ARCH__ >= 530 return a - b; #else return __float2half(__half2float(a) - __half2float(b)); #endif } inline __device__ float minus_fb(const float & a, const float & b) { return a - b; } template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherTopDetections_kernel( const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int* indices, const T_SCORE* scores, const T_BBOX* bboxData, int* keepCount, T_BBOX* topDetections, const T_SCORE score_shift) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; /* * It is also likely that there is "bad bounding boxes" in the keepTopK bounding boxes. 
* We set the bounding boxes parameters as the parameters shown below. * These data will only show up at the end of keepTopK bounding boxes since the bounding boxes were sorted previously. * It is also not going to affect the count of valid bounding boxes (keepCount). * These data will probably never be used (because we have keepCount). */ if (index == -1) { topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = -1; // label topDetections[i * 7 + 2] = 0; // confidence score // score==0 will not pass the VisualizeBBox check topDetections[i * 7 + 3] = 0; // bbox xmin topDetections[i * 7 + 4] = 0; // bbox ymin topDetections[i * 7 + 5] = 0; // bbox xmax topDetections[i * 7 + 6] = 0; // bbox ymax } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label topDetections[i * 7 + 2] = score; // confidence score // subtract 1.0 score shift we added in sortScorePerClass topDetections[i * 7 + 2] = minus_fb(topDetections[i * 7 + 2], score_shift); const T_BBOX xMin = bboxData[bboxId]; const T_BBOX yMin = bboxData[bboxId + 1]; const T_BBOX xMax = bboxData[bboxId + 2]; const T_BBOX yMax = bboxData[bboxId + 3]; // clipped bbox xmin topDetections[i * 7 + 3] = saturate(xMin); // clipped bbox ymin topDetections[i * 7 + 4] = saturate(yMin); // clipped bbox xmax topDetections[i * 7 + 5] = saturate(xMax); // clipped bbox ymax topDetections[i * 7 + 6] = saturate(yMax); // Atomic add to increase the count of valid keepTopK bounding boxes // Without having to do manual sync. 
atomicAdd(&keepCount[i / keepTopK], 1); } } } template <typename T_BBOX, typename T_SCORE> pluginStatus_t gatherTopDetections_gpu( hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void* indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift ) { CSC(hipMemsetAsync(keepCount, 0, numImages * sizeof(int), stream), STATUS_FAILURE); const int BS = 32; const int GS = 32; hipLaunchKernelGGL(( gatherTopDetections_kernel<T_BBOX, T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData, (int*) keepCount, (T_BBOX*) topDetections, T_SCORE(score_shift)); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherTopDetections LAUNCH CONFIG typedef pluginStatus_t (*gtdFunc)(hipStream_t, const bool, const int, const int, const int, const int, const int, const void*, const void*, const void*, void*, void*, const float); struct gtdLaunchConfig { DataType t_bbox; DataType t_score; gtdFunc function; gtdLaunchConfig(DataType t_bbox, DataType t_score) : t_bbox(t_bbox) , t_score(t_score) { } gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function) : t_bbox(t_bbox) , t_score(t_score) , function(function) { } bool operator==(const gtdLaunchConfig& other) { return t_bbox == other.t_bbox && t_score == other.t_score; } }; using nvinfer1::DataType; static std::array<gtdLaunchConfig, 2> gtdLCOptions = { gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherTopDetections_gpu<float, float>), gtdLaunchConfig(DataType::kHALF, DataType::kHALF, gatherTopDetections_gpu<__half, __half>) }; pluginStatus_t gatherTopDetections( hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType 
DT_BBOX, const DataType DT_SCORE, const void* indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift) { gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE); for (unsigned i = 0; i < gtdLCOptions.size(); ++i) { if (lc == gtdLCOptions[i]) { DEBUG_PRINTF("gatherTopDetections kernel %d\n", i); return gtdLCOptions[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, keepCount, topDetections, score_shift); } } return STATUS_BAD_PARAM; }
ec6e9f87eb3bd0cb6ef249345fb37c1cc3d0f09b.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernel.h" #include "common/plugin.h" #include "cuda_fp16.h" #include <array> inline __device__ __half minus_fb(const __half& a, const __half& b) { #if __CUDA_ARCH__ >= 530 return a - b; #else return __float2half(__half2float(a) - __half2float(b)); #endif } inline __device__ float minus_fb(const float & a, const float & b) { return a - b; } template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherTopDetections_kernel( const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int* indices, const T_SCORE* scores, const T_BBOX* bboxData, int* keepCount, T_BBOX* topDetections, const T_SCORE score_shift) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; /* * It is also likely that there is "bad bounding boxes" in the keepTopK bounding boxes. * We set the bounding boxes parameters as the parameters shown below. 
* These data will only show up at the end of keepTopK bounding boxes since the bounding boxes were sorted previously. * It is also not going to affect the count of valid bounding boxes (keepCount). * These data will probably never be used (because we have keepCount). */ if (index == -1) { topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = -1; // label topDetections[i * 7 + 2] = 0; // confidence score // score==0 will not pass the VisualizeBBox check topDetections[i * 7 + 3] = 0; // bbox xmin topDetections[i * 7 + 4] = 0; // bbox ymin topDetections[i * 7 + 5] = 0; // bbox xmax topDetections[i * 7 + 6] = 0; // bbox ymax } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label topDetections[i * 7 + 2] = score; // confidence score // subtract 1.0 score shift we added in sortScorePerClass topDetections[i * 7 + 2] = minus_fb(topDetections[i * 7 + 2], score_shift); const T_BBOX xMin = bboxData[bboxId]; const T_BBOX yMin = bboxData[bboxId + 1]; const T_BBOX xMax = bboxData[bboxId + 2]; const T_BBOX yMax = bboxData[bboxId + 3]; // clipped bbox xmin topDetections[i * 7 + 3] = saturate(xMin); // clipped bbox ymin topDetections[i * 7 + 4] = saturate(yMin); // clipped bbox xmax topDetections[i * 7 + 5] = saturate(xMax); // clipped bbox ymax topDetections[i * 7 + 6] = saturate(yMax); // Atomic add to increase the count of valid keepTopK bounding boxes // Without having to do manual sync. 
atomicAdd(&keepCount[i / keepTopK], 1); } } } template <typename T_BBOX, typename T_SCORE> pluginStatus_t gatherTopDetections_gpu( cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void* indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift ) { CSC(cudaMemsetAsync(keepCount, 0, numImages * sizeof(int), stream), STATUS_FAILURE); const int BS = 32; const int GS = 32; gatherTopDetections_kernel<T_BBOX, T_SCORE, BS><<<GS, BS, 0, stream>>>(shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData, (int*) keepCount, (T_BBOX*) topDetections, T_SCORE(score_shift)); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherTopDetections LAUNCH CONFIG typedef pluginStatus_t (*gtdFunc)(cudaStream_t, const bool, const int, const int, const int, const int, const int, const void*, const void*, const void*, void*, void*, const float); struct gtdLaunchConfig { DataType t_bbox; DataType t_score; gtdFunc function; gtdLaunchConfig(DataType t_bbox, DataType t_score) : t_bbox(t_bbox) , t_score(t_score) { } gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function) : t_bbox(t_bbox) , t_score(t_score) , function(function) { } bool operator==(const gtdLaunchConfig& other) { return t_bbox == other.t_bbox && t_score == other.t_score; } }; using nvinfer1::DataType; static std::array<gtdLaunchConfig, 2> gtdLCOptions = { gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherTopDetections_gpu<float, float>), gtdLaunchConfig(DataType::kHALF, DataType::kHALF, gatherTopDetections_gpu<__half, __half>) }; pluginStatus_t gatherTopDetections( cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType 
DT_SCORE, const void* indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift) { gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE); for (unsigned i = 0; i < gtdLCOptions.size(); ++i) { if (lc == gtdLCOptions[i]) { DEBUG_PRINTF("gatherTopDetections kernel %d\n", i); return gtdLCOptions[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, keepCount, topDetections, score_shift); } } return STATUS_BAD_PARAM; }
0240886bf7d57e44af1a5b3fd02fccf350d945ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/slice_util.h" #include "oneflow/core/common/switch_func.h" namespace oneflow { namespace { template<typename T, int NDIM> __global__ void SliceForwardGpu(const int n, SliceParams params, SliceIndexHelper<NDIM> entire_idx_cvtr, SliceIndexHelper<NDIM> sliced_idx_cvtr, const T* entire, T* sliced) { CUDA_1D_KERNEL_LOOP(i, n) { int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr); sliced[i] = entire[offset]; } } template<typename T, int NDIM> __global__ void SliceBackwardGpu(const int n, SliceParams params, SliceIndexHelper<NDIM> entire_idx_cvtr, SliceIndexHelper<NDIM> sliced_idx_cvtr, T* entire, const T* sliced) { CUDA_1D_KERNEL_LOOP(i, n) { int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr); entire[offset] = sliced[i]; } } template<typename T, int NDIM> void LaunchSliceForward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) { CHECK_EQ(params.ndim, NDIM); int64_t elem_cnt = params.elem_cnt(); SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims); SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size); hipLaunchKernelGGL(( SliceForwardGpu<T, NDIM>) , dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), elem_cnt, params, entire_idx_cvtr, 
sliced_idx_cvtr, entire, sliced); } template<typename T, int NDIM> void LaunchSliceBackward(DeviceCtx* ctx, const SliceParams& params, const T* sliced, T* entire) { CHECK_EQ(params.ndim, NDIM); int64_t elem_cnt = params.elem_cnt(); SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims); SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size); hipLaunchKernelGGL(( SliceBackwardGpu<T, NDIM>) , dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced); } template<typename T> struct SliceSwitchUtil final { #define MAKE_SLICE_SWITCH_ENTRY(func_name, N) func_name<T, N> #define DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(func_name) \ DEFINE_STATIC_SWITCH_FUNC(void, func_name, MAKE_SLICE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ)); DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceForward); DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceBackward); #undef DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD #undef MAKE_SLICE_SWITCH_ENTRY }; template<typename T> size_t GetPackSize(const SliceParams& params, const T* entire, const T* sliced) { CHECK_GT(params.ndim, 0); const int64_t last_dim = params.ndim - 1; const int64_t mask = (params.dims[last_dim] * sizeof(T)) | (params.start[last_dim] * sizeof(T)) | (params.size[last_dim] * sizeof(T)) | static_cast<int64_t>(reinterpret_cast<uintptr_t>(entire)) | static_cast<int64_t>(reinterpret_cast<uintptr_t>(sliced)); if ((mask & 0xF) == 0) { return 16; } else if ((mask & 0x7) == 0) { return 8; } else if ((mask & 0x3) == 0) { return 4; } else if ((mask & 0x1) == 0) { return 2; } else { return 1; } } template<typename T> void GetPackedParams(const SliceParams& params, const T* entire, const T* sliced, size_t* pack_size, SliceParams* packed_params) { CHECK_GT(params.ndim, 0); const int64_t last_dim = params.ndim - 1; if (params.step[last_dim] == 1) { *pack_size = GetPackSize<T>(params, entire, sliced); CHECK_GE(*pack_size, sizeof(T)); const int64_t 
elem_per_pack = *pack_size / sizeof(T); *packed_params = params; packed_params->dims[last_dim] /= elem_per_pack; packed_params->start[last_dim] /= elem_per_pack; packed_params->size[last_dim] /= elem_per_pack; } else { *pack_size = sizeof(T); *packed_params = params; } } } // namespace template<typename T> struct SliceKernelUtil<DeviceType::kGPU, T> { static void Forward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) { SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params); size_t pack_size; SliceParams packed_params{}; GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params); if (pack_size == 1) { SliceSwitchUtil<uint8_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint8_t*>(entire), reinterpret_cast<uint8_t*>(sliced)); } else if (pack_size == 2) { SliceSwitchUtil<uint16_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint16_t*>(entire), reinterpret_cast<uint16_t*>(sliced)); } else if (pack_size == 4) { SliceSwitchUtil<uint32_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint32_t*>(entire), reinterpret_cast<uint32_t*>(sliced)); } else if (pack_size == 8) { SliceSwitchUtil<uint64_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint64_t*>(entire), reinterpret_cast<uint64_t*>(sliced)); } else if (pack_size == 16) { SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const ulonglong2*>(entire), reinterpret_cast<ulonglong2*>(sliced)); } else { UNIMPLEMENTED(); } } static void Backward(DeviceCtx* ctx, const SliceParams& params, const T* sliced, T* entire) { SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params); size_t pack_size; SliceParams packed_params{}; 
GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params); if (pack_size == 1) { SliceSwitchUtil<uint8_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint8_t*>(sliced), reinterpret_cast<uint8_t*>(entire)); } else if (pack_size == 2) { SliceSwitchUtil<uint16_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint16_t*>(sliced), reinterpret_cast<uint16_t*>(entire)); } else if (pack_size == 4) { SliceSwitchUtil<uint32_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint32_t*>(sliced), reinterpret_cast<uint32_t*>(entire)); } else if (pack_size == 8) { SliceSwitchUtil<uint64_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint64_t*>(sliced), reinterpret_cast<uint64_t*>(entire)); } else if (pack_size == 16) { SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const ulonglong2*>(sliced), reinterpret_cast<ulonglong2*>(entire)); } else { UNIMPLEMENTED(); } } }; INSTANTIATE_SLICE_KERNEL_UTIL_WITH_DEVICE(DeviceType::kGPU) INSTANTIATE_SLICE_KERNEL_UTIL(DeviceType::kGPU, float16) } // namespace oneflow
0240886bf7d57e44af1a5b3fd02fccf350d945ca.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/slice_util.h" #include "oneflow/core/common/switch_func.h" namespace oneflow { namespace { template<typename T, int NDIM> __global__ void SliceForwardGpu(const int n, SliceParams params, SliceIndexHelper<NDIM> entire_idx_cvtr, SliceIndexHelper<NDIM> sliced_idx_cvtr, const T* entire, T* sliced) { CUDA_1D_KERNEL_LOOP(i, n) { int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr); sliced[i] = entire[offset]; } } template<typename T, int NDIM> __global__ void SliceBackwardGpu(const int n, SliceParams params, SliceIndexHelper<NDIM> entire_idx_cvtr, SliceIndexHelper<NDIM> sliced_idx_cvtr, T* entire, const T* sliced) { CUDA_1D_KERNEL_LOOP(i, n) { int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr); entire[offset] = sliced[i]; } } template<typename T, int NDIM> void LaunchSliceForward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) { CHECK_EQ(params.ndim, NDIM); int64_t elem_cnt = params.elem_cnt(); SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims); SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size); SliceForwardGpu<T, NDIM> <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced); } template<typename T, int NDIM> void LaunchSliceBackward(DeviceCtx* ctx, const SliceParams& 
params, const T* sliced, T* entire) { CHECK_EQ(params.ndim, NDIM); int64_t elem_cnt = params.elem_cnt(); SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims); SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size); SliceBackwardGpu<T, NDIM> <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced); } template<typename T> struct SliceSwitchUtil final { #define MAKE_SLICE_SWITCH_ENTRY(func_name, N) func_name<T, N> #define DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(func_name) \ DEFINE_STATIC_SWITCH_FUNC(void, func_name, MAKE_SLICE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ)); DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceForward); DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceBackward); #undef DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD #undef MAKE_SLICE_SWITCH_ENTRY }; template<typename T> size_t GetPackSize(const SliceParams& params, const T* entire, const T* sliced) { CHECK_GT(params.ndim, 0); const int64_t last_dim = params.ndim - 1; const int64_t mask = (params.dims[last_dim] * sizeof(T)) | (params.start[last_dim] * sizeof(T)) | (params.size[last_dim] * sizeof(T)) | static_cast<int64_t>(reinterpret_cast<uintptr_t>(entire)) | static_cast<int64_t>(reinterpret_cast<uintptr_t>(sliced)); if ((mask & 0xF) == 0) { return 16; } else if ((mask & 0x7) == 0) { return 8; } else if ((mask & 0x3) == 0) { return 4; } else if ((mask & 0x1) == 0) { return 2; } else { return 1; } } template<typename T> void GetPackedParams(const SliceParams& params, const T* entire, const T* sliced, size_t* pack_size, SliceParams* packed_params) { CHECK_GT(params.ndim, 0); const int64_t last_dim = params.ndim - 1; if (params.step[last_dim] == 1) { *pack_size = GetPackSize<T>(params, entire, sliced); CHECK_GE(*pack_size, sizeof(T)); const int64_t elem_per_pack = *pack_size / sizeof(T); *packed_params = params; packed_params->dims[last_dim] /= elem_per_pack; packed_params->start[last_dim] /= elem_per_pack; 
packed_params->size[last_dim] /= elem_per_pack; } else { *pack_size = sizeof(T); *packed_params = params; } } } // namespace template<typename T> struct SliceKernelUtil<DeviceType::kGPU, T> { static void Forward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) { SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params); size_t pack_size; SliceParams packed_params{}; GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params); if (pack_size == 1) { SliceSwitchUtil<uint8_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint8_t*>(entire), reinterpret_cast<uint8_t*>(sliced)); } else if (pack_size == 2) { SliceSwitchUtil<uint16_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint16_t*>(entire), reinterpret_cast<uint16_t*>(sliced)); } else if (pack_size == 4) { SliceSwitchUtil<uint32_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint32_t*>(entire), reinterpret_cast<uint32_t*>(sliced)); } else if (pack_size == 8) { SliceSwitchUtil<uint64_t>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint64_t*>(entire), reinterpret_cast<uint64_t*>(sliced)); } else if (pack_size == 16) { SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceForward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const ulonglong2*>(entire), reinterpret_cast<ulonglong2*>(sliced)); } else { UNIMPLEMENTED(); } } static void Backward(DeviceCtx* ctx, const SliceParams& params, const T* sliced, T* entire) { SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params); size_t pack_size; SliceParams packed_params{}; GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params); if (pack_size == 1) { SliceSwitchUtil<uint8_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), 
ctx, packed_params, reinterpret_cast<const uint8_t*>(sliced), reinterpret_cast<uint8_t*>(entire)); } else if (pack_size == 2) { SliceSwitchUtil<uint16_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint16_t*>(sliced), reinterpret_cast<uint16_t*>(entire)); } else if (pack_size == 4) { SliceSwitchUtil<uint32_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint32_t*>(sliced), reinterpret_cast<uint32_t*>(entire)); } else if (pack_size == 8) { SliceSwitchUtil<uint64_t>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const uint64_t*>(sliced), reinterpret_cast<uint64_t*>(entire)); } else if (pack_size == 16) { SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceBackward( SwitchCase(packed_params.ndim), ctx, packed_params, reinterpret_cast<const ulonglong2*>(sliced), reinterpret_cast<ulonglong2*>(entire)); } else { UNIMPLEMENTED(); } } }; INSTANTIATE_SLICE_KERNEL_UTIL_WITH_DEVICE(DeviceType::kGPU) INSTANTIATE_SLICE_KERNEL_UTIL(DeviceType::kGPU, float16) } // namespace oneflow
6b26cc7c13c9ebf8d020982c6153a775f1243d5d.hip
// !!! This is a file automatically generated by hipify!!! /* * GDFCounter.cu * * Created on: Sep 12, 2018 * Author: rqc */ #include "GDFCounter.cuh" #include <iostream> GDFRefCounter* GDFRefCounter::Instance=0; void GDFRefCounter::register_column(gdf_column* col_ptr){ if(col_ptr != nullptr){ std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key) == map.end()){ map[map_key]=1; } } } void GDFRefCounter::deregister_column(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key) != map.end()){ map[map_key]=0; //deregistering } } void GDFRefCounter::free_if_deregistered(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key)!=map.end()){ if(map[map_key]==0){ map.erase(map_key); hipFree(map_key.first); //data hipFree(map_key.second); //valid } } } void GDFRefCounter::increment(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key)!=map.end()){ if(map[map_key]!=0){ //is already deregistered map[map_key]++; } } } void GDFRefCounter::decrement(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key)!=map.end()){ if(map[map_key]>0){ map[map_key]--; if(map[map_key]==0){ map.erase(map_key); hipFree(map_key.first); //data hipFree(map_key.second); //valid } } } } GDFRefCounter::GDFRefCounter() { } // Testing purposes size_t GDFRefCounter::get_map_size() { return map.size(); } GDFRefCounter* GDFRefCounter::getInstance() { if(!Instance) Instance=new GDFRefCounter(); return Instance; }
6b26cc7c13c9ebf8d020982c6153a775f1243d5d.cu
/* * GDFCounter.cu * * Created on: Sep 12, 2018 * Author: rqc */ #include "GDFCounter.cuh" #include <iostream> GDFRefCounter* GDFRefCounter::Instance=0; void GDFRefCounter::register_column(gdf_column* col_ptr){ if(col_ptr != nullptr){ std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key) == map.end()){ map[map_key]=1; } } } void GDFRefCounter::deregister_column(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key) != map.end()){ map[map_key]=0; //deregistering } } void GDFRefCounter::free_if_deregistered(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key)!=map.end()){ if(map[map_key]==0){ map.erase(map_key); cudaFree(map_key.first); //data cudaFree(map_key.second); //valid } } } void GDFRefCounter::increment(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key)!=map.end()){ if(map[map_key]!=0){ //is already deregistered map[map_key]++; } } } void GDFRefCounter::decrement(gdf_column* col_ptr) { std::lock_guard<std::mutex> lock(gc_mutex); rc_key_t map_key = {col_ptr->data, col_ptr->valid}; if(map.find(map_key)!=map.end()){ if(map[map_key]>0){ map[map_key]--; if(map[map_key]==0){ map.erase(map_key); cudaFree(map_key.first); //data cudaFree(map_key.second); //valid } } } } GDFRefCounter::GDFRefCounter() { } // Testing purposes size_t GDFRefCounter::get_map_size() { return map.size(); } GDFRefCounter* GDFRefCounter::getInstance() { if(!Instance) Instance=new GDFRefCounter(); return Instance; }
5d6edda1b28a6f6264b32b271ff30ae985c2dcbe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void maxgradinput(float *gradInput, float *gradOutput, float *indices_x, float *indices_y, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; //int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; indices_x = indices_x + o*output_w*output_h; indices_y = indices_y + o*output_w*output_h; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float *ptr_ind_x = indices_x + yy*output_w + xx; float *ptr_ind_y = indices_y + yy*output_w + xx; float z = *ptr_gradOutput; int argmax_x = (*ptr_ind_x)-1; int argmax_y = (*ptr_ind_y)-1; ptr_gradInput[argmax_x + argmax_y*input_w] += z; } } }
5d6edda1b28a6f6264b32b271ff30ae985c2dcbe.cu
#include "includes.h" __global__ void maxgradinput(float *gradInput, float *gradOutput, float *indices_x, float *indices_y, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; //int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; indices_x = indices_x + o*output_w*output_h; indices_y = indices_y + o*output_w*output_h; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float *ptr_ind_x = indices_x + yy*output_w + xx; float *ptr_ind_y = indices_y + yy*output_w + xx; float z = *ptr_gradOutput; int argmax_x = (*ptr_ind_x)-1; int argmax_y = (*ptr_ind_y)-1; ptr_gradInput[argmax_x + argmax_y*input_w] += z; } } }
3d4ebe2dd84ad655bb4236e5d9c57fd59bc12bc3.hip
// !!! This is a file automatically generated by hipify!!! /* * This program uses the device CURAND API to calculate what * proportion of pseudo-random ints are odd. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> __global__ void setup_kernel(hiprandState_t *state){ int id = threadIdx.x + blockIdx.x * 64; /* Each thread gets same seed, a different sequence number , no offset */ hiprand_init(1234, id, 0, &state[id]); } __global__ void generate_kernel(hiprandState_t *state, int *result){ int id = threadIdx.x + blockIdx.x * 64; int count = 0; unsigned int x; /* Copy state to local memory for efficiency */ hiprandState_t localState = state[id]; /* Generate pseudo -random unsigned ints */ for(int n = 0; n < 100000; n++){ x = hiprand(&localState); /* Check if odd */ if(x & 1){ count ++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } int main(int argc, char *argv[]){ int i, total; int *devResults, *hostResults; hiprandState_t *devStates; /* Allocate space for results on host */ hostResults = (int *) calloc(64 * 64, sizeof(int)); /* Allocate space for results on device */ hipMalloc((void **)&devResults , 64 * 64 *sizeof(int)); /* Set results to 0 */ hipMemset(devResults , 0, 64 * 64 * sizeof(int)); /* Allocate space for prng states on device */ hipMalloc((void **)&devStates , 64 * 64 * sizeof(hiprandState_t)); /* Setup prng states */ hipLaunchKernelGGL(( setup_kernel), dim3(64), dim3(64), 0, 0, devStates); /* Generate and use pseudorandom numbers*/ for(i = 0; i < 10; i++){ hipLaunchKernelGGL(( generate_kernel), dim3(64), dim3(64), 0, 0, devStates, devResults); } /* Copy device memory to host */ hipMemcpy(hostResults, devResults , 64 * 64 * sizeof(int), hipMemcpyDeviceToHost); /* Show result */ total = 0; for(i = 0; i < 64 * 64; i++) { total += hostResults[i]; } printf("Fraction odd was %10.13f\n", (float) total / (64.0f * 64.0f * 100000.0f * 10.0f)); /* 
Cleanup */ hipFree(devStates); hipFree(devResults); free(hostResults); return EXIT_SUCCESS; }
3d4ebe2dd84ad655bb4236e5d9c57fd59bc12bc3.cu
/* * This program uses the device CURAND API to calculate what * proportion of pseudo-random ints are odd. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> __global__ void setup_kernel(curandState *state){ int id = threadIdx.x + blockIdx.x * 64; /* Each thread gets same seed, a different sequence number , no offset */ curand_init(1234, id, 0, &state[id]); } __global__ void generate_kernel(curandState *state, int *result){ int id = threadIdx.x + blockIdx.x * 64; int count = 0; unsigned int x; /* Copy state to local memory for efficiency */ curandState localState = state[id]; /* Generate pseudo -random unsigned ints */ for(int n = 0; n < 100000; n++){ x = curand(&localState); /* Check if odd */ if(x & 1){ count ++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } int main(int argc, char *argv[]){ int i, total; int *devResults, *hostResults; curandState *devStates; /* Allocate space for results on host */ hostResults = (int *) calloc(64 * 64, sizeof(int)); /* Allocate space for results on device */ cudaMalloc((void **)&devResults , 64 * 64 *sizeof(int)); /* Set results to 0 */ cudaMemset(devResults , 0, 64 * 64 * sizeof(int)); /* Allocate space for prng states on device */ cudaMalloc((void **)&devStates , 64 * 64 * sizeof(curandState)); /* Setup prng states */ setup_kernel<<<64, 64>>>(devStates); /* Generate and use pseudorandom numbers*/ for(i = 0; i < 10; i++){ generate_kernel<<<64, 64>>>(devStates, devResults); } /* Copy device memory to host */ cudaMemcpy(hostResults, devResults , 64 * 64 * sizeof(int), cudaMemcpyDeviceToHost); /* Show result */ total = 0; for(i = 0; i < 64 * 64; i++) { total += hostResults[i]; } printf("Fraction odd was %10.13f\n", (float) total / (64.0f * 64.0f * 100000.0f * 10.0f)); /* Cleanup */ cudaFree(devStates); cudaFree(devResults); free(hostResults); return EXIT_SUCCESS; }
4deaa16722d15119787344ee1e0165e43368fab9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include "flags.h" int main(int argc, char **argv) { int device = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); hipSetDevice( device ); mystruct *arg_ptr, arg, *arg_d ;arg_ptr= &arg; read_params(arg_ptr); dump_params(arg_ptr); printf("GPU device %d, name = %s\n", device, deviceProp.name ); double *rho, *rho1, *u; double *f_d, *f1_d, *ftemp_d, *ftemp1_d, *rho_d, *rho1_d, *u_d; int *is_solid, *is_solid_d, t=0; size_t size_f = arg_ptr->N*Q*sizeof(double); size_t size = arg_ptr->N*sizeof(double); size_t size_int = arg_ptr->N*sizeof(int); rho = (double *)malloc(size); u = (double *)malloc(DIM*size); is_solid = (int *)malloc(size_int); rho1 = (double *)malloc(size); hipMalloc((void **) &rho_d, size); hipMalloc((void **)&u_d, DIM*size); hipMalloc((void **) &rho1_d, size); hipMalloc((void **) &is_solid_d, size_int); hipMalloc((void **) &f_d, size_f); hipMalloc((void **)&ftemp_d, size_f); hipMalloc((void **) &f1_d, arg_ptr->N*5*sizeof(double)); hipMalloc((void **) &ftemp1_d, arg_ptr->N*5*sizeof(double)); hipMalloc(( void **) & arg_d, sizeof(mystruct )); size_t freeMem = 0; size_t totalMem = 0; hipMemGetInfo(&freeMem, &totalMem); printf("GPU Memory avaliable: Free: %lf GB, Total: %lf GB\n",freeMem/1e09, totalMem/1e09); size_f = arg_ptr->N*Q*sizeof(double); printf("\nmem_reqd on host =%lf MB \n",((DIM+1)*size+size_int+2*Q*Q*4)/1.e06); printf("\nmem_reqd on GPU =%lf MB \n",(3*size_f+ (DIM+1)*size+size_int+2*Q*Q*4)/1.e06); if(f_d ==NULL || ftemp_d ==NULL || u_d==NULL || rho_d==NULL || is_solid_d ==NULL || u==NULL || rho==NULL || is_solid ==NULL ) {printf("Memory allocation failed\n EXITING\n"); exit(1);} read_raw(is_solid, arg_ptr); arg_ptr->comp=0; init_vars(rho, u, is_solid, arg_ptr); write_data(rho, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, 0, 1, 0, is_solid, rho) ; hipMemcpy(arg_d, &arg, 
sizeof(mystruct), hipMemcpyHostToDevice); hipMemcpy(rho_d, rho, (size), hipMemcpyHostToDevice); hipMemcpy( u_d, u, (DIM*size), hipMemcpyHostToDevice); hipMemcpy(is_solid_d, is_solid, (size_int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( init_f), dim3((arg_ptr->N+BL-1)/BL),dim3(BL) , 0, 0, f_d, rho_d, u_d, arg_d); arg_ptr->comp=1; init_vars(rho1, u, is_solid, arg_ptr); write_data(rho1, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, 0, 1, 1, is_solid, rho1) ; hipMemcpy(arg_d, &arg, sizeof(mystruct), hipMemcpyHostToDevice); hipMemcpy(rho1_d, rho1, (size), hipMemcpyHostToDevice); hipLaunchKernelGGL(( init_f), dim3((arg_ptr->N+BL-1)/BL),dim3(BL) , 0, 0, f1_d, rho1_d, u_d, arg_d); for (t=1; t<= arg_ptr->ts; t++) { arg_ptr->t = t; arg_ptr->comp=0; hipMemcpy(arg_d, &arg, sizeof(mystruct), hipMemcpyHostToDevice); hipLaunchKernelGGL(( streaming), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f_d, ftemp_d, arg_d); hipLaunchKernelGGL(( streamingUpdate), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f_d, ftemp_d, arg_d); hipLaunchKernelGGL(( bcs_fluid), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f_d, arg_d); hipLaunchKernelGGL(( macro_vars), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f_d, rho_d, u_d, arg_d); hipLaunchKernelGGL(( collision), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f_d, rho_d, rho1_d, u_d, is_solid_d, arg_d); if(arg_ptr->t%arg_ptr->frame_rate==0) { hipMemcpy(rho, rho_d, (size), hipMemcpyDeviceToHost); hipMemcpy( u, u_d, DIM*(size), hipMemcpyDeviceToHost); write_data(rho, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, t/arg_ptr->frame_rate, 1, 0, is_solid, rho) ; } arg_ptr->comp=1; hipMemcpy(arg_d, &arg, sizeof(mystruct), hipMemcpyHostToDevice); hipLaunchKernelGGL(( streaming), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f1_d, ftemp1_d, arg_d); hipLaunchKernelGGL(( streamingUpdate), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f1_d, ftemp1_d, arg_d); hipLaunchKernelGGL(( bcs_solute), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f1_d, is_solid_d, arg_d); 
hipLaunchKernelGGL(( macro_vars), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f1_d, rho1_d, u_d, arg_d); hipLaunchKernelGGL(( collision), dim3((arg_ptr->N+BL-1)/BL), dim3(BL) , 0, 0, f1_d, rho1_d, rho_d, u_d, is_solid_d, arg_d); if(arg_ptr->t%arg_ptr->frame_rate==0) { hipMemcpy(rho1, rho1_d, (size), hipMemcpyDeviceToHost); write_data(rho1, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, t/arg_ptr->frame_rate, 1, 1, is_solid, rho1); } } free(rho); free(rho1); free(u); free(is_solid); hipFree(is_solid_d); hipFree(f_d); hipFree(ftemp_d); hipFree(f1_d); hipFree(ftemp1_d); hipFree(rho1_d); hipFree(u_d); return 0; }
4deaa16722d15119787344ee1e0165e43368fab9.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "flags.h" int main(int argc, char **argv) { int device = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); cudaSetDevice( device ); mystruct *arg_ptr, arg, *arg_d ;arg_ptr= &arg; read_params(arg_ptr); dump_params(arg_ptr); printf("GPU device %d, name = %s\n", device, deviceProp.name ); double *rho, *rho1, *u; double *f_d, *f1_d, *ftemp_d, *ftemp1_d, *rho_d, *rho1_d, *u_d; int *is_solid, *is_solid_d, t=0; size_t size_f = arg_ptr->N*Q*sizeof(double); size_t size = arg_ptr->N*sizeof(double); size_t size_int = arg_ptr->N*sizeof(int); rho = (double *)malloc(size); u = (double *)malloc(DIM*size); is_solid = (int *)malloc(size_int); rho1 = (double *)malloc(size); cudaMalloc((void **) &rho_d, size); cudaMalloc((void **)&u_d, DIM*size); cudaMalloc((void **) &rho1_d, size); cudaMalloc((void **) &is_solid_d, size_int); cudaMalloc((void **) &f_d, size_f); cudaMalloc((void **)&ftemp_d, size_f); cudaMalloc((void **) &f1_d, arg_ptr->N*5*sizeof(double)); cudaMalloc((void **) &ftemp1_d, arg_ptr->N*5*sizeof(double)); cudaMalloc(( void **) & arg_d, sizeof(mystruct )); size_t freeMem = 0; size_t totalMem = 0; cudaMemGetInfo(&freeMem, &totalMem); printf("GPU Memory avaliable: Free: %lf GB, Total: %lf GB\n",freeMem/1e09, totalMem/1e09); size_f = arg_ptr->N*Q*sizeof(double); printf("\nmem_reqd on host =%lf MB \n",((DIM+1)*size+size_int+2*Q*Q*4)/1.e06); printf("\nmem_reqd on GPU =%lf MB \n",(3*size_f+ (DIM+1)*size+size_int+2*Q*Q*4)/1.e06); if(f_d ==NULL || ftemp_d ==NULL || u_d==NULL || rho_d==NULL || is_solid_d ==NULL || u==NULL || rho==NULL || is_solid ==NULL ) {printf("Memory allocation failed\n EXITING\n"); exit(1);} read_raw(is_solid, arg_ptr); arg_ptr->comp=0; init_vars(rho, u, is_solid, arg_ptr); write_data(rho, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, 0, 1, 0, is_solid, rho) ; cudaMemcpy(arg_d, &arg, sizeof(mystruct), cudaMemcpyHostToDevice); cudaMemcpy(rho_d, rho, (size), 
cudaMemcpyHostToDevice); cudaMemcpy( u_d, u, (DIM*size), cudaMemcpyHostToDevice); cudaMemcpy(is_solid_d, is_solid, (size_int), cudaMemcpyHostToDevice); init_f<<< (arg_ptr->N+BL-1)/BL,BL >>> (f_d, rho_d, u_d, arg_d); arg_ptr->comp=1; init_vars(rho1, u, is_solid, arg_ptr); write_data(rho1, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, 0, 1, 1, is_solid, rho1) ; cudaMemcpy(arg_d, &arg, sizeof(mystruct), cudaMemcpyHostToDevice); cudaMemcpy(rho1_d, rho1, (size), cudaMemcpyHostToDevice); init_f<<< (arg_ptr->N+BL-1)/BL,BL >>> (f1_d, rho1_d, u_d, arg_d); for (t=1; t<= arg_ptr->ts; t++) { arg_ptr->t = t; arg_ptr->comp=0; cudaMemcpy(arg_d, &arg, sizeof(mystruct), cudaMemcpyHostToDevice); streaming<<< (arg_ptr->N+BL-1)/BL, BL >>> (f_d, ftemp_d, arg_d); streamingUpdate<<< (arg_ptr->N+BL-1)/BL, BL >>> (f_d, ftemp_d, arg_d); bcs_fluid<<< (arg_ptr->N+BL-1)/BL, BL >>> (f_d, arg_d); macro_vars<<< (arg_ptr->N+BL-1)/BL, BL >>> (f_d, rho_d, u_d, arg_d); collision<<< (arg_ptr->N+BL-1)/BL, BL >>> (f_d, rho_d, rho1_d, u_d, is_solid_d, arg_d); if(arg_ptr->t%arg_ptr->frame_rate==0) { cudaMemcpy(rho, rho_d, (size), cudaMemcpyDeviceToHost); cudaMemcpy( u, u_d, DIM*(size), cudaMemcpyDeviceToHost); write_data(rho, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, t/arg_ptr->frame_rate, 1, 0, is_solid, rho) ; } arg_ptr->comp=1; cudaMemcpy(arg_d, &arg, sizeof(mystruct), cudaMemcpyHostToDevice); streaming<<< (arg_ptr->N+BL-1)/BL, BL >>> (f1_d, ftemp1_d, arg_d); streamingUpdate<<< (arg_ptr->N+BL-1)/BL, BL >>> (f1_d, ftemp1_d, arg_d); bcs_solute<<< (arg_ptr->N+BL-1)/BL, BL >>> (f1_d, is_solid_d, arg_d); macro_vars<<< (arg_ptr->N+BL-1)/BL, BL >>> (f1_d, rho1_d, u_d, arg_d); collision<<< (arg_ptr->N+BL-1)/BL, BL >>> (f1_d, rho1_d, rho_d, u_d, is_solid_d, arg_d); if(arg_ptr->t%arg_ptr->frame_rate==0) { cudaMemcpy(rho1, rho1_d, (size), cudaMemcpyDeviceToHost); write_data(rho1, u, arg_ptr); bitmap(arg_ptr->LX, arg_ptr->LY, t/arg_ptr->frame_rate, 1, 1, is_solid, rho1); } } free(rho); free(rho1); free(u); 
free(is_solid); cudaFree(is_solid_d); cudaFree(f_d); cudaFree(ftemp_d); cudaFree(f1_d); cudaFree(ftemp1_d); cudaFree(rho1_d); cudaFree(u_d); return 0; }
aa3f8963ba1b9a6ec2ec00669b792c8a3c37b717.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/cudart_utils.h> #include <cuml/common/device_buffer.hpp> #include <cuml/linear_model/preprocess_mg.hpp> #include <cuml/solvers/cd_mg.hpp> #include <functions/softThres.cuh> #include <opg/linalg/mv_aTb.hpp> #include <opg/linalg/norm.hpp> #include <raft/comms/comms.hpp> #include <raft/cuda_utils.cuh> #include <raft/linalg/add.cuh> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/gemm.cuh> #include <raft/linalg/multiply.cuh> #include <raft/linalg/subtract.cuh> #include <raft/matrix/math.cuh> #include <raft/matrix/matrix.cuh> #include <raft/mr/device/allocator.hpp> #include "shuffle.h" using namespace MLCommon; namespace ML { namespace CD { namespace opg { template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, hipStream_t *streams, int n_streams, bool verbose) { const auto &comm = handle.get_comms(); hipblasHandle_t cublas_handle = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); std::vector<Matrix::RankSizePair *> partsToRanks = input_desc.blocksOwnedBy(comm.get_rank()); size_t total_M = 0.0; for (int i = 0; i < partsToRanks.size(); i++) 
{ total_M += partsToRanks[i]->size; } device_buffer<T> pred(allocator, streams[0], total_M); device_buffer<T> residual(allocator, streams[0], total_M); device_buffer<T> squared(allocator, streams[0], input_desc.N); device_buffer<T> mu_input(allocator, streams[0]); device_buffer<T> norm2_input(allocator, streams[0]); device_buffer<T> mu_labels(allocator, streams[0]); std::vector<T> h_coef(input_desc.N, T(0)); if (fit_intercept) { mu_input.resize(input_desc.N, streams[0]); mu_labels.resize(1, streams[0]); if (normalize) { norm2_input.resize(input_desc.N, streams[0]); } GLM::opg::preProcessData(handle, input_data, input_desc, labels, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } std::vector<int> ri(input_desc.N); std::mt19937 g(rand()); size_t memsize = input_desc.N * sizeof(int); int *ri_h = (int *)malloc(memsize); CUDA_CHECK(hipHostRegister(ri_h, memsize, hipHostRegisterDefault)); if (comm.get_rank() == 0) { ML::Solver::initShuffle(ri, g); for (int i = 0; i < input_desc.N; i++) { ri_h[i] = ri[i]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M; alpha = l1_ratio * alpha * input_desc.M; if (normalize) { T scalar = T(1.0) + l2_alpha; raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, streams[0]); } else { Matrix::Data<T> squared_data{squared.data(), size_t(input_desc.N)}; LinAlg::opg::colNorm2NoSeq(handle, squared_data, input_data, input_desc, streams, n_streams); raft::linalg::addScalar(squared.data(), squared.data(), l2_alpha, input_desc.N, streams[0]); } std::vector<Matrix::Data<T> *> input_data_temp; Matrix::PartDescriptor input_desc_temp = input_desc; input_desc_temp.N = size_t(1); std::vector<Matrix::Data<T> *> residual_temp; Matrix::Data<T> coef_loc_data; T *rs = residual.data(); for (int i = 0; i < partsToRanks.size(); i++) { raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, 
streams[0]); Matrix::Data<T> *rs_data = new Matrix::Data<T>(); rs_data->ptr = rs; rs_data->totalSize = partsToRanks[i]->size; residual_temp.push_back(rs_data); Matrix::Data<T> *temp_data = new Matrix::Data<T>(); temp_data->totalSize = partsToRanks[i]->size; input_data_temp.push_back(temp_data); rs += partsToRanks[i]->size; } for (int i = 0; i < epochs; i++) { if (i > 0 && shuffle) { if (comm.get_rank() == 0) { Solver::shuffle(ri, g); for (int k = 0; k < input_desc.N; k++) { ri_h[k] = ri[k]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); } T coef_max = 0.0; T d_coef_max = 0.0; T coef_prev = 0.0; for (int j = 0; j < input_desc.N; j++) { int ci = ri_h[j]; T *coef_loc = coef + ci; T *squared_loc = squared.data() + ci; T *input_col_loc; T *pred_loc = pred.data(); T *residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); input_data_temp[k]->ptr = input_col_loc; input_data_temp[k]->totalSize = partsToRanks[k]->size; raft::linalg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::add(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(hipStreamSynchronize(streams[k])); } coef_loc_data.ptr = coef_loc; coef_loc_data.totalSize = size_t(1); LinAlg::opg::mv_aTb(handle, coef_loc_data, input_data_temp, input_desc_temp, residual_temp, streams, n_streams); if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]); raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]); coef_prev = h_coef[ci]; raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]); CUDA_CHECK(hipStreamSynchronize(streams[0])); T diff = abs(coef_prev - h_coef[ci]); if (diff > d_coef_max) 
d_coef_max = diff; if (abs(h_coef[ci]) > coef_max) coef_max = abs(h_coef[ci]); pred_loc = pred.data(); residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); raft::linalg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::subtract(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(hipStreamSynchronize(streams[k])); } } bool flag_continue = true; if (coef_max == T(0)) { flag_continue = false; } if ((d_coef_max / coef_max) < tol) { flag_continue = false; } if (!flag_continue) { break; } } CUDA_CHECK(hipHostUnregister(ri_h)); free(ri_h); for (int i = 0; i < partsToRanks.size(); i++) { delete residual_temp[i]; delete input_data_temp[i]; } if (fit_intercept) { GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef, intercept, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } else { *intercept = T(0); } } /** * @brief performs MNMG fit operation for the ols * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param labels: labels data * @output param coef: learned regression coefficients * @output param intercept: intercept value * @input param fit_intercept: fit intercept or not * @input param normalize: normalize the data or not * @input param verbose */ template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, 
int epochs, T alpha, T l1_ratio, bool shuffle, T tol, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); ; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } template <typename T> void predict_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *coef, T intercept, std::vector<Matrix::Data<T> *> &preds, hipStream_t *streams, int n_streams, bool verbose) { std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks; T alpha = T(1); T beta = T(0); for (int i = 0; i < input_data.size(); i++) { int si = i % n_streams; raft::linalg::gemm(handle, input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef, preds[i]->ptr, local_blocks[i]->size, size_t(1), HIPBLAS_OP_N, HIPBLAS_OP_N, alpha, beta, streams[si]); raft::linalg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]); } } template <typename T> void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, size_t n_rows, size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> 
preds_data(preds, preds + n_parts); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = n_parts; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } predict_impl(handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<float> *> &labels, float *coef, float *intercept, bool fit_intercept, bool normalize, int epochs, float alpha, float l1_ratio, bool shuffle, float tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void fit(raft::handle_t &handle, std::vector<Matrix::Data<double> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<double> *> &labels, double *coef, double *intercept, bool fit_intercept, bool normalize, int epochs, double alpha, double l1_ratio, bool shuffle, double tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **input, size_t n_rows, size_t n_cols, float *coef, float intercept, Matrix::Data<float> **preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **input, size_t n_rows, size_t n_cols, double *coef, double intercept, 
Matrix::Data<double> **preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } } // namespace opg } // namespace CD } // namespace ML
aa3f8963ba1b9a6ec2ec00669b792c8a3c37b717.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/cudart_utils.h> #include <cuml/common/device_buffer.hpp> #include <cuml/linear_model/preprocess_mg.hpp> #include <cuml/solvers/cd_mg.hpp> #include <functions/softThres.cuh> #include <opg/linalg/mv_aTb.hpp> #include <opg/linalg/norm.hpp> #include <raft/comms/comms.hpp> #include <raft/cuda_utils.cuh> #include <raft/linalg/add.cuh> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/gemm.cuh> #include <raft/linalg/multiply.cuh> #include <raft/linalg/subtract.cuh> #include <raft/matrix/math.cuh> #include <raft/matrix/matrix.cuh> #include <raft/mr/device/allocator.hpp> #include "shuffle.h" using namespace MLCommon; namespace ML { namespace CD { namespace opg { template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, cudaStream_t *streams, int n_streams, bool verbose) { const auto &comm = handle.get_comms(); cublasHandle_t cublas_handle = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); std::vector<Matrix::RankSizePair *> partsToRanks = input_desc.blocksOwnedBy(comm.get_rank()); size_t total_M = 0.0; for (int i = 0; i < partsToRanks.size(); i++) { total_M += partsToRanks[i]->size; } device_buffer<T> 
pred(allocator, streams[0], total_M); device_buffer<T> residual(allocator, streams[0], total_M); device_buffer<T> squared(allocator, streams[0], input_desc.N); device_buffer<T> mu_input(allocator, streams[0]); device_buffer<T> norm2_input(allocator, streams[0]); device_buffer<T> mu_labels(allocator, streams[0]); std::vector<T> h_coef(input_desc.N, T(0)); if (fit_intercept) { mu_input.resize(input_desc.N, streams[0]); mu_labels.resize(1, streams[0]); if (normalize) { norm2_input.resize(input_desc.N, streams[0]); } GLM::opg::preProcessData(handle, input_data, input_desc, labels, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } std::vector<int> ri(input_desc.N); std::mt19937 g(rand()); size_t memsize = input_desc.N * sizeof(int); int *ri_h = (int *)malloc(memsize); CUDA_CHECK(cudaHostRegister(ri_h, memsize, cudaHostRegisterDefault)); if (comm.get_rank() == 0) { ML::Solver::initShuffle(ri, g); for (int i = 0; i < input_desc.N; i++) { ri_h[i] = ri[i]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M; alpha = l1_ratio * alpha * input_desc.M; if (normalize) { T scalar = T(1.0) + l2_alpha; raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, streams[0]); } else { Matrix::Data<T> squared_data{squared.data(), size_t(input_desc.N)}; LinAlg::opg::colNorm2NoSeq(handle, squared_data, input_data, input_desc, streams, n_streams); raft::linalg::addScalar(squared.data(), squared.data(), l2_alpha, input_desc.N, streams[0]); } std::vector<Matrix::Data<T> *> input_data_temp; Matrix::PartDescriptor input_desc_temp = input_desc; input_desc_temp.N = size_t(1); std::vector<Matrix::Data<T> *> residual_temp; Matrix::Data<T> coef_loc_data; T *rs = residual.data(); for (int i = 0; i < partsToRanks.size(); i++) { raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, streams[0]); Matrix::Data<T> *rs_data = new 
Matrix::Data<T>(); rs_data->ptr = rs; rs_data->totalSize = partsToRanks[i]->size; residual_temp.push_back(rs_data); Matrix::Data<T> *temp_data = new Matrix::Data<T>(); temp_data->totalSize = partsToRanks[i]->size; input_data_temp.push_back(temp_data); rs += partsToRanks[i]->size; } for (int i = 0; i < epochs; i++) { if (i > 0 && shuffle) { if (comm.get_rank() == 0) { Solver::shuffle(ri, g); for (int k = 0; k < input_desc.N; k++) { ri_h[k] = ri[k]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); } T coef_max = 0.0; T d_coef_max = 0.0; T coef_prev = 0.0; for (int j = 0; j < input_desc.N; j++) { int ci = ri_h[j]; T *coef_loc = coef + ci; T *squared_loc = squared.data() + ci; T *input_col_loc; T *pred_loc = pred.data(); T *residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); input_data_temp[k]->ptr = input_col_loc; input_data_temp[k]->totalSize = partsToRanks[k]->size; raft::linalg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::add(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(cudaStreamSynchronize(streams[k])); } coef_loc_data.ptr = coef_loc; coef_loc_data.totalSize = size_t(1); LinAlg::opg::mv_aTb(handle, coef_loc_data, input_data_temp, input_desc_temp, residual_temp, streams, n_streams); if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]); raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]); coef_prev = h_coef[ci]; raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]); CUDA_CHECK(cudaStreamSynchronize(streams[0])); T diff = abs(coef_prev - h_coef[ci]); if (diff > d_coef_max) d_coef_max = diff; if (abs(h_coef[ci]) > 
coef_max) coef_max = abs(h_coef[ci]); pred_loc = pred.data(); residual_loc = residual.data(); for (int k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); raft::linalg::multiplyScalar(pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::subtract(residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { CUDA_CHECK(cudaStreamSynchronize(streams[k])); } } bool flag_continue = true; if (coef_max == T(0)) { flag_continue = false; } if ((d_coef_max / coef_max) < tol) { flag_continue = false; } if (!flag_continue) { break; } } CUDA_CHECK(cudaHostUnregister(ri_h)); free(ri_h); for (int i = 0; i < partsToRanks.size(); i++) { delete residual_temp[i]; delete input_data_temp[i]; } if (fit_intercept) { GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef, intercept, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } else { *intercept = T(0); } } /** * @brief performs MNMG fit operation for the ols * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param labels: labels data * @output param coef: learned regression coefficients * @output param intercept: intercept value * @input param fit_intercept: fit intercept or not * @input param normalize: normalize the data or not * @input param verbose */ template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool 
shuffle, T tol, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); ; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } template <typename T> void predict_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *coef, T intercept, std::vector<Matrix::Data<T> *> &preds, cudaStream_t *streams, int n_streams, bool verbose) { std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks; T alpha = T(1); T beta = T(0); for (int i = 0; i < input_data.size(); i++) { int si = i % n_streams; raft::linalg::gemm(handle, input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef, preds[i]->ptr, local_blocks[i]->size, size_t(1), CUBLAS_OP_N, CUBLAS_OP_N, alpha, beta, streams[si]); raft::linalg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]); } } template <typename T> void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, size_t n_rows, size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts); 
// TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = n_parts; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } predict_impl(handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<float> *> &labels, float *coef, float *intercept, bool fit_intercept, bool normalize, int epochs, float alpha, float l1_ratio, bool shuffle, float tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void fit(raft::handle_t &handle, std::vector<Matrix::Data<double> *> &input_data, Matrix::PartDescriptor &input_desc, std::vector<Matrix::Data<double> *> &labels, double *coef, double *intercept, bool fit_intercept, bool normalize, int epochs, double alpha, double l1_ratio, bool shuffle, double tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **input, size_t n_rows, size_t n_cols, float *coef, float intercept, Matrix::Data<float> **preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **input, size_t n_rows, size_t n_cols, double *coef, double intercept, Matrix::Data<double> **preds, bool verbose) { 
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } } // namespace opg } // namespace CD } // namespace ML
6b08fb0d509895a77df7ed416d18c55abf3cddf2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" #include "math/JuliaMath.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void julia(float c1, float c2, uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void julia(float c1, float c2, uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath) { JuliaMath juliaMath = JuliaMath(t, c1, c2); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; int s = TID; int i, j; double x, y; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); domaineMath.toXY(i, j, &x, &y); juliaMath.colorXY(&ptrDevPixels[s], x, y, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
6b08fb0d509895a77df7ed416d18c55abf3cddf2.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" #include "math/JuliaMath.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void julia(float c1, float c2, uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void julia(float c1, float c2, uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath) { JuliaMath juliaMath = JuliaMath(t, c1, c2); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; int s = TID; int i, j; double x, y; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); domaineMath.toXY(i, j, &x, &y); juliaMath.colorXY(&ptrDevPixels[s], x, y, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
0d0d96466788d729a8932ee9bc27ab5332ec5598.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define N 50 #define NewN 100 #define LifeN 500 #define numofthreads 512 int numofeles=0,capacity; struct chromosome { long long weight=0, value=0; bool chromo[100003]; }; chromosome chromoele[N],*cudaChromo,*cudaNewpopulation,newpopulation[NewN],res,x[2]; int weight[100001],value[100001],*devValue,*devWeight,*devnumeles; __global__ void gan(chromosome *cudaChromo, chromosome* cudaNewpopulation,const int capacity) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < N) { for (int i = idx; i < NewN;i+=N) if (cudaNewpopulation[i].weight<=capacity&&cudaNewpopulation[i].value>cudaChromo[idx].value) cudaChromo[idx] = cudaNewpopulation[i]; } }
0d0d96466788d729a8932ee9bc27ab5332ec5598.cu
#include "includes.h" #define N 50 #define NewN 100 #define LifeN 500 #define numofthreads 512 int numofeles=0,capacity; struct chromosome { long long weight=0, value=0; bool chromo[100003]; }; chromosome chromoele[N],*cudaChromo,*cudaNewpopulation,newpopulation[NewN],res,x[2]; int weight[100001],value[100001],*devValue,*devWeight,*devnumeles; __global__ void gan(chromosome *cudaChromo, chromosome* cudaNewpopulation,const int capacity) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < N) { for (int i = idx; i < NewN;i+=N) if (cudaNewpopulation[i].weight<=capacity&&cudaNewpopulation[i].value>cudaChromo[idx].value) cudaChromo[idx] = cudaNewpopulation[i]; } }
c178002ddeefe03f6d88cbb7e594c18012ae4ef8.hip
// !!! This is a file automatically generated by hipify!!! #include "libgpualg/mult.cuh" #include <iostream> #include <vector> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cmath> #include <stdio.h> //Normal CPU Matrix Multiplication void matMultiplyOnHost(double* A, double* B, double* C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { for (int i = 0; i < numARows; i++) { for (int j = 0; j < numAColumns; j++) { C[i * numCColumns + j] = 0; for (int k = 0; k < numCColumns; k++) { C[i * numCColumns + j] += A[i * numAColumns + k] * B[k * numBColumns + j]; } } } return; } void print_Mat(int Row, int Col, double* Mat) { for (int i = 0; i < Row * Col; i++) { printf("%f ", *(Mat + i)); if ((i % Col) == 0) { printf("\n"); } } } int main(int argc, char** argv) { // Perform matrix multiplication C = A*B int h_A_row = 1; int h_A_col = 3; int h_B_row = 3; int h_B_col = 3; int h_C_row = h_A_row; int h_C_col = h_B_col; // Allocate memory on the host double* h_A = (double*)malloc(h_A_row * h_A_col * sizeof(double)); double* h_B = (double*)malloc(h_B_row * h_B_col * sizeof(double)); double* h_C = (double*)malloc(h_C_row * h_C_col * sizeof(double)); for (int i = 0; i < h_A_row; i++) { for (int j = 0; j < h_A_col; j++) { h_A[i * h_A_row + j] = 2;//sin(i); } } h_A[2] = 9.; for (int i = 0; i < h_B_row; i++) { for (int j = 0; j < h_B_col; j++) { h_B[i * h_B_row + j] = 2;//sin(i); } } h_B[6] = 53.; double *d_A; double *d_B; double* d_C; hipMalloc(&d_A, h_A_row * h_A_col * sizeof(double)); hipMalloc(&d_B, h_B_row * h_B_col * sizeof(double)); hipMalloc(&d_C, h_C_row * h_C_col * sizeof(double)); hipMemcpy(d_A, h_A, h_A_row * h_A_col * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, h_B_row * h_B_col * sizeof(double), hipMemcpyHostToDevice); matrixMultiplication(d_A, d_B, d_C, h_A_row, h_A_col, h_B_row, h_B_col, h_C_row, h_C_col); hipDeviceSynchronize(); hipMemcpy(h_C, d_C, 
h_C_row * h_C_col * sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); double* cpu_C; cpu_C = new double[h_C_row * h_C_col]; matMultiplyOnHost(h_A, h_B, cpu_C, h_A_row, h_A_col, h_B_row, h_B_col, h_C_row, h_C_col); double err = 0; // Check the result and make sure it is correct for (int i = 0; i < h_C_col * h_C_row; i++) { err += cpu_C[i] - h_C[i]; if (cpu_C[i] != h_C[i]) { printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / h_C_col, i % h_C_col, cpu_C[i], h_C[i]); break; } } hipDeviceSynchronize(); std::cerr << "Error: " << err << std::endl; hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); hipDeviceReset(); return 0; }
c178002ddeefe03f6d88cbb7e594c18012ae4ef8.cu
#include "libgpualg/mult.cuh" #include <iostream> #include <vector> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> #include <cuda.h> #include <cmath> #include <stdio.h> //Normal CPU Matrix Multiplication void matMultiplyOnHost(double* A, double* B, double* C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { for (int i = 0; i < numARows; i++) { for (int j = 0; j < numAColumns; j++) { C[i * numCColumns + j] = 0; for (int k = 0; k < numCColumns; k++) { C[i * numCColumns + j] += A[i * numAColumns + k] * B[k * numBColumns + j]; } } } return; } void print_Mat(int Row, int Col, double* Mat) { for (int i = 0; i < Row * Col; i++) { printf("%f ", *(Mat + i)); if ((i % Col) == 0) { printf("\n"); } } } int main(int argc, char** argv) { // Perform matrix multiplication C = A*B int h_A_row = 1; int h_A_col = 3; int h_B_row = 3; int h_B_col = 3; int h_C_row = h_A_row; int h_C_col = h_B_col; // Allocate memory on the host double* h_A = (double*)malloc(h_A_row * h_A_col * sizeof(double)); double* h_B = (double*)malloc(h_B_row * h_B_col * sizeof(double)); double* h_C = (double*)malloc(h_C_row * h_C_col * sizeof(double)); for (int i = 0; i < h_A_row; i++) { for (int j = 0; j < h_A_col; j++) { h_A[i * h_A_row + j] = 2;//sin(i); } } h_A[2] = 9.; for (int i = 0; i < h_B_row; i++) { for (int j = 0; j < h_B_col; j++) { h_B[i * h_B_row + j] = 2;//sin(i); } } h_B[6] = 53.; double *d_A; double *d_B; double* d_C; cudaMalloc(&d_A, h_A_row * h_A_col * sizeof(double)); cudaMalloc(&d_B, h_B_row * h_B_col * sizeof(double)); cudaMalloc(&d_C, h_C_row * h_C_col * sizeof(double)); cudaMemcpy(d_A, h_A, h_A_row * h_A_col * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, h_B_row * h_B_col * sizeof(double), cudaMemcpyHostToDevice); matrixMultiplication(d_A, d_B, d_C, h_A_row, h_A_col, h_B_row, h_B_col, h_C_row, h_C_col); cudaDeviceSynchronize(); cudaMemcpy(h_C, d_C, h_C_row * h_C_col * sizeof(double), cudaMemcpyDeviceToHost); 
cudaDeviceSynchronize(); double* cpu_C; cpu_C = new double[h_C_row * h_C_col]; matMultiplyOnHost(h_A, h_B, cpu_C, h_A_row, h_A_col, h_B_row, h_B_col, h_C_row, h_C_col); double err = 0; // Check the result and make sure it is correct for (int i = 0; i < h_C_col * h_C_row; i++) { err += cpu_C[i] - h_C[i]; if (cpu_C[i] != h_C[i]) { printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / h_C_col, i % h_C_col, cpu_C[i], h_C[i]); break; } } cudaDeviceSynchronize(); std::cerr << "Error: " << err << std::endl; cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); cudaDeviceReset(); return 0; }
3527153ef42f00b849dc8ffacbc24b04063809e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <hiprand/hiprand_kernel.h> // CURAND Bibliothek header-Datei #include "common.h" #define NUMBERS_PER_THREAD 128 //#define BLOCKSIZE 64 #define GRIDSZE 1 #define NHIST 20 __global__ void zufallszahlen_gpu(float *d_z, int N, hiprandState_t *states, float x) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; int k,ix; // Initialisieren des Zufallszahlen-Generators // Der 'state' (Status) wird fr jeden Thread unabhngig gespeichert hiprand_init(tid, x, 0, &states[tid]); // gleichverteilte Zufallszahlen in (0,1] ix=tid*N; for (k=0; k<N; k++) d_z[ix+k] = hiprand_uniform(&states[tid]); } //********* NEU ********* //********* NEU ********* //********* NEU ********* void print_vector_in(int *p, double n) { int j=0; printf("In Vector : \n"); while(j<n) { printf(" "); printf("v(%d) = ",j); printf("%d ",p[j]); printf("\n"); ++j; } } float approx_pi(int *h_in, int N) { double sum=0; int i; for(i=0; i<N; ++i) { sum+=h_in[i]; } return 4*sum/N; } __global__ void in_out_gpu(float *d_z, float *d_w, int *in, int N) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; in[tid]=0; if(tid<N){ if(d_z[tid]*d_z[tid]+d_w[tid]*d_w[tid]<=1) { // printf("IN\n"); in[tid]=1; } } // else printf("OUT\n"); } int main(int argc, char **argv) { int N, k, Ngpu; float *z, *d_z, *h_z; hiprandState_t *d_states_z; hiprandState_t *d_states_w; N=128; if (argc>1) { N=atoi(argv[1]); } int BLOCKSIZE=N; Ngpu=N*BLOCKSIZE*GRIDSZE; float *d_w; int *d_in, *h_in, *d_sum, *h_sum; h_in=(int*)malloc(Ngpu*sizeof(int)); h_sum=(int*)malloc(Ngpu*sizeof(int)); CHECK(hipMalloc((void**)&d_in,Ngpu*sizeof(int))); CHECK(hipMalloc((void**)&d_sum,Ngpu*sizeof(int))); printf("%s Starting...\n\n", argv[0]); // Zufallszahlen auf der CPU srand((unsigned int)seconds()); // Initialisieren des Zufallszahlen-Generators z=(float*)malloc(N*sizeof(float)); for (k=0; k<N; k++) { z[k] = (float)(rand()) 
/ (float)RAND_MAX; // Zufallszahlen in (0,1] } //printf("Histogramm der Zufallszahlen auf der CPU:\n\n"); //histogramm(z,N); // Histogramm // Zufallszahlen auf der GPU // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); h_z=(float*)malloc(Ngpu*sizeof(float)); CHECK(hipMalloc((void**)&d_z,Ngpu*sizeof(float))); CHECK(hipMalloc((void**)&d_states_z,BLOCKSIZE*GRIDSZE*sizeof(hiprandState_t))); hipLaunchKernelGGL(( zufallszahlen_gpu), dim3(GRIDSZE),dim3(BLOCKSIZE), 0, 0, d_z,N,d_states_z,(float)(rand())); CHECK(hipMemcpy(h_z, d_z, Ngpu*sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipGetLastError()); CHECK(hipMalloc((void**)&d_w,Ngpu*sizeof(float))); CHECK(hipMalloc((void**)&d_states_w,BLOCKSIZE*GRIDSZE*sizeof(hiprandState_t))); hipLaunchKernelGGL(( zufallszahlen_gpu), dim3(GRIDSZE),dim3(BLOCKSIZE), 0, 0, d_w,N,d_states_w,(float)(rand())); CHECK(hipGetLastError()); //printf("Histogramm der Zufallszahlen auf der GPU:\n\n"); //histogramm(h_z,Ngpu); // Histogramm printf("N = %d\n",N); //Anruf von in_out_gpu Funktion hipLaunchKernelGGL(( in_out_gpu), dim3(GRIDSZE),dim3(BLOCKSIZE), 0, 0, d_z,d_w,d_in,N); CHECK(hipDeviceSynchronize()); CHECK(hipMemcpy(h_in, d_in, Ngpu*sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipGetLastError()); //print_vector_in(h_in,N); CHECK(hipMemcpy(h_sum, d_sum, Ngpu*sizeof(int), hipMemcpyDeviceToHost)); //printf("In Ereignisse N_in = %d \n",h_sum[0]); float ergebnis=approx_pi(h_in,N); printf("Approx von Pi : %f \n",ergebnis); free(z); free(h_z); free(h_in); free(h_sum); CHECK(hipFree(d_w)); CHECK(hipFree(d_z)); CHECK(hipFree(d_states_z)); CHECK(hipFree(d_states_w)); CHECK(hipFree(d_in)); CHECK(hipFree(d_sum)); }
3527153ef42f00b849dc8ffacbc24b04063809e5.cu
#include <stdio.h> #include <stdlib.h> #include <curand_kernel.h> // CURAND Bibliothek header-Datei #include "common.h" #define NUMBERS_PER_THREAD 128 //#define BLOCKSIZE 64 #define GRIDSZE 1 #define NHIST 20 __global__ void zufallszahlen_gpu(float *d_z, int N, curandState *states, float x) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; int k,ix; // Initialisieren des Zufallszahlen-Generators // Der 'state' (Status) wird für jeden Thread unabhängig gespeichert curand_init(tid, x, 0, &states[tid]); // gleichverteilte Zufallszahlen in (0,1] ix=tid*N; for (k=0; k<N; k++) d_z[ix+k] = curand_uniform(&states[tid]); } //********* NEU ********* //********* NEU ********* //********* NEU ********* void print_vector_in(int *p, double n) { int j=0; printf("In Vector : \n"); while(j<n) { printf(" "); printf("v(%d) = ",j); printf("%d ",p[j]); printf("\n"); ++j; } } float approx_pi(int *h_in, int N) { double sum=0; int i; for(i=0; i<N; ++i) { sum+=h_in[i]; } return 4*sum/N; } __global__ void in_out_gpu(float *d_z, float *d_w, int *in, int N) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; in[tid]=0; if(tid<N){ if(d_z[tid]*d_z[tid]+d_w[tid]*d_w[tid]<=1) { // printf("IN\n"); in[tid]=1; } } // else printf("OUT\n"); } int main(int argc, char **argv) { int N, k, Ngpu; float *z, *d_z, *h_z; curandState *d_states_z; curandState *d_states_w; N=128; if (argc>1) { N=atoi(argv[1]); } int BLOCKSIZE=N; Ngpu=N*BLOCKSIZE*GRIDSZE; float *d_w; int *d_in, *h_in, *d_sum, *h_sum; h_in=(int*)malloc(Ngpu*sizeof(int)); h_sum=(int*)malloc(Ngpu*sizeof(int)); CHECK(cudaMalloc((void**)&d_in,Ngpu*sizeof(int))); CHECK(cudaMalloc((void**)&d_sum,Ngpu*sizeof(int))); printf("%s Starting...\n\n", argv[0]); // Zufallszahlen auf der CPU srand((unsigned int)seconds()); // Initialisieren des Zufallszahlen-Generators z=(float*)malloc(N*sizeof(float)); for (k=0; k<N; k++) { z[k] = (float)(rand()) / (float)RAND_MAX; // Zufallszahlen in (0,1] } //printf("Histogramm der Zufallszahlen auf der 
CPU:\n\n"); //histogramm(z,N); // Histogramm // Zufallszahlen auf der GPU // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); h_z=(float*)malloc(Ngpu*sizeof(float)); CHECK(cudaMalloc((void**)&d_z,Ngpu*sizeof(float))); CHECK(cudaMalloc((void**)&d_states_z,BLOCKSIZE*GRIDSZE*sizeof(curandState))); zufallszahlen_gpu<<<GRIDSZE,BLOCKSIZE>>>(d_z,N,d_states_z,(float)(rand())); CHECK(cudaMemcpy(h_z, d_z, Ngpu*sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaGetLastError()); CHECK(cudaMalloc((void**)&d_w,Ngpu*sizeof(float))); CHECK(cudaMalloc((void**)&d_states_w,BLOCKSIZE*GRIDSZE*sizeof(curandState))); zufallszahlen_gpu<<<GRIDSZE,BLOCKSIZE>>>(d_w,N,d_states_w,(float)(rand())); CHECK(cudaGetLastError()); //printf("Histogramm der Zufallszahlen auf der GPU:\n\n"); //histogramm(h_z,Ngpu); // Histogramm printf("N = %d\n",N); //Anruf von in_out_gpu Funktion in_out_gpu<<<GRIDSZE,BLOCKSIZE>>>(d_z,d_w,d_in,N); CHECK(cudaDeviceSynchronize()); CHECK(cudaMemcpy(h_in, d_in, Ngpu*sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaGetLastError()); //print_vector_in(h_in,N); CHECK(cudaMemcpy(h_sum, d_sum, Ngpu*sizeof(int), cudaMemcpyDeviceToHost)); //printf("In Ereignisse N_in = %d \n",h_sum[0]); float ergebnis=approx_pi(h_in,N); printf("Approx von Pi : %f \n",ergebnis); free(z); free(h_z); free(h_in); free(h_sum); CHECK(cudaFree(d_w)); CHECK(cudaFree(d_z)); CHECK(cudaFree(d_states_z)); CHECK(cudaFree(d_states_w)); CHECK(cudaFree(d_in)); CHECK(cudaFree(d_sum)); }
e158b0b1981f87cbc3b33b516c6ff707ec4fa9c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ======================================================================== // // Copyright 2018-2019 Ingo Wald // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // ======================================================================== // #include <optix_device.h> #include <vec.h> namespace osc { #include "launch_params.h" } using namespace osc; namespace osc { /*! launch parameters in constant memory, filled in by optix upon optixLaunch (this gets filled in from the buffer we pass to optixLaunch) */ extern "C" __constant__ LaunchParams optixLaunchParams; // for this simple example, we have a single ray type enum { SURFACE_RAY_TYPE = 0, RAY_TYPE_COUNT }; static __forceinline__ __device__ void* unpackPointer(u32 i0, u32 i1) { const u64 uptr = static_cast<u64>(i0) << 32 | i1; void* ptr = reinterpret_cast<void*>(uptr); return ptr; } static __forceinline__ __device__ void packPointer(void* ptr, u32& i0, u32& i1) { const u64 uptr = reinterpret_cast<u64>(ptr); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } template <typename T> static __forceinline__ __device__ T* getPRD() { const u32 u0 = optixGetPayload_0(); const u32 u1 = optixGetPayload_1(); return reinterpret_cast<T*>(unpackPointer(u0, u1)); } /*! 
helper function that creates a semi-random color from an ID */ inline __device__ f32x3 randomColor(int i) { int r = unsigned(i) * 13 * 17 + 0x234235; int g = unsigned(i) * 7 * 3 * 5 + 0x773477; int b = unsigned(i) * 11 * 19 + 0x223766; return make_f32x3((r & 255) / 255.f, (g & 255) / 255.f, (b & 255) / 255.f); } //------------------------------------------------------------------------------ // closest hit and anyhit programs for radiance-type rays. // // Note eventually we will have to create one pair of those for each // ray type and each geometry type we want to render; but this // simple example doesn't use any actual geometries yet, so we only // create a single, dummy, set of them (we do have to have at least // one group of them to set up the SBT) //------------------------------------------------------------------------------ extern "C" __global__ void __closesthit__radiance() { const TriangleMeshSBTData& sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); // compute normal: const int primID = optixGetPrimitiveIndex(); const i32x3 index = sbtData.index[primID]; const f32x3& A = sbtData.vertex[index.x]; const f32x3& B = sbtData.vertex[index.y]; const f32x3& C = sbtData.vertex[index.z]; const f32x3 Ng = normalize(cross(B - A, C - A)); const f32x3 rayDir = optixGetWorldRayDirection(); const float cosDN = 0.2f + .8f * fabsf(dot(rayDir, Ng)); f32x3& prd = *(f32x3*)getPRD<f32x3>(); prd = cosDN * sbtData.color; } extern "C" __global__ void __anyhit__radiance() { /*! 
for this simple example, this will remain empty */ } //------------------------------------------------------------------------------ // miss program that gets called for any ray that did not have a // valid intersection // // as with the anyhit/closest hit programs, in this example we only // need to have _some_ dummy function to set up a valid SBT // ------------------------------------------------------------------------------ extern "C" __global__ void __miss__radiance() { f32x3& prd = *(f32x3*)getPRD<f32x3>(); // set to constant white as background color prd = make_f32x3(1.f, 1.0f, 1.0f); } //------------------------------------------------------------------------------ // ray gen program - the actual rendering happens in here //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; const auto& camera = optixLaunchParams.camera; // our per-ray data for this example. 
what we initialize it to // won't matter, since this value will be overwritten by either // the miss or hit program, anyway f32x3 pixelColorPRD = make_f32x3(0.f, 0.0f, 0.0f); // the values we store the PRD pointer in: u32 u0, u1; packPointer(&pixelColorPRD, u0, u1); // normalized screen plane position, in [0,1]^2 const f32x2 screen = make_f32x2(f32(ix) + .5f, f32(iy) + .5f) / make_f32x2(optixLaunchParams.frame.size.x, optixLaunchParams.frame.size.y); // generate ray direction f32x3 rayDir = normalize(camera.direction + (screen.x - 0.5f) * camera.horizontal + (screen.y - 0.5f) * camera.vertical); optixTrace(optixLaunchParams.traversable, (float3)camera.position, (float3)rayDir, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, SURFACE_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride SURFACE_RAY_TYPE, // missSBTIndex u0, u1); // and write to frame buffer ... const u32 fbIndex = ix + iy * optixLaunchParams.frame.size.x; optixLaunchParams.frame.color_buffer[fbIndex] = make_float4(pixelColorPRD.x, pixelColorPRD.y, pixelColorPRD.z, 1.0f); } } // namespace osc
e158b0b1981f87cbc3b33b516c6ff707ec4fa9c6.cu
// ======================================================================== // // Copyright 2018-2019 Ingo Wald // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // ======================================================================== // #include <optix_device.h> #include <vec.h> namespace osc { #include "launch_params.h" } using namespace osc; namespace osc { /*! launch parameters in constant memory, filled in by optix upon optixLaunch (this gets filled in from the buffer we pass to optixLaunch) */ extern "C" __constant__ LaunchParams optixLaunchParams; // for this simple example, we have a single ray type enum { SURFACE_RAY_TYPE = 0, RAY_TYPE_COUNT }; static __forceinline__ __device__ void* unpackPointer(u32 i0, u32 i1) { const u64 uptr = static_cast<u64>(i0) << 32 | i1; void* ptr = reinterpret_cast<void*>(uptr); return ptr; } static __forceinline__ __device__ void packPointer(void* ptr, u32& i0, u32& i1) { const u64 uptr = reinterpret_cast<u64>(ptr); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } template <typename T> static __forceinline__ __device__ T* getPRD() { const u32 u0 = optixGetPayload_0(); const u32 u1 = optixGetPayload_1(); return reinterpret_cast<T*>(unpackPointer(u0, u1)); } /*! 
helper function that creates a semi-random color from an ID */ inline __device__ f32x3 randomColor(int i) { int r = unsigned(i) * 13 * 17 + 0x234235; int g = unsigned(i) * 7 * 3 * 5 + 0x773477; int b = unsigned(i) * 11 * 19 + 0x223766; return make_f32x3((r & 255) / 255.f, (g & 255) / 255.f, (b & 255) / 255.f); } //------------------------------------------------------------------------------ // closest hit and anyhit programs for radiance-type rays. // // Note eventually we will have to create one pair of those for each // ray type and each geometry type we want to render; but this // simple example doesn't use any actual geometries yet, so we only // create a single, dummy, set of them (we do have to have at least // one group of them to set up the SBT) //------------------------------------------------------------------------------ extern "C" __global__ void __closesthit__radiance() { const TriangleMeshSBTData& sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); // compute normal: const int primID = optixGetPrimitiveIndex(); const i32x3 index = sbtData.index[primID]; const f32x3& A = sbtData.vertex[index.x]; const f32x3& B = sbtData.vertex[index.y]; const f32x3& C = sbtData.vertex[index.z]; const f32x3 Ng = normalize(cross(B - A, C - A)); const f32x3 rayDir = optixGetWorldRayDirection(); const float cosDN = 0.2f + .8f * fabsf(dot(rayDir, Ng)); f32x3& prd = *(f32x3*)getPRD<f32x3>(); prd = cosDN * sbtData.color; } extern "C" __global__ void __anyhit__radiance() { /*! 
for this simple example, this will remain empty */ } //------------------------------------------------------------------------------ // miss program that gets called for any ray that did not have a // valid intersection // // as with the anyhit/closest hit programs, in this example we only // need to have _some_ dummy function to set up a valid SBT // ------------------------------------------------------------------------------ extern "C" __global__ void __miss__radiance() { f32x3& prd = *(f32x3*)getPRD<f32x3>(); // set to constant white as background color prd = make_f32x3(1.f, 1.0f, 1.0f); } //------------------------------------------------------------------------------ // ray gen program - the actual rendering happens in here //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; const auto& camera = optixLaunchParams.camera; // our per-ray data for this example. 
what we initialize it to // won't matter, since this value will be overwritten by either // the miss or hit program, anyway f32x3 pixelColorPRD = make_f32x3(0.f, 0.0f, 0.0f); // the values we store the PRD pointer in: u32 u0, u1; packPointer(&pixelColorPRD, u0, u1); // normalized screen plane position, in [0,1]^2 const f32x2 screen = make_f32x2(f32(ix) + .5f, f32(iy) + .5f) / make_f32x2(optixLaunchParams.frame.size.x, optixLaunchParams.frame.size.y); // generate ray direction f32x3 rayDir = normalize(camera.direction + (screen.x - 0.5f) * camera.horizontal + (screen.y - 0.5f) * camera.vertical); optixTrace(optixLaunchParams.traversable, (float3)camera.position, (float3)rayDir, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, SURFACE_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride SURFACE_RAY_TYPE, // missSBTIndex u0, u1); // and write to frame buffer ... const u32 fbIndex = ix + iy * optixLaunchParams.frame.size.x; optixLaunchParams.frame.color_buffer[fbIndex] = make_float4(pixelColorPRD.x, pixelColorPRD.y, pixelColorPRD.z, 1.0f); } } // namespace osc
21c02e0de995576345286f812fb4adeed0d34771.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <cinttypes> #include <hip/hip_runtime.h> #include "common.hh" static __global__ void f(const uint64_t a[], const uint64_t b[], uint64_t c[], int64_t N) { int64_t index = threadIdx.x + blockIdx.x * blockDim.x; int64_t stride = blockDim.x * gridDim.x; for (int64_t i = index; i < N; i += stride) { c[i] = a[i] * b[i]; } } static void doit(const uint64_t a[], const uint64_t b[], uint64_t c[], int64_t N) { int blockSize = 256; int64_t numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( f), dim3(numBlocks), dim3(blockSize), 0, 0, a, b, c, N); } int main(int argc, char *argv[]) { size_t N = 10000000; clock_t start_program, end_program; clock_t start, end; uint64_t *a, *b, *c; size_t count; if (argc == 2) { N = checked_strtosize(argv[1]); } count = checked_mul(N, sizeof(uint64_t)); /* Initialize context */ check(hipHostMalloc(&a, 128)); check(hipDeviceSynchronize()); check(hipHostFree(a)); start_program = clock(); start = clock(); check(hipHostMalloc(&a, count)); check(hipHostMalloc(&b, count)); check(hipHostMalloc(&c, count)); end = clock(); log("host: MallocHost", start, end); start = clock(); for (size_t i = 0; i < N; i++) { a[i] = 3; b[i] = 5; } end = clock(); log("host: init arrays", start, end); start = clock(); doit(a, b, c, N); check(hipDeviceSynchronize()); end = clock(); log("device: DMA+compute+synchronize", start, end); start = clock(); for (size_t i = 0; i < N; i++) { if (a[i] != 3 || b[i] != 5 || c[i] != 15) { fprintf(stderr, "unexpected result a: %lu b: %lu c: %lu\n", a[i], b[i], c[i]); exit(1); } } end = clock(); log("host: access all arrays", start, end); start = clock(); for (size_t i = 0; i < N; i++) { if (a[i] != 3 || b[i] != 5 || c[i] != 15) { fprintf(stderr, "unexpected result a: %lu b: %lu c: %lu\n", a[i], b[i], c[i]); exit(1); } } end = clock(); log("host: access all arrays a second time", start, end); start = clock(); 
check(hipHostFree(a)); check(hipHostFree(b)); check(hipHostFree(c)); end = clock(); log("host: free", start, end); end_program = clock(); log("total", start_program, end_program); return 0; }
21c02e0de995576345286f812fb4adeed0d34771.cu
#include <cstdio> #include <cstdlib> #include <cinttypes> #include <cuda_runtime.h> #include "common.hh" static __global__ void f(const uint64_t a[], const uint64_t b[], uint64_t c[], int64_t N) { int64_t index = threadIdx.x + blockIdx.x * blockDim.x; int64_t stride = blockDim.x * gridDim.x; for (int64_t i = index; i < N; i += stride) { c[i] = a[i] * b[i]; } } static void doit(const uint64_t a[], const uint64_t b[], uint64_t c[], int64_t N) { int blockSize = 256; int64_t numBlocks = (N + blockSize - 1) / blockSize; f<<<numBlocks, blockSize>>>(a, b, c, N); } int main(int argc, char *argv[]) { size_t N = 10000000; clock_t start_program, end_program; clock_t start, end; uint64_t *a, *b, *c; size_t count; if (argc == 2) { N = checked_strtosize(argv[1]); } count = checked_mul(N, sizeof(uint64_t)); /* Initialize context */ check(cudaMallocHost(&a, 128)); check(cudaDeviceSynchronize()); check(cudaFreeHost(a)); start_program = clock(); start = clock(); check(cudaMallocHost(&a, count)); check(cudaMallocHost(&b, count)); check(cudaMallocHost(&c, count)); end = clock(); log("host: MallocHost", start, end); start = clock(); for (size_t i = 0; i < N; i++) { a[i] = 3; b[i] = 5; } end = clock(); log("host: init arrays", start, end); start = clock(); doit(a, b, c, N); check(cudaDeviceSynchronize()); end = clock(); log("device: DMA+compute+synchronize", start, end); start = clock(); for (size_t i = 0; i < N; i++) { if (a[i] != 3 || b[i] != 5 || c[i] != 15) { fprintf(stderr, "unexpected result a: %lu b: %lu c: %lu\n", a[i], b[i], c[i]); exit(1); } } end = clock(); log("host: access all arrays", start, end); start = clock(); for (size_t i = 0; i < N; i++) { if (a[i] != 3 || b[i] != 5 || c[i] != 15) { fprintf(stderr, "unexpected result a: %lu b: %lu c: %lu\n", a[i], b[i], c[i]); exit(1); } } end = clock(); log("host: access all arrays a second time", start, end); start = clock(); check(cudaFreeHost(a)); check(cudaFreeHost(b)); check(cudaFreeHost(c)); end = clock(); log("host: free", 
start, end); end_program = clock(); log("total", start_program, end_program); return 0; }
86c545ae36bd3232a98142c6d5e680d5ff092b21.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _BACKPROP_CUDA_KERNEL_H_ #define _BACKPROP_CUDA_KERNEL_H_ #include <stdio.h> #include "backprop.h" #include "math.h" #include "hip/hip_runtime.h" __global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; // __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in]; __syncthreads(); if((ty & 1) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 1][tx]; if((ty & 3) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 2][tx]; if((ty & 7) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 4][tx]; if((ty & 15) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 8][tx]; } } } } input_hidden_cuda[index] = weight_matrix[ty][tx]; if (tx == 0) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } __global__ void bpnn_adjust_weights_cuda(float * delta, int hid, float * ly, int in, float * w, float * oldw) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; //eta = 0.3; //momentum = 0.3; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by ==0){ w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } #endif
86c545ae36bd3232a98142c6d5e680d5ff092b21.cu
#ifndef _BACKPROP_CUDA_KERNEL_H_ #define _BACKPROP_CUDA_KERNEL_H_ #include <stdio.h> #include "backprop.h" #include "math.h" #include "cuda.h" __global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; // __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in]; __syncthreads(); if((ty & 1) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 1][tx]; if((ty & 3) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 2][tx]; if((ty & 7) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 4][tx]; if((ty & 15) ==0){ weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + 8][tx]; } } } } input_hidden_cuda[index] = weight_matrix[ty][tx]; if (tx == 0) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } __global__ void bpnn_adjust_weights_cuda(float * delta, int hid, float * ly, int in, float * w, float * oldw) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; //eta = 0.3; //momentum = 0.3; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by ==0){ w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } #endif
7349837fa03203d209aef41ccf7493b3ad4bf6fa.hip
// !!! This is a file automatically generated by hipify!!! #include <random> #include <vector> #include <tuple> #include <cstdio> #include <cstdlib> #include <functional> #include <algorithm> #include "SyncedMemory.h" #include "Timer.h" #include "counting.h" using namespace std; #define CHECK {\ auto e = hipDeviceSynchronize();\ if (e != hipSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\ abort();\ }\ } template <typename Engine> tuple<vector<char>, vector<int>> GenerateTestCase(Engine &eng, const int N) { poisson_distribution<int> pd(14.0); bernoulli_distribution bd(0.1); uniform_int_distribution<int> id1(1, 20); uniform_int_distribution<int> id2(1, 5); uniform_int_distribution<int> id3('a', 'z'); tuple<vector<char>, vector<int>> ret; auto &text = get<0>(ret); auto &pos = get<1>(ret); auto gen_rand_word_len = [&] () -> int { return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0))); }; auto gen_rand_space_len = [&] () -> int { return id2(eng); }; auto gen_rand_char = [&] () { return id3(eng); }; auto AddWord = [&] () { int n = gen_rand_word_len(); for (int i = 0; i < n; ++i) { text.push_back(gen_rand_char()); pos.push_back(i+1); } }; auto AddSpace = [&] () { int n = gen_rand_space_len(); for (int i = 0; i < n; ++i) { text.push_back('\n'); pos.push_back(0); } }; AddWord(); while (text.size() < N) { AddSpace(); AddWord(); } return ret; } void TestRoutine( SyncedMemory<int>& yours_sync, SyncedMemory<char>& text_sync, const int n, const int part, const int *golden ) { // Initialization Timer timer_count_position; int *yours_gpu = yours_sync.get_gpu_wo(); hipMemset(yours_gpu, 0, sizeof(int)*n); // Run timer_count_position.Start(); if (part == 1) { CountPosition1(text_sync.get_gpu_ro(), yours_gpu, n); } else { CountPosition2(text_sync.get_gpu_ro(), yours_gpu, n); } CHECK; timer_count_position.Pause(); // Part I check const int *yours = yours_sync.get_cpu_ro(); int n_match = mismatch(golden, golden+n, yours).first - golden; 
printf_timer(timer_count_position); if (n_match != n) { printf("Part %d WA\n", part); } else { printf("Part %d AC\n", part); } } int main(int argc, char **argv) { // Initialize random text default_random_engine engine(12345); auto text_pos_head = GenerateTestCase(engine, 40000000); // 40 MB data vector<char> &text = get<0>(text_pos_head); vector<int> &pos = get<1>(text_pos_head); // Prepare buffers int n = text.size(); char *text_gpu; hipMalloc(&text_gpu, sizeof(char)*n); SyncedMemory<char> text_sync(text.data(), text_gpu, n); text_sync.get_cpu_wo(); // touch the cpu data MemoryBuffer<int> yours1_buf(n); MemoryBuffer<int> yours2_buf(n); auto yours1_mb = yours1_buf.CreateSync(n); auto yours2_mb = yours2_buf.CreateSync(n); // We test 2 in first to prevent cheating TestRoutine(yours1_mb, text_sync, n, 2, pos.data()); TestRoutine(yours2_mb, text_sync, n, 1, pos.data()); hipFree(text_gpu); return 0; }
7349837fa03203d209aef41ccf7493b3ad4bf6fa.cu
#include <random> #include <vector> #include <tuple> #include <cstdio> #include <cstdlib> #include <functional> #include <algorithm> #include "SyncedMemory.h" #include "Timer.h" #include "counting.h" using namespace std; #define CHECK {\ auto e = cudaDeviceSynchronize();\ if (e != cudaSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\ abort();\ }\ } template <typename Engine> tuple<vector<char>, vector<int>> GenerateTestCase(Engine &eng, const int N) { poisson_distribution<int> pd(14.0); bernoulli_distribution bd(0.1); uniform_int_distribution<int> id1(1, 20); uniform_int_distribution<int> id2(1, 5); uniform_int_distribution<int> id3('a', 'z'); tuple<vector<char>, vector<int>> ret; auto &text = get<0>(ret); auto &pos = get<1>(ret); auto gen_rand_word_len = [&] () -> int { return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0))); }; auto gen_rand_space_len = [&] () -> int { return id2(eng); }; auto gen_rand_char = [&] () { return id3(eng); }; auto AddWord = [&] () { int n = gen_rand_word_len(); for (int i = 0; i < n; ++i) { text.push_back(gen_rand_char()); pos.push_back(i+1); } }; auto AddSpace = [&] () { int n = gen_rand_space_len(); for (int i = 0; i < n; ++i) { text.push_back('\n'); pos.push_back(0); } }; AddWord(); while (text.size() < N) { AddSpace(); AddWord(); } return ret; } void TestRoutine( SyncedMemory<int>& yours_sync, SyncedMemory<char>& text_sync, const int n, const int part, const int *golden ) { // Initialization Timer timer_count_position; int *yours_gpu = yours_sync.get_gpu_wo(); cudaMemset(yours_gpu, 0, sizeof(int)*n); // Run timer_count_position.Start(); if (part == 1) { CountPosition1(text_sync.get_gpu_ro(), yours_gpu, n); } else { CountPosition2(text_sync.get_gpu_ro(), yours_gpu, n); } CHECK; timer_count_position.Pause(); // Part I check const int *yours = yours_sync.get_cpu_ro(); int n_match = mismatch(golden, golden+n, yours).first - golden; printf_timer(timer_count_position); if (n_match != n) { 
printf("Part %d WA\n", part); } else { printf("Part %d AC\n", part); } } int main(int argc, char **argv) { // Initialize random text default_random_engine engine(12345); auto text_pos_head = GenerateTestCase(engine, 40000000); // 40 MB data vector<char> &text = get<0>(text_pos_head); vector<int> &pos = get<1>(text_pos_head); // Prepare buffers int n = text.size(); char *text_gpu; cudaMalloc(&text_gpu, sizeof(char)*n); SyncedMemory<char> text_sync(text.data(), text_gpu, n); text_sync.get_cpu_wo(); // touch the cpu data MemoryBuffer<int> yours1_buf(n); MemoryBuffer<int> yours2_buf(n); auto yours1_mb = yours1_buf.CreateSync(n); auto yours2_mb = yours2_buf.CreateSync(n); // We test 2 in first to prevent cheating TestRoutine(yours1_mb, text_sync, n, 2, pos.data()); TestRoutine(yours2_mb, text_sync, n, 1, pos.data()); cudaFree(text_gpu); return 0; }
a1565a8901bf133bd1f8ff954edd10b71c9a9882.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void squareFunc(unsigned int *d_in, unsigned int *d_out) { int idx = threadIdx.x; unsigned int val = d_in[idx]; d_out[idx] = val * val; //printf("%d square value %d \n ", idx, d_out[idx]); }
a1565a8901bf133bd1f8ff954edd10b71c9a9882.cu
#include "includes.h" __global__ void squareFunc(unsigned int *d_in, unsigned int *d_out) { int idx = threadIdx.x; unsigned int val = d_in[idx]; d_out[idx] = val * val; //printf("%d square value %d \n ", idx, d_out[idx]); }
7e77b3710ebcad7380fe8627d84b43c00f78f036.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" namespace filter { template void linearColumn<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream); } #endif /* CUDA_DISABLER */
7e77b3710ebcad7380fe8627d84b43c00f78f036.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" namespace filter { template void linearColumn<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream); } #endif /* CUDA_DISABLER */
74c457fb90da46b94a3fb52c7f17e96af8c95f5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <string> #include <fstream> #include <fstream> #include <vector> #include <stdint.h> #include <cassert> #include <sys/time.h> #define NUM_STACK 80 //#define GPUDEBUG #define ENABLE_PAIR #define ENABLE_TRIPLE #define ENABLE_INTERSECTION #define ENABLE_XWING #define ENABLE_YWING #ifdef GPUDEBUG #define GPU_PF(...) printf(__VA_ARGS__) #else #define GPU_PF(...) #endif #define GPU_CHECKERROR( err ) (gpuCheckError( err, __FILE__, __LINE__ )) static void gpuCheckError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } enum { STAT_NOCHG = 0, STAT_UPDATED = 1, STAT_NOTOK = 2, STAT_FINISHED = 3, }; enum { SUFLG_PROPAGATE_SINGLE = (1<<0), //propagated the fact that I solved this cell already }; template<int SIZE> struct SudokuProblem { uint32_t givens[SIZE][SIZE]; //0 if unknown, digit otherwise }; /* warning: will break on sizes > 32 */ template<int SIZE> struct SudokuState { uint32_t bitstate[SIZE][SIZE]; uint8_t work_flag[SIZE][SIZE]; //flags to cache work done int8_t curr_r, curr_c, curr_dig; //counter for recursion }; template<int SIZE> void print_state(const SudokuState<SIZE> &s) { for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { fprintf(stderr, "(%d, %d)", r,c); for(int t=0;t<SIZE;++t) { if(s.bitstate[r][c] & (1u<<t)) { fprintf(stderr, " %d", t+1); } } fprintf(stderr, "\n"); } } } __device__ inline void do_remove_mask(uint32_t *data, int mask, int *bstatus) { if(atomicAnd(data, ~mask) & mask){ *bstatus = STAT_UPDATED; } } #include <quickcheck.h> #include <simple_cand_elim.h> #include <singleton_search.h> #include <pair_search.h> template<int RSIZE> __global__ void triple_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; __shared__ uint32_t 
bit_counts[RSIZE*RSIZE]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; s.bitstate[r][c] = myval; //rows for(int row=0;row<RSIZE*RSIZE;++row) { bit_counts[r] = 0; __syncthreads(); //look at bit r of cell c if(s.bitstate[row][c] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); if(!block_ok){break;} //check if the pair of digits (r,c) has a third digit so that they happen in exactly 3 places if(r < c) { const uint32_t xx = (bit_counts[r] | bit_counts[c]); for(int odig=c+1;odig<RSIZE*RSIZE;++odig) { const uint32_t x = (bit_counts[odig] | xx); int ct = __popc(x); if(ct <= 2) { //There aren't enough cells //That's no good! block_ok = 0; } else if(ct == 3) { const uint32_t qq = (1u<<r)|(1u<<c)|(1u<<odig); for(int t=0;t<RSIZE*RSIZE;++t) { if(x & (1u<<t)) { if(qq != (qq | atomicAnd(&s.bitstate[row][t], qq))) { block_status = STAT_UPDATED; } } } } } } __syncthreads(); //check if the pair of cells (r, c) has a third cell that has only 3 digits if(r < c) { const uint32_t basepair = ( s.bitstate[row][r]| s.bitstate[row][c]); if(__popc(basepair) <= 3) { for(int ocell=c+1;ocell<RSIZE*RSIZE;++ocell) { const uint32_t tmask = (basepair| s.bitstate[row][ocell]); if(__popc(tmask) == 3) { for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c && t != ocell) { do_remove_mask(&s.bitstate[row][t], tmask, &block_status); } } } } } } } if(!block_ok){goto ending;} //columns for(int col=0;col<RSIZE*RSIZE;++col) { if(!block_ok){break;} bit_counts[r] = 0; __syncthreads(); //look at bit r of cell c if(s.bitstate[c][col] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //check if the pair of digits (r,c) has a third digit so that they happen in exactly 3 places if(r < c) { const uint32_t xx = (bit_counts[r] | bit_counts[c]); for(int odig=c+1;odig<RSIZE*RSIZE;++odig) { const uint32_t x = (bit_counts[odig] | xx); int ct = 
__popc(x); if(ct <= 2) { //There aren't enough cells //That's no good! block_ok = 0; } else if(ct == 3) { const uint32_t qq = (1u<<r)|(1u<<c)|(1u<<odig); for(int t=0;t<RSIZE*RSIZE;++t) { if(x & (1u<<t)) { if(qq != (qq | atomicAnd(&s.bitstate[t][col], qq))) { block_status = STAT_UPDATED; } } } } } } __syncthreads(); //check if the pair of cells (r, c) has a third cell that has only 3 digits if(r < c) { const uint32_t basepair = ( s.bitstate[r][col]| s.bitstate[c][col]); if(__popc(basepair) <= 3) { for(int ocell=c+1;ocell<RSIZE*RSIZE;++ocell) { const uint32_t tmask = (basepair| s.bitstate[ocell][col]); if(__popc(tmask) == 3) { for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c && t != ocell) { do_remove_mask(&s.bitstate[t][col], tmask, &block_status); } } } } } } } if(!block_ok){goto ending;} //regions for(int regionid=0;regionid<RSIZE*RSIZE;++regionid) { const int baser = RSIZE*(regionid/RSIZE); const int basec = RSIZE*(regionid%RSIZE); if(!block_ok){break;} bit_counts[r] = 0; __syncthreads(); //look at bit r of cell c if(s.bitstate[baser+(c/RSIZE)][basec+(c%RSIZE)] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //check if the pair of digits (r,c) has a third digit so that they happen in exactly 3 places if(r < c) { const uint32_t xx = (bit_counts[r] | bit_counts[c]); for(int odig=c+1;odig<RSIZE*RSIZE;++odig) { const uint32_t x = (bit_counts[odig] | xx); int ct = __popc(x); if(ct <= 2) { //There aren't enough cells //That's no good! 
block_ok = 0; } else if(ct == 3) { const uint32_t qq = (1u<<r)|(1u<<c)|(1u<<odig); for(int t=0;t<RSIZE*RSIZE;++t) { if(x & (1u<<t)) { if(qq != (qq | atomicAnd(&s.bitstate[baser+(t/RSIZE)][basec+(t%RSIZE)], qq))) { block_status = STAT_UPDATED; } } } } } } __syncthreads(); //check if the pair of cells (r, c) has a third cell that has only 3 digits if(r < c) { const uint32_t basepair = ( s.bitstate[baser+(r/RSIZE)][basec+(r%RSIZE)]| s.bitstate[baser+(c/RSIZE)][basec+(c%RSIZE)]); if(__popc(basepair) <= 3) { for(int ocell=c+1;ocell<RSIZE*RSIZE;++ocell) { const uint32_t tmask = (basepair| s.bitstate[baser+(ocell/RSIZE)][basec+(ocell%RSIZE)]); if(__popc(tmask) == 3) { for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c && t != ocell) { do_remove_mask(&s.bitstate[baser+(t/RSIZE)][basec+(t%RSIZE)], tmask, &block_status); } } } } } } } __syncthreads(); ending: if(block_ok && block_status == STAT_UPDATED) { p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } /* "Pointing Pairs", but also includes other (2 of box/row/column) interactions */ template<int RSIZE> __global__ void intersection_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; __shared__ uint32_t bit_counts[RSIZE*RSIZE]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; const uint32_t BASEMASK = (RSIZE == 3)?0x49: ( (RSIZE == 4)?0x1111: ( (RSIZE == 5)?0x108421:0)); s.bitstate[r][c] = myval; __syncthreads(); /* look for pairs/triples in box */ for(int boxr=0;boxr<RSIZE;++boxr) for(int boxc=0;boxc<RSIZE;++boxc) { const int baser = RSIZE * boxr; const int basec = RSIZE * boxc; //thread (r,c) will check for digit r in cell c of the box if(c == 0) { bit_counts[r] = 0; } __syncthreads(); 
if(s.bitstate[baser+(c/RSIZE)][basec+(c%RSIZE)] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //now we check for each digit whether it signals a row or column intersection //for digit d //if bit counts mask == xxx000000 // eliminate rest in row 0 //if bit counts mask == 000xxx000 // eliminate rest in row 1 //... { //row intersection if(!(c >= basec && c < basec+RSIZE)) { for(int x=0,mymask=((1u<<RSIZE)-1);x<RSIZE;++x, mymask<<=RSIZE) { if((bit_counts[r] & mymask) == bit_counts[r]) { do_remove_mask(&s.bitstate[baser+x][c], (1u<<r), &block_status); } } } //column intersection if(!(c >= baser && c < baser+RSIZE)) { for(int x=0,mymask=BASEMASK;x<RSIZE;++x, mymask<<=1) { if((bit_counts[r] & mymask) == bit_counts[r]) { do_remove_mask(&s.bitstate[c][basec+x], (1u<<r), &block_status); } } } } __syncthreads(); } /* TODO: row/box and col/box interaction */ if(block_ok && block_status == STAT_UPDATED) { //implies ok && finalval has 1 bit set p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } template<int RSIZE> __global__ void xwing_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; __shared__ uint32_t bit_counts[RSIZE*RSIZE]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; s.bitstate[r][c] = myval; __syncthreads(); for(int dig=0;dig<RSIZE*RSIZE;++dig) { //look by row if(c == 0) bit_counts[r] = 0; __syncthreads(); if((s.bitstate[r][c] & (1u<<dig))) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //now we look for 2 rows that are identical and //have exactly two spots open if(r < c) { if(bit_counts[r] == bit_counts[c] && __popc(bit_counts[r]) == 2) { //we have to remove this digit from the other things in the two bits that are there int col_a = 
__ffs(bit_counts[r])-1; int col_b = 31-__clz(bit_counts[r]); //GPU_PF("Found xw %x %d by row %d %d, cols %d %d\n", bit_counts[r], dig, r, c, col_a, col_b); for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c) { do_remove_mask(&s.bitstate[t][col_a], (1u << dig), &block_status); do_remove_mask(&s.bitstate[t][col_b], (1u << dig), &block_status); } } } } //now look by column if(c == 0) bit_counts[r] = 0; __syncthreads(); if((s.bitstate[r][c] & (1u<<dig))) { atomicOr(&bit_counts[c], (1u << r)); } __syncthreads(); if(r < c) { if(bit_counts[r] == bit_counts[c] && __popc(bit_counts[r]) == 2) { //we have to remove this digit from the other things in the two bits that are there int row_a = __ffs(bit_counts[r])-1; int row_b = 31-__clz(bit_counts[r]); //GPU_PF("Found xw %x %d by col %d %d, rows %d %d\n", bit_counts[r], dig, r, c, row_a, row_b); for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c) { do_remove_mask(&s.bitstate[row_a][t], (1u << dig), &block_status); do_remove_mask(&s.bitstate[row_b][t], (1u << dig), &block_status); } } } } __syncthreads(); } if(block_ok && block_status == STAT_UPDATED) { p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } template<int RSIZE> __global__ void ywing_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; //left versus right __shared__ uint32_t bit_counts[RSIZE*RSIZE][RSIZE*RSIZE][2]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; const int mybits = __popc(myval); s.bitstate[r][c] = myval; __syncthreads(); //iterate over every cell with exactly 2 values, //propagate a search for other bivalued cells with one value in common, //set bitmask for the other side to indicate that cell //then take the set bitmasks and propagate those, hopefully 
you then have overlap to //remove from main board for(int keyr=0;keyr<RSIZE*RSIZE;++keyr) { for(int keyc=0;keyc<RSIZE*RSIZE;++keyc) { const uint32_t kval = s.bitstate[keyr][keyc]; const uint32_t lowval = __ffs(kval)-1; if(__popc(kval) != 2){continue;} bit_counts[r][c][0] = bit_counts[r][c][1] = 0; __syncthreads(); //now attempt to propagate if((r == keyr) || (c == keyc) || ((r/RSIZE) == (keyr/RSIZE) && (c/RSIZE) == (keyc/RSIZE))) { if(mybits == 2) { uint32_t x = (myval & (~kval)); if(x && !(x & (x-1))) { int target_idx = 0; if(myval == (x | (1u<<lowval))) { target_idx = 0; } else { target_idx = 1; } //propagate to all cells stemming from this one for(int t=0;t<9;++t) { if(t != c){ atomicOr(&bit_counts[r][t][target_idx], x); } if(t != r) { atomicOr(&bit_counts[t][c][target_idx], x); } if(t != (RSIZE*(r%RSIZE) + (c%RSIZE))) { atomicOr(&bit_counts[RSIZE*(r/RSIZE)+(t/3)][RSIZE*(c/RSIZE)+(t%3)][target_idx], x); } } } } } __syncthreads(); //now do intersection on each propagated thing { const uint32_t rval = (bit_counts[r][c][0]&bit_counts[r][c][1]); if(rval) { do_remove_mask(&s.bitstate[r][c], rval, &block_status); } } __syncthreads(); if(block_status == STAT_UPDATED) { goto ending; } if(!block_ok) { goto ending; } } } ending:; if(block_ok && block_status == STAT_UPDATED) { p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } template<int SIZE> void fill_state_from_problem(SudokuState<SIZE> *state, const SudokuProblem<SIZE> &problem) { memset(state, 0, sizeof(SudokuState<SIZE>)); for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { if(problem.givens[r][c] != 0) { assert(problem.givens[r][c] >= 1); assert(problem.givens[r][c] <= SIZE); state->bitstate[r][c] = (1u << (problem.givens[r][c]-1)); } else { state->bitstate[r][c] = (1u << SIZE) - 1; } } } } template<int RSIZE> int check_state(const SudokuState<RSIZE*RSIZE> &s) { const char *foo = getenv("VERBOSE"); bool ok = true; 
for(int r=0;r<RSIZE*RSIZE;++r) { for(int c=0;c<RSIZE*RSIZE;++c) { const uint32_t bs = s.bitstate[r][c]; if(bs == 0 || (bs & (bs-1)) != 0) { if(foo) std::cerr << "Row " << r << " " << "col " << c << " not singleton!" << std::endl; ok = false; } } } if(!ok){return 0;} //row check const uint32_t GOAL = (1u << (RSIZE*RSIZE)) - 1; for(int r=0;r<RSIZE*RSIZE;++r) { uint32_t xx = 0; for(int i=0;i<RSIZE*RSIZE;++i) { const int nr = r; const int nc = i; xx |= s.bitstate[nr][nc]; } if(xx != GOAL) { if(foo) std::cerr << "Row " << r << " is no good" << std::endl; ok = false; break; } } if(!ok){return 0;} for(int c=0;c<RSIZE*RSIZE;++c) { uint32_t xx = 0; for(int i=0;i<RSIZE*RSIZE;++i) { const int nr = i; const int nc = c; xx |= s.bitstate[nr][nc]; } if(xx != GOAL) { if(foo) std::cerr << "Col " << c << " is no good" << std::endl; ok = false; break; } } if(!ok){return 0;} for(int br=0;br<RSIZE && ok;++br) { for(int bc=0;bc<RSIZE;++bc) { uint32_t xx = 0; for(int i=0;i<RSIZE*RSIZE;++i) { const int nr = RSIZE*br + (i/RSIZE); const int nc = RSIZE*bc + (i%RSIZE); xx |= s.bitstate[nr][nc]; } if(xx != GOAL) { if(foo) std::cerr << "Region " << br << "," << bc << " is no good" << std::endl; ok = false; break; } } } if(!ok){return 0;} if(foo) std::cerr << "ALL GOOD!" 
<< std::endl; return 1; } static double timeval_diff(const struct timeval *start, const struct timeval *end) { return 1e-6 * (end->tv_usec - start->tv_usec) + (end->tv_sec - start->tv_sec); } #define SIZE 9 #define RSIZE 3 int cpu_naive_recurse(SudokuState<9> *p) { //const int RSIZE = 3; //const int SIZE = RSIZE*RSIZE; uint32_t hold_row[SIZE]; uint32_t hold_col[SIZE]; uint32_t hold_region[SIZE]; for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { uint32_t x = p->bitstate[r][c]; if(x == 0){return 0;} if(!(x & (x-1))) { for(int i=0;i<SIZE;++i) { if(i != r && p->bitstate[i][c] == x){return 0;} if(i != c && p->bitstate[r][i] == x){return 0;} const int nr = (RSIZE*(r/RSIZE))+(i/RSIZE); const int nc = (RSIZE*(c/RSIZE))+(i%RSIZE); if(!(nr == r && nc == c) && p->bitstate[nr][nc] == x){return 0;} } } } } for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { uint32_t oldval = p->bitstate[r][c]; if((oldval & (oldval-1)) != 0){ for(int q=0;q<SIZE;++q) { if(oldval & (1u<<q)) { const uint32_t mask = ~(1u<<q); const int baser = RSIZE * (r/RSIZE); const int basec = RSIZE * (c/RSIZE); //try doing this with q for(int i=0;i<SIZE;++i){ if(i != c) { hold_row[i] = p->bitstate[r][i]; p->bitstate[r][i] &= mask; } if(i != r) { hold_col[i] = p->bitstate[i][c]; p->bitstate[i][c] &= mask; } const int nr = baser + (i/RSIZE); const int nc = basec + (i%RSIZE); if(!(nr == r && nc == c)) { hold_region[i] = p->bitstate[nr][nc]; p->bitstate[nr][nc] &= mask; } } p->bitstate[r][c] = (1u<<q); if(cpu_naive_recurse(p)) {return 1;} p->bitstate[r][c] = oldval; for(int i=SIZE-1;i>=0;--i){ const int nr = baser + (i/RSIZE); const int nc = basec + (i%RSIZE); if(!(nr == r && nc == c)) { p->bitstate[nr][nc] = hold_region[i]; } if(i != r) { p->bitstate[i][c] = hold_col[i]; } if(i != c) { p->bitstate[r][i] = hold_row[i]; } } } } return 0; } } } return 1; } #undef SIZE #undef RSIZE template<int RSIZE> __device__ bool iterate_guess(SudokuState<RSIZE*RSIZE> *guesser, SudokuState<RSIZE*RSIZE> *dest) { int rr = 
guesser->curr_r; int cc = guesser->curr_c; int dd = guesser->curr_dig; //increment for next thing ++dd; if(dd == RSIZE*RSIZE){ dd = 0; ++cc; if(cc == RSIZE*RSIZE) { cc = 0;++rr; } } for(;rr < RSIZE*RSIZE;++rr) { for(;cc < RSIZE*RSIZE;++cc) { if(__popc(guesser->bitstate[rr][cc]) <= 1){dd = 0;continue;} for(;dd < RSIZE*RSIZE;++dd) { if(guesser->bitstate[rr][cc] & (1u<<dd)) { //we found a spot to try memcpy(dest, guesser, sizeof(SudokuState<RSIZE*RSIZE>)); dest->bitstate[rr][cc] = (1u << dd); guesser->curr_r = rr; guesser->curr_c = cc; guesser->curr_dig = dd; return true; } } dd = 0; } cc = 0; } return false; } template<int RSIZE> __global__ void sudokusolver_gpu_main(SudokuState<RSIZE*RSIZE> *p, SudokuState<RSIZE*RSIZE> *save_stack, int ss_size, int *rc) { const dim3 num_block(1,1,1); const dim3 super_num_block(RSIZE*RSIZE,1,1); const dim3 threads_per_block(RSIZE*RSIZE,RSIZE*RSIZE,1); int stack_ptr = 0; thetop:; for(*rc = STAT_UPDATED;*rc == STAT_UPDATED;) { *rc = STAT_NOCHG; __syncthreads(); hipLaunchKernelGGL(( quickcheck<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, 0, p, rc); hipDeviceSynchronize(); GPU_PF("QUICKCHECK - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} //simple_cand_elim<RSIZE><<<num_block, threads_per_block>>>(p, rc); hipLaunchKernelGGL(( simple_cand_elim_v2<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, 0, p, rc); hipDeviceSynchronize(); GPU_PF("SIMPLE - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} hipLaunchKernelGGL(( singleton_search<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, 0, p,rc); //singleton_search_v2<RSIZE><<<num_block, threads_per_block>>>(p,rc); hipDeviceSynchronize(); GPU_PF("SINGLETON - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #ifdef ENABLE_PAIR //pair_search<RSIZE><<<num_block, threads_per_block>>>(p, rc); hipLaunchKernelGGL(( pair_search_v2<RSIZE>), dim3(super_num_block), dim3(threads_per_block), 0, 0, p, rc); hipDeviceSynchronize(); GPU_PF("PAIR SEARCH - GOT RC %d\n", *rc); if(*rc 
!= STAT_NOCHG){continue;} #endif #ifdef ENABLE_TRIPLE hipLaunchKernelGGL(( triple_search<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, 0, p, rc); hipDeviceSynchronize(); GPU_PF("TRIPLE SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif #ifdef ENABLE_INTERSECTION hipLaunchKernelGGL(( intersection_search<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, 0, p, rc); hipDeviceSynchronize(); GPU_PF("INTERSECTION SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif #ifdef ENABLE_XWING hipLaunchKernelGGL(( xwing_search<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, 0, p, rc); hipDeviceSynchronize(); GPU_PF("XWING SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif #ifdef ENABLE_YWING hipLaunchKernelGGL(( ywing_search<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, 0, p, rc); hipDeviceSynchronize(); GPU_PF("YWING SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif } //did we win? if(*rc == STAT_FINISHED) { return; } else if(*rc == STAT_NOTOK || stack_ptr >= ss_size) { //we've hit a contradiction, so we need to iter the stack while(stack_ptr > 0) { /* remove the current guess from the save stack */ SudokuState<RSIZE*RSIZE> &ss = save_stack[stack_ptr-1]; int nextdig; for(nextdig=ss.curr_dig+1;nextdig<RSIZE*RSIZE;++nextdig) { if(ss.bitstate[ss.curr_r][ss.curr_c] & (1u<<nextdig)) { ss.curr_dig = nextdig; memcpy(p, &ss, sizeof(SudokuState<RSIZE*RSIZE>)); p->bitstate[ss.curr_r][ss.curr_c] = (1u<<nextdig); GPU_PF("NOW TRYING %d %d %d %d\n", stack_ptr-1, ss.curr_r, ss.curr_c, nextdig); goto thetop; } } //no next digit, we're also a failure! 
GPU_PF("Now popping to %d\n", stack_ptr-1); --stack_ptr; } } else if(stack_ptr < ss_size) { //we are stuck, but not dead, make a guess and add to stack, unless we're out of stack memcpy(&save_stack[stack_ptr], p, sizeof(SudokuState<RSIZE*RSIZE>)); int bestr = 0;int bestc = 0; int bestsc = RSIZE*RSIZE+1; for(int rr=0;rr<RSIZE*RSIZE;++rr) { for(int cc=0;cc<RSIZE*RSIZE;++cc) { int x = __popc(p->bitstate[rr][cc]); if(x > 1 && x < bestsc) { bestr = rr;bestc = cc;bestsc = x; } } } save_stack[stack_ptr].curr_r = bestr; save_stack[stack_ptr].curr_c = bestc; save_stack[stack_ptr].curr_dig = 0; SudokuState<RSIZE*RSIZE> &ss = save_stack[stack_ptr]; for(int nextdig=0;nextdig<RSIZE*RSIZE;++nextdig) { if(ss.bitstate[ss.curr_r][ss.curr_c] & (1u<<nextdig)) { GPU_PF("GUESS TIME %d %d %d %d %d\n", stack_ptr, ss.curr_r, ss.curr_c, nextdig, bestsc); ss.curr_dig = nextdig; memcpy(p, &ss, sizeof(SudokuState<RSIZE*RSIZE>)); p->bitstate[ss.curr_r][ss.curr_c] = (1u<<nextdig); ++stack_ptr; goto thetop; } } //we really shouldn't get here, let the whole thing die GPU_PF("SHOULDNT GET HERE!!!!\n"); } } static SudokuState<9> *d_state; static SudokuState<9> *d_sstack; static int *d_rc; static int gpumalloc = 0; int test_basics2(SudokuState<9> &state) { int h_rc; /* TODO: better timing */ struct timeval tstart, tend; const int num_stack = NUM_STACK; if(!gpumalloc) { GPU_CHECKERROR(hipMalloc((void **)&d_state, sizeof(SudokuState<9>))); GPU_CHECKERROR(hipMalloc((void **)&d_sstack, num_stack*sizeof(SudokuState<9>))); GPU_CHECKERROR(hipMalloc((void **)&d_rc, sizeof(int))); gpumalloc = 1; } gettimeofday(&tstart, 0); { GPU_CHECKERROR(hipMemset(d_rc, 0, sizeof(int))); GPU_CHECKERROR(hipMemcpy(d_state, &state, sizeof(SudokuState<9>), hipMemcpyHostToDevice)); const dim3 num_block(1,1,1); const dim3 threads_per_block(1,1,1); hipLaunchKernelGGL(( sudokusolver_gpu_main<3>), dim3(num_block), dim3(threads_per_block), 0, 0, d_state, d_sstack, num_stack, d_rc); GPU_CHECKERROR(hipGetLastError()); 
GPU_CHECKERROR(hipMemcpy(&h_rc, d_rc, sizeof(int), hipMemcpyDeviceToHost)); GPU_CHECKERROR(hipMemcpy(&state, d_state, sizeof(SudokuState<9>), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); } gettimeofday(&tend, 0); std::cerr << "GOT OVERALL RC " << h_rc << std::endl; std::cerr << "TOOK TIME " << timeval_diff(&tstart, &tend) * 1000.0 << " ms" << std::endl; /* we don't free here in the interest of bulk mode */ //hipFree(d_state); //hipFree(d_rc); if(check_state<3>(state)) { std::cerr << "PASS" << std::endl; return 1; } else { std::cerr << "*****FAIL*****" << std::endl; return 0; } } template<int RSIZE> void pretty_print(SudokuState<RSIZE> &state) { for(int i=0;i<RSIZE;++i) { for(int j=0;j<RSIZE;++j) { uint32_t x = state.bitstate[i][j]; if(x && !(x & (x-1))) { for(int i=0;i<32;++i) { if(x & (1u<<i)) { if(i+1 < 10) { std::cout << (char)('0'+(i+1)); } else { std::cout << (char)('A'+(i+1-10)); } } } } else { std::cout << "."; } } std::cout << std::endl; } } template<int RSIZE> void run_batch(SudokuState<RSIZE*RSIZE> *states, size_t num_states) { const int num_streams = 8; SudokuState<RSIZE*RSIZE> *d_states[num_streams]; SudokuState<RSIZE*RSIZE> *d_sstack[num_streams]; int *d_rcs; const int num_stack = NUM_STACK; int VV = 0; if(getenv("VERBOSE") != NULL){VV = 1;} int PP = 0; if(getenv("PRETTY") != NULL){PP = 1;} struct timeval tstart, tend; std::vector<int> h_rcs(num_states, 0); std::vector<hipEvent_t> e_starts(num_states); std::vector<hipEvent_t> e_stops(num_states); std::vector<hipStream_t> streams(num_streams); if(num_states > 0 && PP) { pretty_print(states[0]); } const dim3 num_block(1,1,1); const dim3 threads_per_block(1,1,1); for(int i=0;i<num_streams;++i) { GPU_CHECKERROR(hipMalloc((void **)&d_states[i], sizeof(states[0]))); GPU_CHECKERROR(hipMalloc((void **)&d_sstack[i], num_stack*sizeof(states[0]))); GPU_CHECKERROR(hipStreamCreate(&streams[i])); } for(int i=0;i<num_states;++i) { GPU_CHECKERROR(hipEventCreate(&e_starts[i])); 
GPU_CHECKERROR(hipEventCreate(&e_stops[i])); } GPU_CHECKERROR(hipMalloc((void **)&d_rcs, num_states * sizeof(int))); gettimeofday(&tstart, 0); { GPU_CHECKERROR(hipMemset((void *)d_rcs, 0, num_states * sizeof(int))); for(int i=0;i<num_states;++i) { int stream_num = i % num_streams; GPU_CHECKERROR(hipEventRecord(e_starts[i], streams[stream_num])); GPU_CHECKERROR(hipMemcpyAsync(d_states[stream_num], states + i, sizeof(states[i]), hipMemcpyHostToDevice, streams[stream_num])); hipLaunchKernelGGL(( sudokusolver_gpu_main<RSIZE>), dim3(num_block), dim3(threads_per_block), 0, streams[stream_num], d_states[stream_num], d_sstack[stream_num], num_stack, &d_rcs[i]); GPU_CHECKERROR(hipMemcpyAsync(states+i, d_states[stream_num], sizeof(states[i]), hipMemcpyDeviceToHost, streams[stream_num])); GPU_CHECKERROR(hipMemcpyAsync(&h_rcs[i], d_rcs + i, sizeof(int), hipMemcpyDeviceToHost, streams[stream_num])); GPU_CHECKERROR(hipEventRecord(e_stops[i], streams[stream_num])); } } for(int i=0;i<num_states;++i) { hipEventSynchronize(e_stops[i]); } gettimeofday(&tend, 0); if(num_states > 0 && PP) { std::cout << std::endl; pretty_print(states[0]); } for(int i=0;i<num_states;++i) { float time_taken; hipEventElapsedTime(&time_taken, e_starts[i], e_stops[i]); std::cout << "PUZZLE " << i << " RC " << h_rcs[i] << " TIME " << time_taken << " ms" << std::endl; if(VV) { print_state<RSIZE*RSIZE>(states[i]); } if(check_state<RSIZE>(states[i])) { std::cout << "PASS" << std::endl; } else { std::cout << "FAIL" << std::endl; } } } int main(int argc, char **argv) { if(argc > 1) { std::cerr << "Entering bulk mode on file " << argv[1] << std::endl; std::ifstream fin(argv[1]); std::vector<std::string> vs; int RS = 3; if(argc > 2) { sscanf(argv[2], "%d", &RS); } if(!(RS >= 3 && RS <= 5)) { std::cerr << "NOT VALID SIZE" << std::endl; } int SS = RS*RS; { std::string s; while(fin >> s) { if(s.size() != SS*SS){ std::cerr << "Warning, incomplete string '" << s << "', skipping" << std::endl; } else { vs.push_back(s); } 
} } if(RS == 3) { std::vector<SudokuState<9> > states(vs.size()); for(int i=0;i<vs.size();++i) { SudokuProblem<9> problem; memset(&problem, 0, sizeof(problem)); const std::string &s = vs[i]; for(int t=0;t<s.size();++t) { if(s[t] >= '1' && s[t] <= '9') { int dig = s[t] - '0'; int r = t/9; int c = t%9; problem.givens[r][c] = dig; } } fill_state_from_problem(&states[i], problem); } run_batch<3>(&states[0], states.size()); } else if(RS == 4) { std::vector<SudokuState<16> > states(vs.size()); for(int i=0;i<vs.size();++i) { SudokuProblem<16> problem; memset(&problem, 0, sizeof(problem)); const std::string &s = vs[i]; for(int t=0;t<s.size();++t) { if(s[t] >= '1' && s[t] <= '9') { int dig = s[t] - '0'; int r = t/16; int c = t%16; problem.givens[r][c] = dig; } else if(s[t] >= 'A' && s[t] <= 'G') { int dig = s[t] - 'A' + 10; int r = t/16; int c = t%16; problem.givens[r][c] = dig; } } fill_state_from_problem(&states[i], problem); } run_batch<4>(&states[0], states.size()); } else if(RS == 5) { std::vector<SudokuState<25> > states(vs.size()); for(int i=0;i<vs.size();++i) { SudokuProblem<25> problem; memset(&problem, 0, sizeof(problem)); const std::string &s = vs[i]; for(int t=0;t<s.size();++t) { if(s[t] >= '1' && s[t] <= '9') { int dig = s[t] - '0'; int r = t/25; int c = t%25; problem.givens[r][c] = dig; } else if(s[t] >= 'A' && s[t] <= 'Z') { int dig = s[t] - 'A' + 10; int r = t/25; int c = t%25; problem.givens[r][c] = dig; } } fill_state_from_problem(&states[i], problem); } run_batch<5>(&states[0], states.size()); } return 0; } std::string s; std::cin >> s; if(s.size() != 81) { std::cerr << "NEED 81 cells" << std::endl; } SudokuProblem<9> problem; memset(&problem, 0, sizeof(problem)); for(int i=0;i<s.size();++i) { if(s[i] >= '1' && s[i] <= '9') { int dig = s[i] - '0'; int r = i/9; int c = i % 9; problem.givens[r][c] = dig; } } SudokuState<9> mystate; fill_state_from_problem(&mystate, problem); //print_state(mystate); test_basics2(mystate); if(gpumalloc) { hipFree(d_state); 
hipFree(d_rc); } return 0; }
74c457fb90da46b94a3fb52c7f17e96af8c95f5e.cu
#include <iostream> #include <string> #include <fstream> #include <fstream> #include <vector> #include <stdint.h> #include <cassert> #include <sys/time.h> #define NUM_STACK 80 //#define GPUDEBUG #define ENABLE_PAIR #define ENABLE_TRIPLE #define ENABLE_INTERSECTION #define ENABLE_XWING #define ENABLE_YWING #ifdef GPUDEBUG #define GPU_PF(...) printf(__VA_ARGS__) #else #define GPU_PF(...) #endif #define GPU_CHECKERROR( err ) (gpuCheckError( err, __FILE__, __LINE__ )) static void gpuCheckError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } enum { STAT_NOCHG = 0, STAT_UPDATED = 1, STAT_NOTOK = 2, STAT_FINISHED = 3, }; enum { SUFLG_PROPAGATE_SINGLE = (1<<0), //propagated the fact that I solved this cell already }; template<int SIZE> struct SudokuProblem { uint32_t givens[SIZE][SIZE]; //0 if unknown, digit otherwise }; /* warning: will break on sizes > 32 */ template<int SIZE> struct SudokuState { uint32_t bitstate[SIZE][SIZE]; uint8_t work_flag[SIZE][SIZE]; //flags to cache work done int8_t curr_r, curr_c, curr_dig; //counter for recursion }; template<int SIZE> void print_state(const SudokuState<SIZE> &s) { for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { fprintf(stderr, "(%d, %d)", r,c); for(int t=0;t<SIZE;++t) { if(s.bitstate[r][c] & (1u<<t)) { fprintf(stderr, " %d", t+1); } } fprintf(stderr, "\n"); } } } __device__ inline void do_remove_mask(uint32_t *data, int mask, int *bstatus) { if(atomicAnd(data, ~mask) & mask){ *bstatus = STAT_UPDATED; } } #include <quickcheck.h> #include <simple_cand_elim.h> #include <singleton_search.h> #include <pair_search.h> template<int RSIZE> __global__ void triple_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; __shared__ uint32_t bit_counts[RSIZE*RSIZE]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 
0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; s.bitstate[r][c] = myval; //rows for(int row=0;row<RSIZE*RSIZE;++row) { bit_counts[r] = 0; __syncthreads(); //look at bit r of cell c if(s.bitstate[row][c] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); if(!block_ok){break;} //check if the pair of digits (r,c) has a third digit so that they happen in exactly 3 places if(r < c) { const uint32_t xx = (bit_counts[r] | bit_counts[c]); for(int odig=c+1;odig<RSIZE*RSIZE;++odig) { const uint32_t x = (bit_counts[odig] | xx); int ct = __popc(x); if(ct <= 2) { //There aren't enough cells //That's no good! block_ok = 0; } else if(ct == 3) { const uint32_t qq = (1u<<r)|(1u<<c)|(1u<<odig); for(int t=0;t<RSIZE*RSIZE;++t) { if(x & (1u<<t)) { if(qq != (qq | atomicAnd(&s.bitstate[row][t], qq))) { block_status = STAT_UPDATED; } } } } } } __syncthreads(); //check if the pair of cells (r, c) has a third cell that has only 3 digits if(r < c) { const uint32_t basepair = ( s.bitstate[row][r]| s.bitstate[row][c]); if(__popc(basepair) <= 3) { for(int ocell=c+1;ocell<RSIZE*RSIZE;++ocell) { const uint32_t tmask = (basepair| s.bitstate[row][ocell]); if(__popc(tmask) == 3) { for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c && t != ocell) { do_remove_mask(&s.bitstate[row][t], tmask, &block_status); } } } } } } } if(!block_ok){goto ending;} //columns for(int col=0;col<RSIZE*RSIZE;++col) { if(!block_ok){break;} bit_counts[r] = 0; __syncthreads(); //look at bit r of cell c if(s.bitstate[c][col] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //check if the pair of digits (r,c) has a third digit so that they happen in exactly 3 places if(r < c) { const uint32_t xx = (bit_counts[r] | bit_counts[c]); for(int odig=c+1;odig<RSIZE*RSIZE;++odig) { const uint32_t x = (bit_counts[odig] | xx); int ct = __popc(x); if(ct <= 2) { //There aren't enough cells //That's no good! 
block_ok = 0; } else if(ct == 3) { const uint32_t qq = (1u<<r)|(1u<<c)|(1u<<odig); for(int t=0;t<RSIZE*RSIZE;++t) { if(x & (1u<<t)) { if(qq != (qq | atomicAnd(&s.bitstate[t][col], qq))) { block_status = STAT_UPDATED; } } } } } } __syncthreads(); //check if the pair of cells (r, c) has a third cell that has only 3 digits if(r < c) { const uint32_t basepair = ( s.bitstate[r][col]| s.bitstate[c][col]); if(__popc(basepair) <= 3) { for(int ocell=c+1;ocell<RSIZE*RSIZE;++ocell) { const uint32_t tmask = (basepair| s.bitstate[ocell][col]); if(__popc(tmask) == 3) { for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c && t != ocell) { do_remove_mask(&s.bitstate[t][col], tmask, &block_status); } } } } } } } if(!block_ok){goto ending;} //regions for(int regionid=0;regionid<RSIZE*RSIZE;++regionid) { const int baser = RSIZE*(regionid/RSIZE); const int basec = RSIZE*(regionid%RSIZE); if(!block_ok){break;} bit_counts[r] = 0; __syncthreads(); //look at bit r of cell c if(s.bitstate[baser+(c/RSIZE)][basec+(c%RSIZE)] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //check if the pair of digits (r,c) has a third digit so that they happen in exactly 3 places if(r < c) { const uint32_t xx = (bit_counts[r] | bit_counts[c]); for(int odig=c+1;odig<RSIZE*RSIZE;++odig) { const uint32_t x = (bit_counts[odig] | xx); int ct = __popc(x); if(ct <= 2) { //There aren't enough cells //That's no good! 
block_ok = 0; } else if(ct == 3) { const uint32_t qq = (1u<<r)|(1u<<c)|(1u<<odig); for(int t=0;t<RSIZE*RSIZE;++t) { if(x & (1u<<t)) { if(qq != (qq | atomicAnd(&s.bitstate[baser+(t/RSIZE)][basec+(t%RSIZE)], qq))) { block_status = STAT_UPDATED; } } } } } } __syncthreads(); //check if the pair of cells (r, c) has a third cell that has only 3 digits if(r < c) { const uint32_t basepair = ( s.bitstate[baser+(r/RSIZE)][basec+(r%RSIZE)]| s.bitstate[baser+(c/RSIZE)][basec+(c%RSIZE)]); if(__popc(basepair) <= 3) { for(int ocell=c+1;ocell<RSIZE*RSIZE;++ocell) { const uint32_t tmask = (basepair| s.bitstate[baser+(ocell/RSIZE)][basec+(ocell%RSIZE)]); if(__popc(tmask) == 3) { for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c && t != ocell) { do_remove_mask(&s.bitstate[baser+(t/RSIZE)][basec+(t%RSIZE)], tmask, &block_status); } } } } } } } __syncthreads(); ending: if(block_ok && block_status == STAT_UPDATED) { p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } /* "Pointing Pairs", but also includes other (2 of box/row/column) interactions */ template<int RSIZE> __global__ void intersection_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; __shared__ uint32_t bit_counts[RSIZE*RSIZE]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; const uint32_t BASEMASK = (RSIZE == 3)?0x49: ( (RSIZE == 4)?0x1111: ( (RSIZE == 5)?0x108421:0)); s.bitstate[r][c] = myval; __syncthreads(); /* look for pairs/triples in box */ for(int boxr=0;boxr<RSIZE;++boxr) for(int boxc=0;boxc<RSIZE;++boxc) { const int baser = RSIZE * boxr; const int basec = RSIZE * boxc; //thread (r,c) will check for digit r in cell c of the box if(c == 0) { bit_counts[r] = 0; } __syncthreads(); 
if(s.bitstate[baser+(c/RSIZE)][basec+(c%RSIZE)] & (1<<r)) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //now we check for each digit whether it signals a row or column intersection //for digit d //if bit counts mask == xxx000000 // eliminate rest in row 0 //if bit counts mask == 000xxx000 // eliminate rest in row 1 //... { //row intersection if(!(c >= basec && c < basec+RSIZE)) { for(int x=0,mymask=((1u<<RSIZE)-1);x<RSIZE;++x, mymask<<=RSIZE) { if((bit_counts[r] & mymask) == bit_counts[r]) { do_remove_mask(&s.bitstate[baser+x][c], (1u<<r), &block_status); } } } //column intersection if(!(c >= baser && c < baser+RSIZE)) { for(int x=0,mymask=BASEMASK;x<RSIZE;++x, mymask<<=1) { if((bit_counts[r] & mymask) == bit_counts[r]) { do_remove_mask(&s.bitstate[c][basec+x], (1u<<r), &block_status); } } } } __syncthreads(); } /* TODO: row/box and col/box interaction */ if(block_ok && block_status == STAT_UPDATED) { //implies ok && finalval has 1 bit set p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } template<int RSIZE> __global__ void xwing_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; __shared__ uint32_t bit_counts[RSIZE*RSIZE]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; s.bitstate[r][c] = myval; __syncthreads(); for(int dig=0;dig<RSIZE*RSIZE;++dig) { //look by row if(c == 0) bit_counts[r] = 0; __syncthreads(); if((s.bitstate[r][c] & (1u<<dig))) { atomicOr(&bit_counts[r], (1u << c)); } __syncthreads(); //now we look for 2 rows that are identical and //have exactly two spots open if(r < c) { if(bit_counts[r] == bit_counts[c] && __popc(bit_counts[r]) == 2) { //we have to remove this digit from the other things in the two bits that are there int col_a = 
__ffs(bit_counts[r])-1; int col_b = 31-__clz(bit_counts[r]); //GPU_PF("Found xw %x %d by row %d %d, cols %d %d\n", bit_counts[r], dig, r, c, col_a, col_b); for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c) { do_remove_mask(&s.bitstate[t][col_a], (1u << dig), &block_status); do_remove_mask(&s.bitstate[t][col_b], (1u << dig), &block_status); } } } } //now look by column if(c == 0) bit_counts[r] = 0; __syncthreads(); if((s.bitstate[r][c] & (1u<<dig))) { atomicOr(&bit_counts[c], (1u << r)); } __syncthreads(); if(r < c) { if(bit_counts[r] == bit_counts[c] && __popc(bit_counts[r]) == 2) { //we have to remove this digit from the other things in the two bits that are there int row_a = __ffs(bit_counts[r])-1; int row_b = 31-__clz(bit_counts[r]); //GPU_PF("Found xw %x %d by col %d %d, rows %d %d\n", bit_counts[r], dig, r, c, row_a, row_b); for(int t=0;t<RSIZE*RSIZE;++t) { if(t != r && t != c) { do_remove_mask(&s.bitstate[row_a][t], (1u << dig), &block_status); do_remove_mask(&s.bitstate[row_b][t], (1u << dig), &block_status); } } } } __syncthreads(); } if(block_ok && block_status == STAT_UPDATED) { p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } template<int RSIZE> __global__ void ywing_search(SudokuState<RSIZE*RSIZE> *p, int *rc) { __shared__ SudokuState<RSIZE*RSIZE> s; __shared__ int block_status, block_ok; //left versus right __shared__ uint32_t bit_counts[RSIZE*RSIZE][RSIZE*RSIZE][2]; const int r = threadIdx.x; const int c = threadIdx.y; if(r == 0 && c == 0){block_status = STAT_NOCHG;block_ok = 1;} //copy current values const uint32_t myval = p->bitstate[r][c]; const int mybits = __popc(myval); s.bitstate[r][c] = myval; __syncthreads(); //iterate over every cell with exactly 2 values, //propagate a search for other bivalued cells with one value in common, //set bitmask for the other side to indicate that cell //then take the set bitmasks and propagate those, hopefully 
you then have overlap to //remove from main board for(int keyr=0;keyr<RSIZE*RSIZE;++keyr) { for(int keyc=0;keyc<RSIZE*RSIZE;++keyc) { const uint32_t kval = s.bitstate[keyr][keyc]; const uint32_t lowval = __ffs(kval)-1; if(__popc(kval) != 2){continue;} bit_counts[r][c][0] = bit_counts[r][c][1] = 0; __syncthreads(); //now attempt to propagate if((r == keyr) || (c == keyc) || ((r/RSIZE) == (keyr/RSIZE) && (c/RSIZE) == (keyc/RSIZE))) { if(mybits == 2) { uint32_t x = (myval & (~kval)); if(x && !(x & (x-1))) { int target_idx = 0; if(myval == (x | (1u<<lowval))) { target_idx = 0; } else { target_idx = 1; } //propagate to all cells stemming from this one for(int t=0;t<9;++t) { if(t != c){ atomicOr(&bit_counts[r][t][target_idx], x); } if(t != r) { atomicOr(&bit_counts[t][c][target_idx], x); } if(t != (RSIZE*(r%RSIZE) + (c%RSIZE))) { atomicOr(&bit_counts[RSIZE*(r/RSIZE)+(t/3)][RSIZE*(c/RSIZE)+(t%3)][target_idx], x); } } } } } __syncthreads(); //now do intersection on each propagated thing { const uint32_t rval = (bit_counts[r][c][0]&bit_counts[r][c][1]); if(rval) { do_remove_mask(&s.bitstate[r][c], rval, &block_status); } } __syncthreads(); if(block_status == STAT_UPDATED) { goto ending; } if(!block_ok) { goto ending; } } } ending:; if(block_ok && block_status == STAT_UPDATED) { p->bitstate[r][c] = s.bitstate[r][c]; } __syncthreads(); if(r == 0 && c == 0) { if(!block_ok) { block_status = STAT_NOTOK; } *rc = block_status; } } template<int SIZE> void fill_state_from_problem(SudokuState<SIZE> *state, const SudokuProblem<SIZE> &problem) { memset(state, 0, sizeof(SudokuState<SIZE>)); for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { if(problem.givens[r][c] != 0) { assert(problem.givens[r][c] >= 1); assert(problem.givens[r][c] <= SIZE); state->bitstate[r][c] = (1u << (problem.givens[r][c]-1)); } else { state->bitstate[r][c] = (1u << SIZE) - 1; } } } } template<int RSIZE> int check_state(const SudokuState<RSIZE*RSIZE> &s) { const char *foo = getenv("VERBOSE"); bool ok = true; 
for(int r=0;r<RSIZE*RSIZE;++r) { for(int c=0;c<RSIZE*RSIZE;++c) { const uint32_t bs = s.bitstate[r][c]; if(bs == 0 || (bs & (bs-1)) != 0) { if(foo) std::cerr << "Row " << r << " " << "col " << c << " not singleton!" << std::endl; ok = false; } } } if(!ok){return 0;} //row check const uint32_t GOAL = (1u << (RSIZE*RSIZE)) - 1; for(int r=0;r<RSIZE*RSIZE;++r) { uint32_t xx = 0; for(int i=0;i<RSIZE*RSIZE;++i) { const int nr = r; const int nc = i; xx |= s.bitstate[nr][nc]; } if(xx != GOAL) { if(foo) std::cerr << "Row " << r << " is no good" << std::endl; ok = false; break; } } if(!ok){return 0;} for(int c=0;c<RSIZE*RSIZE;++c) { uint32_t xx = 0; for(int i=0;i<RSIZE*RSIZE;++i) { const int nr = i; const int nc = c; xx |= s.bitstate[nr][nc]; } if(xx != GOAL) { if(foo) std::cerr << "Col " << c << " is no good" << std::endl; ok = false; break; } } if(!ok){return 0;} for(int br=0;br<RSIZE && ok;++br) { for(int bc=0;bc<RSIZE;++bc) { uint32_t xx = 0; for(int i=0;i<RSIZE*RSIZE;++i) { const int nr = RSIZE*br + (i/RSIZE); const int nc = RSIZE*bc + (i%RSIZE); xx |= s.bitstate[nr][nc]; } if(xx != GOAL) { if(foo) std::cerr << "Region " << br << "," << bc << " is no good" << std::endl; ok = false; break; } } } if(!ok){return 0;} if(foo) std::cerr << "ALL GOOD!" 
<< std::endl; return 1; } static double timeval_diff(const struct timeval *start, const struct timeval *end) { return 1e-6 * (end->tv_usec - start->tv_usec) + (end->tv_sec - start->tv_sec); } #define SIZE 9 #define RSIZE 3 int cpu_naive_recurse(SudokuState<9> *p) { //const int RSIZE = 3; //const int SIZE = RSIZE*RSIZE; uint32_t hold_row[SIZE]; uint32_t hold_col[SIZE]; uint32_t hold_region[SIZE]; for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { uint32_t x = p->bitstate[r][c]; if(x == 0){return 0;} if(!(x & (x-1))) { for(int i=0;i<SIZE;++i) { if(i != r && p->bitstate[i][c] == x){return 0;} if(i != c && p->bitstate[r][i] == x){return 0;} const int nr = (RSIZE*(r/RSIZE))+(i/RSIZE); const int nc = (RSIZE*(c/RSIZE))+(i%RSIZE); if(!(nr == r && nc == c) && p->bitstate[nr][nc] == x){return 0;} } } } } for(int r=0;r<SIZE;++r) { for(int c=0;c<SIZE;++c) { uint32_t oldval = p->bitstate[r][c]; if((oldval & (oldval-1)) != 0){ for(int q=0;q<SIZE;++q) { if(oldval & (1u<<q)) { const uint32_t mask = ~(1u<<q); const int baser = RSIZE * (r/RSIZE); const int basec = RSIZE * (c/RSIZE); //try doing this with q for(int i=0;i<SIZE;++i){ if(i != c) { hold_row[i] = p->bitstate[r][i]; p->bitstate[r][i] &= mask; } if(i != r) { hold_col[i] = p->bitstate[i][c]; p->bitstate[i][c] &= mask; } const int nr = baser + (i/RSIZE); const int nc = basec + (i%RSIZE); if(!(nr == r && nc == c)) { hold_region[i] = p->bitstate[nr][nc]; p->bitstate[nr][nc] &= mask; } } p->bitstate[r][c] = (1u<<q); if(cpu_naive_recurse(p)) {return 1;} p->bitstate[r][c] = oldval; for(int i=SIZE-1;i>=0;--i){ const int nr = baser + (i/RSIZE); const int nc = basec + (i%RSIZE); if(!(nr == r && nc == c)) { p->bitstate[nr][nc] = hold_region[i]; } if(i != r) { p->bitstate[i][c] = hold_col[i]; } if(i != c) { p->bitstate[r][i] = hold_row[i]; } } } } return 0; } } } return 1; } #undef SIZE #undef RSIZE template<int RSIZE> __device__ bool iterate_guess(SudokuState<RSIZE*RSIZE> *guesser, SudokuState<RSIZE*RSIZE> *dest) { int rr = 
guesser->curr_r; int cc = guesser->curr_c; int dd = guesser->curr_dig; //increment for next thing ++dd; if(dd == RSIZE*RSIZE){ dd = 0; ++cc; if(cc == RSIZE*RSIZE) { cc = 0;++rr; } } for(;rr < RSIZE*RSIZE;++rr) { for(;cc < RSIZE*RSIZE;++cc) { if(__popc(guesser->bitstate[rr][cc]) <= 1){dd = 0;continue;} for(;dd < RSIZE*RSIZE;++dd) { if(guesser->bitstate[rr][cc] & (1u<<dd)) { //we found a spot to try memcpy(dest, guesser, sizeof(SudokuState<RSIZE*RSIZE>)); dest->bitstate[rr][cc] = (1u << dd); guesser->curr_r = rr; guesser->curr_c = cc; guesser->curr_dig = dd; return true; } } dd = 0; } cc = 0; } return false; } template<int RSIZE> __global__ void sudokusolver_gpu_main(SudokuState<RSIZE*RSIZE> *p, SudokuState<RSIZE*RSIZE> *save_stack, int ss_size, int *rc) { const dim3 num_block(1,1,1); const dim3 super_num_block(RSIZE*RSIZE,1,1); const dim3 threads_per_block(RSIZE*RSIZE,RSIZE*RSIZE,1); int stack_ptr = 0; thetop:; for(*rc = STAT_UPDATED;*rc == STAT_UPDATED;) { *rc = STAT_NOCHG; __syncthreads(); quickcheck<RSIZE><<<num_block, threads_per_block>>>(p, rc); cudaDeviceSynchronize(); GPU_PF("QUICKCHECK - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} //simple_cand_elim<RSIZE><<<num_block, threads_per_block>>>(p, rc); simple_cand_elim_v2<RSIZE><<<num_block, threads_per_block>>>(p, rc); cudaDeviceSynchronize(); GPU_PF("SIMPLE - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} singleton_search<RSIZE><<<num_block, threads_per_block>>>(p,rc); //singleton_search_v2<RSIZE><<<num_block, threads_per_block>>>(p,rc); cudaDeviceSynchronize(); GPU_PF("SINGLETON - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #ifdef ENABLE_PAIR //pair_search<RSIZE><<<num_block, threads_per_block>>>(p, rc); pair_search_v2<RSIZE><<<super_num_block, threads_per_block>>>(p, rc); cudaDeviceSynchronize(); GPU_PF("PAIR SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif #ifdef ENABLE_TRIPLE triple_search<RSIZE><<<num_block, threads_per_block>>>(p, rc); cudaDeviceSynchronize(); 
GPU_PF("TRIPLE SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif #ifdef ENABLE_INTERSECTION intersection_search<RSIZE><<<num_block, threads_per_block>>>(p, rc); cudaDeviceSynchronize(); GPU_PF("INTERSECTION SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif #ifdef ENABLE_XWING xwing_search<RSIZE><<<num_block, threads_per_block>>>(p, rc); cudaDeviceSynchronize(); GPU_PF("XWING SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif #ifdef ENABLE_YWING ywing_search<RSIZE><<<num_block, threads_per_block>>>(p, rc); cudaDeviceSynchronize(); GPU_PF("YWING SEARCH - GOT RC %d\n", *rc); if(*rc != STAT_NOCHG){continue;} #endif } //did we win? if(*rc == STAT_FINISHED) { return; } else if(*rc == STAT_NOTOK || stack_ptr >= ss_size) { //we've hit a contradiction, so we need to iter the stack while(stack_ptr > 0) { /* remove the current guess from the save stack */ SudokuState<RSIZE*RSIZE> &ss = save_stack[stack_ptr-1]; int nextdig; for(nextdig=ss.curr_dig+1;nextdig<RSIZE*RSIZE;++nextdig) { if(ss.bitstate[ss.curr_r][ss.curr_c] & (1u<<nextdig)) { ss.curr_dig = nextdig; memcpy(p, &ss, sizeof(SudokuState<RSIZE*RSIZE>)); p->bitstate[ss.curr_r][ss.curr_c] = (1u<<nextdig); GPU_PF("NOW TRYING %d %d %d %d\n", stack_ptr-1, ss.curr_r, ss.curr_c, nextdig); goto thetop; } } //no next digit, we're also a failure! 
GPU_PF("Now popping to %d\n", stack_ptr-1); --stack_ptr; } } else if(stack_ptr < ss_size) { //we are stuck, but not dead, make a guess and add to stack, unless we're out of stack memcpy(&save_stack[stack_ptr], p, sizeof(SudokuState<RSIZE*RSIZE>)); int bestr = 0;int bestc = 0; int bestsc = RSIZE*RSIZE+1; for(int rr=0;rr<RSIZE*RSIZE;++rr) { for(int cc=0;cc<RSIZE*RSIZE;++cc) { int x = __popc(p->bitstate[rr][cc]); if(x > 1 && x < bestsc) { bestr = rr;bestc = cc;bestsc = x; } } } save_stack[stack_ptr].curr_r = bestr; save_stack[stack_ptr].curr_c = bestc; save_stack[stack_ptr].curr_dig = 0; SudokuState<RSIZE*RSIZE> &ss = save_stack[stack_ptr]; for(int nextdig=0;nextdig<RSIZE*RSIZE;++nextdig) { if(ss.bitstate[ss.curr_r][ss.curr_c] & (1u<<nextdig)) { GPU_PF("GUESS TIME %d %d %d %d %d\n", stack_ptr, ss.curr_r, ss.curr_c, nextdig, bestsc); ss.curr_dig = nextdig; memcpy(p, &ss, sizeof(SudokuState<RSIZE*RSIZE>)); p->bitstate[ss.curr_r][ss.curr_c] = (1u<<nextdig); ++stack_ptr; goto thetop; } } //we really shouldn't get here, let the whole thing die GPU_PF("SHOULDNT GET HERE!!!!\n"); } } static SudokuState<9> *d_state; static SudokuState<9> *d_sstack; static int *d_rc; static int gpumalloc = 0; int test_basics2(SudokuState<9> &state) { int h_rc; /* TODO: better timing */ struct timeval tstart, tend; const int num_stack = NUM_STACK; if(!gpumalloc) { GPU_CHECKERROR(cudaMalloc((void **)&d_state, sizeof(SudokuState<9>))); GPU_CHECKERROR(cudaMalloc((void **)&d_sstack, num_stack*sizeof(SudokuState<9>))); GPU_CHECKERROR(cudaMalloc((void **)&d_rc, sizeof(int))); gpumalloc = 1; } gettimeofday(&tstart, 0); { GPU_CHECKERROR(cudaMemset(d_rc, 0, sizeof(int))); GPU_CHECKERROR(cudaMemcpy(d_state, &state, sizeof(SudokuState<9>), cudaMemcpyHostToDevice)); const dim3 num_block(1,1,1); const dim3 threads_per_block(1,1,1); sudokusolver_gpu_main<3><<<num_block, threads_per_block>>>(d_state, d_sstack, num_stack, d_rc); GPU_CHECKERROR(cudaGetLastError()); GPU_CHECKERROR(cudaMemcpy(&h_rc, d_rc, 
sizeof(int), cudaMemcpyDeviceToHost)); GPU_CHECKERROR(cudaMemcpy(&state, d_state, sizeof(SudokuState<9>), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); } gettimeofday(&tend, 0); std::cerr << "GOT OVERALL RC " << h_rc << std::endl; std::cerr << "TOOK TIME " << timeval_diff(&tstart, &tend) * 1000.0 << " ms" << std::endl; /* we don't free here in the interest of bulk mode */ //cudaFree(d_state); //cudaFree(d_rc); if(check_state<3>(state)) { std::cerr << "PASS" << std::endl; return 1; } else { std::cerr << "*****FAIL*****" << std::endl; return 0; } } template<int RSIZE> void pretty_print(SudokuState<RSIZE> &state) { for(int i=0;i<RSIZE;++i) { for(int j=0;j<RSIZE;++j) { uint32_t x = state.bitstate[i][j]; if(x && !(x & (x-1))) { for(int i=0;i<32;++i) { if(x & (1u<<i)) { if(i+1 < 10) { std::cout << (char)('0'+(i+1)); } else { std::cout << (char)('A'+(i+1-10)); } } } } else { std::cout << "."; } } std::cout << std::endl; } } template<int RSIZE> void run_batch(SudokuState<RSIZE*RSIZE> *states, size_t num_states) { const int num_streams = 8; SudokuState<RSIZE*RSIZE> *d_states[num_streams]; SudokuState<RSIZE*RSIZE> *d_sstack[num_streams]; int *d_rcs; const int num_stack = NUM_STACK; int VV = 0; if(getenv("VERBOSE") != NULL){VV = 1;} int PP = 0; if(getenv("PRETTY") != NULL){PP = 1;} struct timeval tstart, tend; std::vector<int> h_rcs(num_states, 0); std::vector<cudaEvent_t> e_starts(num_states); std::vector<cudaEvent_t> e_stops(num_states); std::vector<cudaStream_t> streams(num_streams); if(num_states > 0 && PP) { pretty_print(states[0]); } const dim3 num_block(1,1,1); const dim3 threads_per_block(1,1,1); for(int i=0;i<num_streams;++i) { GPU_CHECKERROR(cudaMalloc((void **)&d_states[i], sizeof(states[0]))); GPU_CHECKERROR(cudaMalloc((void **)&d_sstack[i], num_stack*sizeof(states[0]))); GPU_CHECKERROR(cudaStreamCreate(&streams[i])); } for(int i=0;i<num_states;++i) { GPU_CHECKERROR(cudaEventCreate(&e_starts[i])); GPU_CHECKERROR(cudaEventCreate(&e_stops[i])); } 
GPU_CHECKERROR(cudaMalloc((void **)&d_rcs, num_states * sizeof(int))); gettimeofday(&tstart, 0); { GPU_CHECKERROR(cudaMemset((void *)d_rcs, 0, num_states * sizeof(int))); for(int i=0;i<num_states;++i) { int stream_num = i % num_streams; GPU_CHECKERROR(cudaEventRecord(e_starts[i], streams[stream_num])); GPU_CHECKERROR(cudaMemcpyAsync(d_states[stream_num], states + i, sizeof(states[i]), cudaMemcpyHostToDevice, streams[stream_num])); sudokusolver_gpu_main<RSIZE><<<num_block, threads_per_block, 0, streams[stream_num]>>>(d_states[stream_num], d_sstack[stream_num], num_stack, &d_rcs[i]); GPU_CHECKERROR(cudaMemcpyAsync(states+i, d_states[stream_num], sizeof(states[i]), cudaMemcpyDeviceToHost, streams[stream_num])); GPU_CHECKERROR(cudaMemcpyAsync(&h_rcs[i], d_rcs + i, sizeof(int), cudaMemcpyDeviceToHost, streams[stream_num])); GPU_CHECKERROR(cudaEventRecord(e_stops[i], streams[stream_num])); } } for(int i=0;i<num_states;++i) { cudaEventSynchronize(e_stops[i]); } gettimeofday(&tend, 0); if(num_states > 0 && PP) { std::cout << std::endl; pretty_print(states[0]); } for(int i=0;i<num_states;++i) { float time_taken; cudaEventElapsedTime(&time_taken, e_starts[i], e_stops[i]); std::cout << "PUZZLE " << i << " RC " << h_rcs[i] << " TIME " << time_taken << " ms" << std::endl; if(VV) { print_state<RSIZE*RSIZE>(states[i]); } if(check_state<RSIZE>(states[i])) { std::cout << "PASS" << std::endl; } else { std::cout << "FAIL" << std::endl; } } } int main(int argc, char **argv) { if(argc > 1) { std::cerr << "Entering bulk mode on file " << argv[1] << std::endl; std::ifstream fin(argv[1]); std::vector<std::string> vs; int RS = 3; if(argc > 2) { sscanf(argv[2], "%d", &RS); } if(!(RS >= 3 && RS <= 5)) { std::cerr << "NOT VALID SIZE" << std::endl; } int SS = RS*RS; { std::string s; while(fin >> s) { if(s.size() != SS*SS){ std::cerr << "Warning, incomplete string '" << s << "', skipping" << std::endl; } else { vs.push_back(s); } } } if(RS == 3) { std::vector<SudokuState<9> > states(vs.size()); 
for(int i=0;i<vs.size();++i) { SudokuProblem<9> problem; memset(&problem, 0, sizeof(problem)); const std::string &s = vs[i]; for(int t=0;t<s.size();++t) { if(s[t] >= '1' && s[t] <= '9') { int dig = s[t] - '0'; int r = t/9; int c = t%9; problem.givens[r][c] = dig; } } fill_state_from_problem(&states[i], problem); } run_batch<3>(&states[0], states.size()); } else if(RS == 4) { std::vector<SudokuState<16> > states(vs.size()); for(int i=0;i<vs.size();++i) { SudokuProblem<16> problem; memset(&problem, 0, sizeof(problem)); const std::string &s = vs[i]; for(int t=0;t<s.size();++t) { if(s[t] >= '1' && s[t] <= '9') { int dig = s[t] - '0'; int r = t/16; int c = t%16; problem.givens[r][c] = dig; } else if(s[t] >= 'A' && s[t] <= 'G') { int dig = s[t] - 'A' + 10; int r = t/16; int c = t%16; problem.givens[r][c] = dig; } } fill_state_from_problem(&states[i], problem); } run_batch<4>(&states[0], states.size()); } else if(RS == 5) { std::vector<SudokuState<25> > states(vs.size()); for(int i=0;i<vs.size();++i) { SudokuProblem<25> problem; memset(&problem, 0, sizeof(problem)); const std::string &s = vs[i]; for(int t=0;t<s.size();++t) { if(s[t] >= '1' && s[t] <= '9') { int dig = s[t] - '0'; int r = t/25; int c = t%25; problem.givens[r][c] = dig; } else if(s[t] >= 'A' && s[t] <= 'Z') { int dig = s[t] - 'A' + 10; int r = t/25; int c = t%25; problem.givens[r][c] = dig; } } fill_state_from_problem(&states[i], problem); } run_batch<5>(&states[0], states.size()); } return 0; } std::string s; std::cin >> s; if(s.size() != 81) { std::cerr << "NEED 81 cells" << std::endl; } SudokuProblem<9> problem; memset(&problem, 0, sizeof(problem)); for(int i=0;i<s.size();++i) { if(s[i] >= '1' && s[i] <= '9') { int dig = s[i] - '0'; int r = i/9; int c = i % 9; problem.givens[r][c] = dig; } } SudokuState<9> mystate; fill_state_from_problem(&mystate, problem); //print_state(mystate); test_basics2(mystate); if(gpumalloc) { cudaFree(d_state); cudaFree(d_rc); } return 0; }
befb587daddf0b4a483eb57fc1501bda29af5b61.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <typeinfo> #include <opencv2/opencv.hpp> #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include <time.h> using namespace std; using cv::Mat; using cv::imread; using cv::waitKey; __global__ void HelloFromGPU(void) { printf("1\n"); } __global__ void conv2CUDA(float* origin, float* goal, int numRows, int numCols, float* kernel) { int row = blockIdx.x; //block_num0 int col = blockIdx.y; if (row == 0 || col == 0 || row == numRows-1 || col == numCols-1) { return; // } int top_start = numCols * (col - 1) + row - 1; //3X3 int mid_start = numCols * (col)+row - 1; // int button_start = numCols * (col + 1) + row - 1; // // for (int i = 0; i <= 2; i++) { goal[top_start + i] = origin[top_start + i] * kernel[0 + i]; // goal[mid_start + i] = origin[mid_start + i] * kernel[3 + i]; // goal[button_start + i] = origin[button_start + i] * kernel[6 + i]; // } } __global__ void cuda_block_thread(float* origin, float* goal, int numRows, int numCols, float* kernel) { //4096C //printf("c"); //ID //gird8x8 block8x8 int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.y + threadIdx.x; for (int abc = 0; abc < 9; abc++) { kernel[abc] = 11.0 + float(abc); } //XY //int thread_x = tid % 64; //int thread_y = (tid+1) / 64; //8*8 //64 64*64=4096 int row_start = 8*8*64*((tid + 1) / 64)+(tid % 64)*64; for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { //i0-8 // int point_index = row_start + j; // int point_row = point_index % 512; int point_col = (point_index + 1) / 512; if (point_row == 0 || point_row == numRows - 1 || point_col == 0 || point_col == numCols - 1) { continue; } int top_start = (point_col - 1) * numRows + point_row - 1;//3X3(1,1) int mid_start = (point_col) * numRows + point_row - 1; //3X3(2,1) int button_start = (point_col + 1) * numRows + point_row - 1; //3x3(3,1) for (int k = 0; k <= 2; k++) { goal[top_start + k] = 
origin[top_start + k] ; // goal[mid_start + k] = origin[mid_start + k] ; // goal[button_start + k] = origin[button_start + k] ; // } printf("goal_after = %f\n", goal[top_start]); } ////rowstart //printf("%d\n",row_start); row_start = row_start + numRows; } } __global__ void kernel_check(float* kernel) { printf("gpukernel"); for (int abc = 0; abc < 9; abc++) { printf("%f ",kernel[abc]); } } __global__ void litte_block(float* origin, float* goal, int numRows, int numCols) { long int start_id = blockIdx.y * 8 * 64 * 64 + blockIdx.x * 64; //long int pix_y = start_id / 512; //long int pix_x = start_id % 512; //printf("start_id = %ld pix_x = %ld pix_y=%ld block_id_y = %d\n", start_id,pix_x,pix_y,blockIdx //.x); for (int i = 0; i < 64; i++) { for (int j = 0; j < 64; j++) { long pix = start_id + j; long pix_x = pix % 512; long pix_y = pix / 512; if (pix_x == 0 || pix_y == 0 || pix_x == numRows - 1 || pix_y == numCols - 1) { continue; // } long top_start = pix - 512 - 1; long mid_start = pix - 1; long button_start = pix + 512 - 1; float* temp = (float*)malloc(sizeof(float) * 9); for (int k = 0; k <= 2; k++) { temp[0+k] = origin[top_start + k]; // temp[3+k] = origin[mid_start + k]; // temp[6+k] = origin[button_start + k]; // } /* if (pix == 525) { printf("temp="); for (int cd = 0; cd < 9; cd++) { printf("%f ", temp[cd]); } printf("\n"); } */ /* // for (int sort_i = 1; i < 8; i++) { float sort_temp = temp[sort_i]; int sort_j = sort_i - 1; while (sort_j >= 0) { if (temp[sort_j] > sort_temp) temp[sort_j + 1] = temp[sort_j]; else break; sort_j--; } temp[sort_j + 1] = sort_temp; } */ // for (int sort_i = 0; sort_i < 8; sort_i++) { for (int sort_j = 0; sort_j <= 8-i; sort_j++) { if (temp[sort_j] >= temp[sort_j + 1]) { float sort_temp = temp[sort_j + 1]; temp[sort_j + 1] = temp[sort_j]; temp[sort_j] = sort_temp; } } } /* if (pix == 525) { printf("temp="); for (int cd = 0; cd < 9; cd++) { printf("%f ", temp[cd]); } printf("\n"); } */ goal[pix] = temp[4]; free(temp); } start_id = 
start_id + 512; } } // void conv2(float* origin, float* goal, int numRows, int numCols, float* kernel) { int totalPixels = numRows * numCols; // // float* deviceOrigin; //,CUDA float* deviceGoal; //CUDA float* deviceKernal; //CUDA // hipMalloc(&deviceOrigin, sizeof(float) * totalPixels); // hipMalloc(&deviceGoal, sizeof(float) * totalPixels); // hipMalloc(&deviceKernal, sizeof(float) * 3 * 3); //CPU-GPU hipMemcpy(deviceOrigin, origin, sizeof(float) * totalPixels, hipMemcpyHostToDevice); hipMemcpy(deviceKernal, kernel, sizeof(float) * 3*3, hipMemcpyHostToDevice); hipMemcpy(deviceGoal, origin, sizeof(float) * totalPixels, hipMemcpyHostToDevice); //kernal //printf("kernal="); for (int abc = 0; abc < 9; abc++) { kernel[abc] = 11.0 + abc; /*printf("%f ", kernel[abc]);*/ } //printf("\n"); // float time_consume = 0; // event hipEvent_t time_start; hipEvent_t time_end; hipEventCreate(&time_start); hipEventCreate(&time_end); // hipEventRecord(time_start, 0); // //dim 8*8*(8*8)=4096 dim3 n_thread(8, 8); //8x8 dim3 gridSize(8, 8); //8x8 //global //conv2CUDA<<<gridSize,n_thread>>>(deviceOrigin, deviceGoal, numRows, numCols, deviceKernal); int c = (numCols + n_thread.x - 1) / n_thread.x; // 4096 //HelloFromGPU << <gridSize,n_thread >> > (); ////8x8 8x8 4096 //cuda_block_thread << <gridSize, n_thread >> > (deviceOrigin, deviceGoal, numRows, numCols, deviceKernal); ////gpukernel //kernel_check << <1, 1 >> > (deviceKernal); //8*8 64 litte_block << <gridSize, 1 >> > (deviceOrigin, deviceGoal,numRows,numCols); // hipEventRecord(time_end, 0); hipEventSynchronize(time_start); hipEventSynchronize(time_end); // hipEventElapsedTime(&time_consume, time_start, time_end); // hipMemcpy(goal, deviceGoal, sizeof(float) * totalPixels, hipMemcpyDeviceToHost); // printf("%f(ms)\n", time_consume); //free Event hipEventDestroy(time_start); hipEventDestroy(time_end); //free hipFree(deviceOrigin); hipFree(deviceGoal); hipFree(deviceKernal); } void test(int n) { printf("n=%d", n); } int main(void) { 
// Mat Img = imread("1.jpg"); cv::resize(Img, Img, cv::Size(512, 512)); // int height = Img.rows; int width = Img.cols; printf("height=%d\nwidth=%d\n", height,width); // cv::cvtColor(Img, Img, cv::COLOR_BGR2GRAY); // float* origin = (float*)malloc(sizeof(float) * height * width); float* target = (float*)malloc(sizeof(float) * height * width); int index = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { origin[index] = Img.at<uchar>(i, j); target[index] = index; index++; } } clock_t func_time_start; clock_t func_time_end; //float *origin = (float*)malloc(sizeof(float) * 10000); //float *target = (float*)malloc(sizeof(float) * 10000); //for (int i = 0; i < 10000; i++) //{ // srand((int)time(0)); // origin[i] = rand()%10; // target[i] = 1; //} //printf("target_start = %f\n", target[1515]); //printf("origin_start = %f\n", origin[1515]); float kernal[9] = { 11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0 }; func_time_start = clock(); conv2(origin, target, height, width, kernal); func_time_end = clock(); double func_time_consume = (double)(func_time_end - func_time_start) / CLOCKS_PER_SEC; printf("%f seconds\n", func_time_consume); //for (int abc = 523; abc < 553; abc++) //{ // printf(" %f ", origin[abc]); // printf(" %f\n", target[abc]); //} return 0; }
befb587daddf0b4a483eb57fc1501bda29af5b61.cu
#include <iostream> #include <typeinfo> #include <opencv2/opencv.hpp> #include "device_launch_parameters.h" #include "cuda_runtime.h" #include <time.h> using namespace std; using cv::Mat; using cv::imread; using cv::waitKey; __global__ void HelloFromGPU(void) { printf("1\n"); } __global__ void conv2CUDA(float* origin, float* goal, int numRows, int numCols, float* kernel) { int row = blockIdx.x; //block_num从0开始 int col = blockIdx.y; if (row == 0 || col == 0 || row == numRows-1 || col == numCols-1) { return; //边缘部分不滤波 } int top_start = numCols * (col - 1) + row - 1; //起始矩阵位置,3X3矩阵中心的左上角 int mid_start = numCols * (col)+row - 1; //中间左边位置的矩阵 int button_start = numCols * (col + 1) + row - 1; //下面起始矩阵的位置 //卷积 滤波 for (int i = 0; i <= 2; i++) { goal[top_start + i] = origin[top_start + i] * kernel[0 + i]; //卷积操作 goal[mid_start + i] = origin[mid_start + i] * kernel[3 + i]; //卷积操作 goal[button_start + i] = origin[button_start + i] * kernel[6 + i]; //卷积操作 } } __global__ void cuda_block_thread(float* origin, float* goal, int numRows, int numCols, float* kernel) { //4096个线程全部运行打印C //printf("c"); //计算块,然后再加上线程ID //总共线程数是gird的8x8 和线程的block的8x8 int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.y + threadIdx.x; for (int abc = 0; abc < 9; abc++) { kernel[abc] = 11.0 + float(abc); } //将线程块分成X和Y的矩阵 //int thread_x = tid % 64; //int thread_y = (tid+1) / 64; //8*8 线程内矩阵的大小 //64 线程内一行的大小 (总大小 64*64=4096) int row_start = 8*8*64*((tid + 1) / 64)+(tid % 64)*64; for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { //对数据块中i行的第0-8个数据进行处理 //具体像素点的索引 int point_index = row_start + j; //求出对应点的行和列索引 int point_row = point_index % 512; int point_col = (point_index + 1) / 512; if (point_row == 0 || point_row == numRows - 1 || point_col == 0 || point_col == numCols - 1) { continue; } int top_start = (point_col - 1) * numRows + point_row - 1;//3X3矩阵的(1,1) int mid_start = (point_col) * numRows + point_row - 1; //3X3矩阵的(2,1) int button_start = 
(point_col + 1) * numRows + point_row - 1; //3x3矩阵的(3,1) for (int k = 0; k <= 2; k++) { goal[top_start + k] = origin[top_start + k] ; //复制操作 goal[mid_start + k] = origin[mid_start + k] ; //复制操作 goal[button_start + k] = origin[button_start + k] ; //复制操作 } printf("goal_after = %f\n", goal[top_start]); } ////rowstart 输出 //printf("%d\n",row_start); row_start = row_start + numRows; } } __global__ void kernel_check(float* kernel) { printf("gpu中的kernel是:"); for (int abc = 0; abc < 9; abc++) { printf("%f ",kernel[abc]); } } __global__ void litte_block(float* origin, float* goal, int numRows, int numCols) { long int start_id = blockIdx.y * 8 * 64 * 64 + blockIdx.x * 64; //long int pix_y = start_id / 512; //long int pix_x = start_id % 512; //printf("start_id = %ld pix_x = %ld pix_y=%ld block_id_y = %d\n", start_id,pix_x,pix_y,blockIdx //.x); for (int i = 0; i < 64; i++) { for (int j = 0; j < 64; j++) { long pix = start_id + j; long pix_x = pix % 512; long pix_y = pix / 512; if (pix_x == 0 || pix_y == 0 || pix_x == numRows - 1 || pix_y == numCols - 1) { continue; //边缘部分不滤波 } long top_start = pix - 512 - 1; long mid_start = pix - 1; long button_start = pix + 512 - 1; float* temp = (float*)malloc(sizeof(float) * 9); for (int k = 0; k <= 2; k++) { temp[0+k] = origin[top_start + k]; //复制操作 temp[3+k] = origin[mid_start + k]; //复制操作 temp[6+k] = origin[button_start + k]; //复制操作 构造临时矩阵 } /* if (pix == 525) { printf("排序前的temp="); for (int cd = 0; cd < 9; cd++) { printf("%f ", temp[cd]); } printf("\n"); } */ /* //插入排序 for (int sort_i = 1; i < 8; i++) { float sort_temp = temp[sort_i]; int sort_j = sort_i - 1; while (sort_j >= 0) { if (temp[sort_j] > sort_temp) temp[sort_j + 1] = temp[sort_j]; else break; sort_j--; } temp[sort_j + 1] = sort_temp; } */ //冒泡排序 for (int sort_i = 0; sort_i < 8; sort_i++) { for (int sort_j = 0; sort_j <= 8-i; sort_j++) { if (temp[sort_j] >= temp[sort_j + 1]) { float sort_temp = temp[sort_j + 1]; temp[sort_j + 1] = temp[sort_j]; temp[sort_j] = sort_temp; } } } 
/* if (pix == 525) { printf("temp="); for (int cd = 0; cd < 9; cd++) { printf("%f ", temp[cd]); } printf("\n"); } */ goal[pix] = temp[4]; free(temp); } start_id = start_id + 512; } } //申请、分配空间的函数 void conv2(float* origin, float* goal, int numRows, int numCols, float* kernel) { int totalPixels = numRows * numCols; //总共的像素数量 //内存指针定义 float* deviceOrigin; //原始图片的内存,CUDA float* deviceGoal; //目标图片的内存,CUDA float* deviceKernal; //核函数的内存,CUDA //指针内存分配 cudaMalloc(&deviceOrigin, sizeof(float) * totalPixels); //分配原始图片内存 cudaMalloc(&deviceGoal, sizeof(float) * totalPixels); //目标图片的内存数量 cudaMalloc(&deviceKernal, sizeof(float) * 3 * 3); //CPU内存-》GPU内存 cudaMemcpy(deviceOrigin, origin, sizeof(float) * totalPixels, cudaMemcpyHostToDevice); cudaMemcpy(deviceKernal, kernel, sizeof(float) * 3*3, cudaMemcpyHostToDevice); cudaMemcpy(deviceGoal, origin, sizeof(float) * totalPixels, cudaMemcpyHostToDevice); //输出运行前的kernal //printf("原始的kernal="); for (int abc = 0; abc < 9; abc++) { kernel[abc] = 11.0 + abc; /*printf("%f ", kernel[abc]);*/ } //printf("\n"); //初始化时间消耗 float time_consume = 0; //记录时间 创建事件event cudaEvent_t time_start; cudaEvent_t time_end; cudaEventCreate(&time_start); cudaEventCreate(&time_end); //记录当前时间 cudaEventRecord(time_start, 0); //记录程序运行次数 //矩阵dim,维度 8*8*(8*8)=4096线程 dim3 n_thread(8, 8); //8x8线程 dim3 gridSize(8, 8); //8x8块 //调用global函数,执行函数操作 //conv2CUDA<<<gridSize,n_thread>>>(deviceOrigin, deviceGoal, numRows, numCols, deviceKernal); int c = (numCols + n_thread.x - 1) / n_thread.x; //测试线程上限 4096 //HelloFromGPU << <gridSize,n_thread >> > (); ////8x8 8x8 4096 的分块线程 计算卷积 //cuda_block_thread << <gridSize, n_thread >> > (deviceOrigin, deviceGoal, numRows, numCols, deviceKernal); ////检查gpu中的kernel是否正确 //kernel_check << <1, 1 >> > (deviceKernal); //8*8 64个分块线程 计算中值滤波 litte_block << <gridSize, 1 >> > (deviceOrigin, deviceGoal,numRows,numCols); //记录结束时间 cudaEventRecord(time_end, 0); cudaEventSynchronize(time_start); cudaEventSynchronize(time_end); //计算总共消耗的时间 
cudaEventElapsedTime(&time_consume, time_start, time_end); //输出数据 cudaMemcpy(goal, deviceGoal, sizeof(float) * totalPixels, cudaMemcpyDeviceToHost); //输出执行的时间 printf("执行时间:%f(ms)\n", time_consume); //free 事件Event cudaEventDestroy(time_start); cudaEventDestroy(time_end); //free内存 cudaFree(deviceOrigin); cudaFree(deviceGoal); cudaFree(deviceKernal); } void test(int n) { printf("n=%d", n); } int main(void) { //读入图片 Mat Img = imread("1.jpg"); cv::resize(Img, Img, cv::Size(512, 512)); //图片大小 int height = Img.rows; int width = Img.cols; printf("height=%d\nwidth=%d\n", height,width); //转灰度图 cv::cvtColor(Img, Img, cv::COLOR_BGR2GRAY); //分配原始矩阵和目标矩阵的大小 float* origin = (float*)malloc(sizeof(float) * height * width); float* target = (float*)malloc(sizeof(float) * height * width); int index = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { origin[index] = Img.at<uchar>(i, j); target[index] = index; index++; } } clock_t func_time_start; clock_t func_time_end; //float *origin = (float*)malloc(sizeof(float) * 10000); //float *target = (float*)malloc(sizeof(float) * 10000); //for (int i = 0; i < 10000; i++) //{ // srand((int)time(0)); // origin[i] = rand()%10; // target[i] = 1; //} //printf("target_start = %f\n", target[1515]); //printf("origin_start = %f\n", origin[1515]); float kernal[9] = { 11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0 }; func_time_start = clock(); conv2(origin, target, height, width, kernal); func_time_end = clock(); double func_time_consume = (double)(func_time_end - func_time_start) / CLOCKS_PER_SEC; printf("%f seconds\n", func_time_consume); //for (int abc = 523; abc < 553; abc++) //{ // printf(" %f ", origin[abc]); // printf(" %f\n", target[abc]); //} return 0; }
dea7ea0dd3635dd979120cbfd53874f0db14e357.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/IVFInterleaved.cuh> #include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh> namespace faiss { namespace gpu { constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max(); // Second-pass kernel to further k-select the results from the first pass across // IVF lists and produce the final results template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ> __global__ void ivfInterleavedScan2( Tensor<float, 3, true> distanceIn, Tensor<int, 3, true> indicesIn, Tensor<int, 2, true> listIds, int k, void** listIndices, IndicesOptions opt, bool dir, Tensor<float, 2, true> distanceOut, Tensor<Index::idx_t, 2, true> indicesOut) { int queryId = blockIdx.x; constexpr int kNumWarps = ThreadsPerBlock / kWarpSize; __shared__ float smemK[kNumWarps * NumWarpQ]; __shared__ uint32_t smemV[kNumWarps * NumWarpQ]; // To avoid creating excessive specializations, we combine direction // kernels, selecting for the smallest element. If `dir` is true, we negate // all values being selected (so that we are selecting the largest element). BlockSelect< float, uint32_t, false, Comparator<float>, NumWarpQ, NumThreadQ, ThreadsPerBlock> heap(kFloatMax, kMaxUInt32, smemK, smemV, k); // nprobe x k int num = distanceIn.getSize(1) * distanceIn.getSize(2); auto distanceBase = distanceIn[queryId].data(); int limit = utils::roundDown(num, kWarpSize); // This will keep our negation factor float adj = dir ? 
-1 : 1; int i = threadIdx.x; for (; i < limit; i += blockDim.x) { // We represent the index as (probe id)(k) // Right now, both are limited to a maximum of 2048, but we will // dedicate each to the high and low words of a uint32_t static_assert(GPU_MAX_SELECTION_K <= 65536, ""); uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { // Adjust the value we are selecting based on the sorting order heap.addThreadQ(distanceBase[i] * adj, index); } heap.checkThreadQ(); } // Handle warp divergence separately if (i < num) { uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { heap.addThreadQ(distanceBase[i] * adj, index); } } // Merge all final results heap.reduce(); for (int i = threadIdx.x; i < k; i += blockDim.x) { // Re-adjust the value we are selecting based on the sorting order distanceOut[queryId][i] = smemK[i] * adj; auto packedIndex = smemV[i]; // We need to remap to the user-provided indices Index::idx_t index = -1; // We may not have at least k values to return; in this function, max // uint32 is our sentinel value if (packedIndex != kMaxUInt32) { uint32_t curProbe = packedIndex >> 16; uint32_t curK = packedIndex & 0xffff; int listId = listIds[queryId][curProbe]; int listOffset = indicesIn[queryId][curProbe][curK]; if (opt == INDICES_32_BIT) { index = (Index::idx_t)((int*)listIndices[listId])[listOffset]; } else if (opt == INDICES_64_BIT) { index = ((Index::idx_t*)listIndices[listId])[listOffset]; } else { index = ((Index::idx_t)listId << 32 | (Index::idx_t)listOffset); } } indicesOut[queryId][i] = index; } } void runIVFInterleavedScan2( Tensor<float, 3, true>& distanceIn, Tensor<int, 3, true>& indicesIn, Tensor<int, 2, true>& listIds, int k, thrust::device_vector<void*>& listIndices, IndicesOptions indicesOptions, bool dir, 
Tensor<float, 2, true>& distanceOut, Tensor<Index::idx_t, 2, true>& indicesOut, hipStream_t stream) { #define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \ hipLaunchKernelGGL(( ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q>) \ , dim3(distanceIn.getSize(0)), dim3(THREADS), 0, stream, \ distanceIn, \ indicesIn, \ listIds, \ k, \ listIndices.data().get(), \ indicesOptions, \ dir, \ distanceOut, \ indicesOut) if (k == 1) { IVF_SCAN_2(128, 1, 1); } else if (k <= 32) { IVF_SCAN_2(128, 32, 2); } else if (k <= 64) { IVF_SCAN_2(128, 64, 3); } else if (k <= 128) { IVF_SCAN_2(128, 128, 3); } else if (k <= 256) { IVF_SCAN_2(128, 256, 4); } else if (k <= 512) { IVF_SCAN_2(128, 512, 8); } else if (k <= 1024) { IVF_SCAN_2(128, 1024, 8); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_SCAN_2(64, 2048, 8); } #endif } void runIVFInterleavedScan( Tensor<float, 2, true>& queries, Tensor<int, 2, true>& listIds, thrust::device_vector<void*>& listData, thrust::device_vector<void*>& listIndices, IndicesOptions indicesOptions, thrust::device_vector<int>& listLengths, int k, faiss::MetricType metric, bool useResidual, Tensor<float, 3, true>& residualBase, GpuScalarQuantizer* scalarQ, // output Tensor<float, 2, true>& outDistances, // output Tensor<Index::idx_t, 2, true>& outIndices, GpuResources* res) { // caught for exceptions at a higher level FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); if (k == 1) { IVF_INTERLEAVED_CALL(1); } else if (k <= 32) { IVF_INTERLEAVED_CALL(32); } else if (k <= 64) { IVF_INTERLEAVED_CALL(64); } else if (k <= 128) { IVF_INTERLEAVED_CALL(128); } else if (k <= 256) { IVF_INTERLEAVED_CALL(256); } else if (k <= 512) { IVF_INTERLEAVED_CALL(512); } else if (k <= 1024) { IVF_INTERLEAVED_CALL(1024); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_INTERLEAVED_CALL(2048); } #endif } } // namespace gpu } // namespace faiss
dea7ea0dd3635dd979120cbfd53874f0db14e357.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/IVFInterleaved.cuh> #include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh> namespace faiss { namespace gpu { constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max(); // Second-pass kernel to further k-select the results from the first pass across // IVF lists and produce the final results template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ> __global__ void ivfInterleavedScan2( Tensor<float, 3, true> distanceIn, Tensor<int, 3, true> indicesIn, Tensor<int, 2, true> listIds, int k, void** listIndices, IndicesOptions opt, bool dir, Tensor<float, 2, true> distanceOut, Tensor<Index::idx_t, 2, true> indicesOut) { int queryId = blockIdx.x; constexpr int kNumWarps = ThreadsPerBlock / kWarpSize; __shared__ float smemK[kNumWarps * NumWarpQ]; __shared__ uint32_t smemV[kNumWarps * NumWarpQ]; // To avoid creating excessive specializations, we combine direction // kernels, selecting for the smallest element. If `dir` is true, we negate // all values being selected (so that we are selecting the largest element). BlockSelect< float, uint32_t, false, Comparator<float>, NumWarpQ, NumThreadQ, ThreadsPerBlock> heap(kFloatMax, kMaxUInt32, smemK, smemV, k); // nprobe x k int num = distanceIn.getSize(1) * distanceIn.getSize(2); auto distanceBase = distanceIn[queryId].data(); int limit = utils::roundDown(num, kWarpSize); // This will keep our negation factor float adj = dir ? 
-1 : 1; int i = threadIdx.x; for (; i < limit; i += blockDim.x) { // We represent the index as (probe id)(k) // Right now, both are limited to a maximum of 2048, but we will // dedicate each to the high and low words of a uint32_t static_assert(GPU_MAX_SELECTION_K <= 65536, ""); uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { // Adjust the value we are selecting based on the sorting order heap.addThreadQ(distanceBase[i] * adj, index); } heap.checkThreadQ(); } // Handle warp divergence separately if (i < num) { uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { heap.addThreadQ(distanceBase[i] * adj, index); } } // Merge all final results heap.reduce(); for (int i = threadIdx.x; i < k; i += blockDim.x) { // Re-adjust the value we are selecting based on the sorting order distanceOut[queryId][i] = smemK[i] * adj; auto packedIndex = smemV[i]; // We need to remap to the user-provided indices Index::idx_t index = -1; // We may not have at least k values to return; in this function, max // uint32 is our sentinel value if (packedIndex != kMaxUInt32) { uint32_t curProbe = packedIndex >> 16; uint32_t curK = packedIndex & 0xffff; int listId = listIds[queryId][curProbe]; int listOffset = indicesIn[queryId][curProbe][curK]; if (opt == INDICES_32_BIT) { index = (Index::idx_t)((int*)listIndices[listId])[listOffset]; } else if (opt == INDICES_64_BIT) { index = ((Index::idx_t*)listIndices[listId])[listOffset]; } else { index = ((Index::idx_t)listId << 32 | (Index::idx_t)listOffset); } } indicesOut[queryId][i] = index; } } void runIVFInterleavedScan2( Tensor<float, 3, true>& distanceIn, Tensor<int, 3, true>& indicesIn, Tensor<int, 2, true>& listIds, int k, thrust::device_vector<void*>& listIndices, IndicesOptions indicesOptions, bool dir, 
Tensor<float, 2, true>& distanceOut, Tensor<Index::idx_t, 2, true>& indicesOut, cudaStream_t stream) { #define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \ ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q> \ <<<distanceIn.getSize(0), THREADS, 0, stream>>>( \ distanceIn, \ indicesIn, \ listIds, \ k, \ listIndices.data().get(), \ indicesOptions, \ dir, \ distanceOut, \ indicesOut) if (k == 1) { IVF_SCAN_2(128, 1, 1); } else if (k <= 32) { IVF_SCAN_2(128, 32, 2); } else if (k <= 64) { IVF_SCAN_2(128, 64, 3); } else if (k <= 128) { IVF_SCAN_2(128, 128, 3); } else if (k <= 256) { IVF_SCAN_2(128, 256, 4); } else if (k <= 512) { IVF_SCAN_2(128, 512, 8); } else if (k <= 1024) { IVF_SCAN_2(128, 1024, 8); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_SCAN_2(64, 2048, 8); } #endif } void runIVFInterleavedScan( Tensor<float, 2, true>& queries, Tensor<int, 2, true>& listIds, thrust::device_vector<void*>& listData, thrust::device_vector<void*>& listIndices, IndicesOptions indicesOptions, thrust::device_vector<int>& listLengths, int k, faiss::MetricType metric, bool useResidual, Tensor<float, 3, true>& residualBase, GpuScalarQuantizer* scalarQ, // output Tensor<float, 2, true>& outDistances, // output Tensor<Index::idx_t, 2, true>& outIndices, GpuResources* res) { // caught for exceptions at a higher level FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); if (k == 1) { IVF_INTERLEAVED_CALL(1); } else if (k <= 32) { IVF_INTERLEAVED_CALL(32); } else if (k <= 64) { IVF_INTERLEAVED_CALL(64); } else if (k <= 128) { IVF_INTERLEAVED_CALL(128); } else if (k <= 256) { IVF_INTERLEAVED_CALL(256); } else if (k <= 512) { IVF_INTERLEAVED_CALL(512); } else if (k <= 1024) { IVF_INTERLEAVED_CALL(1024); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_INTERLEAVED_CALL(2048); } #endif } } // namespace gpu } // namespace faiss
475941ed498756a1e88122c80d0db8ad8ac1daa2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/cpm/layers/nms_layer.hpp" #include <iostream> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include "caffe/cpm/util/math_functions.hpp" // caffe::updiv #define NUMBER_THREADS_PER_BLOCK_1D 16 #define NUMBER_THREADS_PER_BLOCK 256 namespace caffe { template <typename Dtype> __global__ void nms_register_kernel(const Dtype* const src_pointer, int* workspace, const int w, const int h, const Dtype threshold) { // get pixel location (x,y) const int x = (blockIdx.x * blockDim.x) + threadIdx.x; const int y = (blockIdx.y * blockDim.y) + threadIdx.y; if( x>0 && x<(w-1) && y>0 && y<(h-1) ){ const Dtype value = src_pointer[y*w + x]; if(value > threshold){ const Dtype top = src_pointer[(y-1)*w + x]; const Dtype bottom = src_pointer[(y+1)*w + x]; const Dtype left = src_pointer[y*w + (x-1)]; const Dtype right = src_pointer[y*w + (x+1)]; const Dtype top_left = src_pointer[(y-1)*w + x-1]; const Dtype top_right = src_pointer[(y-1)*w + x+1]; const Dtype bottom_left = src_pointer[(y+1)*w + x-1]; const Dtype bottom_right = src_pointer[(y+1)*w + x+1]; if(value > top && value > bottom && value > left && value > right && value > top_left && value > bottom_left && value > bottom_right && value > top_right ){ workspace[y*w + x] = 1; } else { workspace[y*w + x] = 0; } } else { workspace[y*w + x] = 0; } } else if( x==0 || x==(w-1) || y==0 || y==(h-1) ){ workspace[y*w + x] = 0; } } template <typename Dtype> __global__ void writeResultKernel(const int length, const int* const input, const Dtype* const src_pointer, Dtype* output, const int width, const int max_peaks){ __shared__ int local[NUMBER_THREADS_PER_BLOCK+1]; // one more const int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; if(globalIdx < length){ local[threadIdx.x] = input[globalIdx]; if(threadIdx.x == NUMBER_THREADS_PER_BLOCK - 1 && globalIdx != length - 1){ //last thread in the block but not globally last, load one 
more local[threadIdx.x+1] = input[globalIdx+1]; } __syncthreads(); // see difference, except the globally last one if(globalIdx != length - 1){ if(local[threadIdx.x] != local[threadIdx.x + 1]) { //means A[globalIdx] == A[globalIdx + 1] as the input[globalIdx]-th repeat const int peak_index = input[globalIdx]; //0-index const int peak_loc = globalIdx; const int peak_loc_x = peak_loc % width; const int peak_loc_y = peak_loc / width; if(peak_index < max_peaks){ //limitation //output[input[globalIdx]] = globalIdx; // if (1) { float x_acc = 0.f; float y_acc = 0.f; float score_acc = 0.f; // int count = 0; for (int dy=-3;dy<4;dy++) { if ((peak_loc_y+dy)>0 && (peak_loc_y+dy)<width) { for (int dx=-3;dx<4;dx++) { if ((peak_loc_x+dx)>0 && (peak_loc_x+dx)<width) { const float score = src_pointer[(peak_loc_y+dy)*width + peak_loc_x+dx]; const float x = peak_loc_x+dx; const float y = peak_loc_y+dy; if (score>0) { x_acc += x*score; y_acc += y*score; score_acc += score; // count += 1; } } } } } const int output_index = (peak_index + 1) * 3; output[output_index] = x_acc/score_acc; output[output_index + 1] = y_acc/score_acc; output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // } else { // const int output_index = (peak_index + 1) * 3; // output[output_index] = peak_loc_x; // output[output_index + 1] = peak_loc_y; // output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // } } } } else { output[0] = input[globalIdx]; //number of peaks } } } template <typename Dtype> void NmsLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){ //Forward_cpu(bottom, top); const int num = bottom[0]->shape(0); //int channel = bottom[0]->shape(1); const int height = bottom[0]->shape(2); const int width = bottom[0]->shape(3); const int offset = height * width; const int offset_dst = (max_peaks_+1)*3; const dim3 threadsPerBlock(NUMBER_THREADS_PER_BLOCK_1D, NUMBER_THREADS_PER_BLOCK_1D); const dim3 numBlocks(updiv(width, 
threadsPerBlock.x), updiv(height, threadsPerBlock.y)); // const int count = bottom[0]->count(); // std::cout << count << "\t\t" // << CAFFE_GET_BLOCKS(count) << " " << CAFFE_CUDA_NUM_THREADS << "\t\t" // << updiv(offset,NUMBER_THREADS_PER_BLOCK) << " " << NUMBER_THREADS_PER_BLOCK << "\t\t" // << numBlocks.x << " " << threadsPerBlock.x << std::endl; // std::cout << "num_t: " << top[0]->shape(0) << "\t"; // = 1 // std::cout << "channel_t: " << top[0]->shape(1) << "\t"; // = 18 // std::cout << "height_t: " << top[0]->shape(2) << "\t"; // = 3 // std::cout << "width_t: " << top[0]->shape(3) << "\n"; // = 65 // std::cout << "num_b: " << bottom[0]->shape(0) << "\t"; // = 1 // std::cout << "channel_b: " << bottom[0]->shape(1) << "\t"; // = 57 // std::cout << "height_b: " << bottom[0]->shape(2) << "\t"; // = 368 // std::cout << "width_b: " << bottom[0]->shape(3) << std::endl;// = 656 for(int n = 0; n < num; n++){ // batch for(int c = 0; c < num_parts_; c++){ //std::cout << "channel: " << c << std::endl; int* w_pointer1 = workspace.mutable_gpu_data() + n * num_parts_ * offset + c * offset; const Dtype* const src = bottom[0]->gpu_data() + n * num_parts_ * offset + c * offset; Dtype* dst = top[0]->mutable_gpu_data() + n * num_parts_ * offset_dst + c * offset_dst; // old model // if(c==14){ // Dtype* src = bottom[0]->mutable_gpu_data() + n * parts_num * offset + 28 * offset; // } // This returns w_pointer1, a binary array with 0s & 1s. 
1s in the local maximum positions (size = size(src)) hipLaunchKernelGGL(( nms_register_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, src, w_pointer1, width, height, threshold_);//[0,0,0,0,1,0,0,0,0,1,0,0,0,0] //LOG(ERROR) << "register done";; //debug // if(c==3){ // char filename[50]; // sprintf(filename, "work%02d.txt", c); // std::ofstream fout(filename); // int* w_pointer1_local = workspace.mutable_cpu_data() + n * parts_num * offset + c * offset; // for(int y = 0; y < height; y++){ // for(int x = 0; x < width; x++){ // fout << w_pointer1_local[y*width + x] << "\t"; // } // fout<< std::endl; // } // fout.close(); // } thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(w_pointer1); //LOG(ERROR) << "pointer done" // This modifies w_pointer1, now it indicates the local maximum indexes. Format: 0,0,0,1,1,1,1,2,2,2,... First maximum: 2, second: 6, etc... thrust::exclusive_scan(dev_ptr, dev_ptr + offset, dev_ptr); //[0,0,0,0,0,1,1,1,1,1,2,2,2,2] //LOG(ERROR) << "thrust done"; // This returns dst, with the NMS applied over it hipLaunchKernelGGL(( writeResultKernel), dim3(updiv(offset,NUMBER_THREADS_PER_BLOCK)), dim3(NUMBER_THREADS_PER_BLOCK), 0, 0, offset, w_pointer1, src, dst, width, max_peaks_); //LOG(ERROR) << "write done"; } } //w_pointer } template <typename Dtype> void NmsLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){ NOT_IMPLEMENTED; } INSTANTIATE_LAYER_GPU_FUNCS(NmsLayer); } // namespace caffe
475941ed498756a1e88122c80d0db8ad8ac1daa2.cu
#include "caffe/cpm/layers/nms_layer.hpp" #include <iostream> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include "caffe/cpm/util/math_functions.hpp" // caffe::updiv #define NUMBER_THREADS_PER_BLOCK_1D 16 #define NUMBER_THREADS_PER_BLOCK 256 namespace caffe { template <typename Dtype> __global__ void nms_register_kernel(const Dtype* const src_pointer, int* workspace, const int w, const int h, const Dtype threshold) { // get pixel location (x,y) const int x = (blockIdx.x * blockDim.x) + threadIdx.x; const int y = (blockIdx.y * blockDim.y) + threadIdx.y; if( x>0 && x<(w-1) && y>0 && y<(h-1) ){ const Dtype value = src_pointer[y*w + x]; if(value > threshold){ const Dtype top = src_pointer[(y-1)*w + x]; const Dtype bottom = src_pointer[(y+1)*w + x]; const Dtype left = src_pointer[y*w + (x-1)]; const Dtype right = src_pointer[y*w + (x+1)]; const Dtype top_left = src_pointer[(y-1)*w + x-1]; const Dtype top_right = src_pointer[(y-1)*w + x+1]; const Dtype bottom_left = src_pointer[(y+1)*w + x-1]; const Dtype bottom_right = src_pointer[(y+1)*w + x+1]; if(value > top && value > bottom && value > left && value > right && value > top_left && value > bottom_left && value > bottom_right && value > top_right ){ workspace[y*w + x] = 1; } else { workspace[y*w + x] = 0; } } else { workspace[y*w + x] = 0; } } else if( x==0 || x==(w-1) || y==0 || y==(h-1) ){ workspace[y*w + x] = 0; } } template <typename Dtype> __global__ void writeResultKernel(const int length, const int* const input, const Dtype* const src_pointer, Dtype* output, const int width, const int max_peaks){ __shared__ int local[NUMBER_THREADS_PER_BLOCK+1]; // one more const int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; if(globalIdx < length){ local[threadIdx.x] = input[globalIdx]; if(threadIdx.x == NUMBER_THREADS_PER_BLOCK - 1 && globalIdx != length - 1){ //last thread in the block but not globally last, load one more local[threadIdx.x+1] = input[globalIdx+1]; } __syncthreads(); // see difference, 
except the globally last one if(globalIdx != length - 1){ if(local[threadIdx.x] != local[threadIdx.x + 1]) { //means A[globalIdx] == A[globalIdx + 1] as the input[globalIdx]-th repeat const int peak_index = input[globalIdx]; //0-index const int peak_loc = globalIdx; const int peak_loc_x = peak_loc % width; const int peak_loc_y = peak_loc / width; if(peak_index < max_peaks){ //limitation //output[input[globalIdx]] = globalIdx; // if (1) { float x_acc = 0.f; float y_acc = 0.f; float score_acc = 0.f; // int count = 0; for (int dy=-3;dy<4;dy++) { if ((peak_loc_y+dy)>0 && (peak_loc_y+dy)<width) { for (int dx=-3;dx<4;dx++) { if ((peak_loc_x+dx)>0 && (peak_loc_x+dx)<width) { const float score = src_pointer[(peak_loc_y+dy)*width + peak_loc_x+dx]; const float x = peak_loc_x+dx; const float y = peak_loc_y+dy; if (score>0) { x_acc += x*score; y_acc += y*score; score_acc += score; // count += 1; } } } } } const int output_index = (peak_index + 1) * 3; output[output_index] = x_acc/score_acc; output[output_index + 1] = y_acc/score_acc; output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // } else { // const int output_index = (peak_index + 1) * 3; // output[output_index] = peak_loc_x; // output[output_index + 1] = peak_loc_y; // output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // } } } } else { output[0] = input[globalIdx]; //number of peaks } } } template <typename Dtype> void NmsLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){ //Forward_cpu(bottom, top); const int num = bottom[0]->shape(0); //int channel = bottom[0]->shape(1); const int height = bottom[0]->shape(2); const int width = bottom[0]->shape(3); const int offset = height * width; const int offset_dst = (max_peaks_+1)*3; const dim3 threadsPerBlock(NUMBER_THREADS_PER_BLOCK_1D, NUMBER_THREADS_PER_BLOCK_1D); const dim3 numBlocks(updiv(width, threadsPerBlock.x), updiv(height, threadsPerBlock.y)); // const int count = bottom[0]->count(); 
// std::cout << count << "\t\t" // << CAFFE_GET_BLOCKS(count) << " " << CAFFE_CUDA_NUM_THREADS << "\t\t" // << updiv(offset,NUMBER_THREADS_PER_BLOCK) << " " << NUMBER_THREADS_PER_BLOCK << "\t\t" // << numBlocks.x << " " << threadsPerBlock.x << std::endl; // std::cout << "num_t: " << top[0]->shape(0) << "\t"; // = 1 // std::cout << "channel_t: " << top[0]->shape(1) << "\t"; // = 18 // std::cout << "height_t: " << top[0]->shape(2) << "\t"; // = 3 // std::cout << "width_t: " << top[0]->shape(3) << "\n"; // = 65 // std::cout << "num_b: " << bottom[0]->shape(0) << "\t"; // = 1 // std::cout << "channel_b: " << bottom[0]->shape(1) << "\t"; // = 57 // std::cout << "height_b: " << bottom[0]->shape(2) << "\t"; // = 368 // std::cout << "width_b: " << bottom[0]->shape(3) << std::endl;// = 656 for(int n = 0; n < num; n++){ // batch for(int c = 0; c < num_parts_; c++){ //std::cout << "channel: " << c << std::endl; int* w_pointer1 = workspace.mutable_gpu_data() + n * num_parts_ * offset + c * offset; const Dtype* const src = bottom[0]->gpu_data() + n * num_parts_ * offset + c * offset; Dtype* dst = top[0]->mutable_gpu_data() + n * num_parts_ * offset_dst + c * offset_dst; // old model // if(c==14){ // Dtype* src = bottom[0]->mutable_gpu_data() + n * parts_num * offset + 28 * offset; // } // This returns w_pointer1, a binary array with 0s & 1s. 
1s in the local maximum positions (size = size(src)) nms_register_kernel<<<numBlocks, threadsPerBlock>>>(src, w_pointer1, width, height, threshold_);//[0,0,0,0,1,0,0,0,0,1,0,0,0,0] //LOG(ERROR) << "register done";; //debug // if(c==3){ // char filename[50]; // sprintf(filename, "work%02d.txt", c); // std::ofstream fout(filename); // int* w_pointer1_local = workspace.mutable_cpu_data() + n * parts_num * offset + c * offset; // for(int y = 0; y < height; y++){ // for(int x = 0; x < width; x++){ // fout << w_pointer1_local[y*width + x] << "\t"; // } // fout<< std::endl; // } // fout.close(); // } thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(w_pointer1); //LOG(ERROR) << "pointer done" // This modifies w_pointer1, now it indicates the local maximum indexes. Format: 0,0,0,1,1,1,1,2,2,2,... First maximum: 2, second: 6, etc... thrust::exclusive_scan(dev_ptr, dev_ptr + offset, dev_ptr); //[0,0,0,0,0,1,1,1,1,1,2,2,2,2] //LOG(ERROR) << "thrust done"; // This returns dst, with the NMS applied over it writeResultKernel<<<updiv(offset,NUMBER_THREADS_PER_BLOCK), NUMBER_THREADS_PER_BLOCK>>>(offset, w_pointer1, src, dst, width, max_peaks_); //LOG(ERROR) << "write done"; } } //w_pointer } template <typename Dtype> void NmsLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){ NOT_IMPLEMENTED; } INSTANTIATE_LAYER_GPU_FUNCS(NmsLayer); } // namespace caffe
3cc044671fdccad9ca4129fcede2b9e0b72f90f7.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <math.h> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <torch/extension.h> #include "strided_batched_gemm.h" #include "softmax.h" #include "dropout.h" #include "layer_norm.h" // symbol to be automatically resolved by PyTorch libs extern THCState *state; namespace multihead_attn { namespace self_bias { namespace cublas_gemmex { std::vector<torch::Tensor> fwd_cuda( bool use_time_mask, bool is_training, int heads, torch::Tensor const& inputs, torch::Tensor const& input_weights, torch::Tensor const& output_weights, torch::Tensor const& input_biases, torch::Tensor const& output_biases, const uint8_t* pad_mask, float dropout_prob ) { const int embed_dim = inputs.size(2); const int sequences = inputs.size(1); const int q_seq_len = inputs.size(0); const int k_seq_len = q_seq_len; const int batches = sequences * q_seq_len; const int head_dim = embed_dim / heads; const int output_lin_dim = 3 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim = attn_batches * 3 * head_dim; const int batch_stride = 3 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta_zero = 0.0; const float beta_one = 1.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // There is no reason to use more than one stream as every kernel is // sequentially dependent hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); // 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code) auto act_options = inputs.options().requires_grad(false); auto mask_options = act_options.dtype(torch::kUInt8); torch::Tensor 
input_lin_results = torch::empty({q_seq_len, sequences, output_lin_dim}, act_options); torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options); torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options); torch::Tensor outputs = torch::empty_like(inputs, act_options); // Input Linear Results Pointers to Q, K, and V of interviewed activations void* q_lin_results_ptr = static_cast<void*>(input_lin_results.data_ptr()); void* k_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_results.data_ptr()) + head_dim); void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_results.data_ptr()) + 2*head_dim); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); char a_layout_t{'t'}; char a_layout_n{'n'}; char b_layout_n{'n'}; TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Input Linear Fwd input_lin_results.copy_(input_biases); TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, output_lin_dim, batches, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(inputs.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta_one), q_lin_results_ptr, HIP_R_16F, output_lin_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size) gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, scale, static_cast<const half*>(k_lin_results_ptr), lead_dim, batch_stride, static_cast<const half*>(q_lin_results_ptr), lead_dim, batch_stride, beta_zero, 
static_cast<half*>(softmax_results_ptr), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Padded Softmax bool softmax_success = false; if (pad_mask == nullptr) { softmax_success = dispatch_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(softmax_results_ptr), k_seq_len, k_seq_len, attn_batches*q_seq_len); } else { if (use_time_mask) { softmax_success = dispatch_time_masked_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(softmax_results_ptr), pad_mask, k_seq_len, k_seq_len, attn_batches*q_seq_len, q_seq_len); } else { softmax_success = dispatch_masked_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(softmax_results_ptr), pad_mask, k_seq_len, k_seq_len, attn_batches*q_seq_len, attn_batches*q_seq_len/sequences); } } if (is_training) { //use at:: function so that C++ version generates the same random mask as python version auto dropout_tuple = at::_fused_dropout(softmax_results, 1.0f-dropout_prob); dropout_results = std::get<0>(dropout_tuple); dropout_mask = std::get<1>(dropout_tuple); } // Matmul2 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim, batch_stride, (is_training) ? 
static_cast<const half*>(dropout_results.data_ptr()) : static_cast<const half*>(softmax_results.data_ptr()) , k_seq_len, k_seq_len*q_seq_len, beta_zero, static_cast<half*>(matmul2_results.data_ptr()), head_dim*attn_batches, head_dim, attn_batches); outputs.copy_(output_biases); // Output Linear TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, embed_dim, batches, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(output_weights.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(matmul2_results.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta_one), static_cast<void*>(outputs.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, //CUBLAS_GEMM_ALGO1_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_lin_results, softmax_results, dropout_results, dropout_mask, matmul2_results, outputs }; } std::vector<torch::Tensor> bwd_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& matmul2_results, torch::Tensor const& dropout_results, torch::Tensor const& softmax_results, torch::Tensor const& input_lin_results, torch::Tensor const& inputs, torch::Tensor const& input_weights, torch::Tensor const& output_weights, torch::Tensor const& dropout_mask, float dropout_prob ) { const int embed_dim = inputs.size(2); const int sequences = inputs.size(1); const int q_seq_len = inputs.size(0); const int k_seq_len = q_seq_len; const int batches = sequences * q_seq_len; const int head_dim = embed_dim / heads; const int output_lin_dim = 3 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim = attn_batches * 3 * head_dim; const int batch_stride = 3 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // TODO: Streams can be used in Backprop but I haven't added more than one // in my 
first attempt to create the code hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); // Output Tensor Allocations torch::Tensor input_grads = torch::empty_like(inputs); torch::Tensor input_weight_grads = torch::empty_like(input_weights); torch::Tensor output_weight_grads = torch::empty_like(output_weights); // Intermediate Tensor Allocations at::Tensor output_lin_grads = torch::empty_like(matmul2_results); at::Tensor matmul2_grads = torch::empty_like(dropout_results); at::Tensor input_lin_output_grads = torch::empty_like(input_lin_results); auto q_lin_results_ptr = static_cast<half*>(input_lin_results.data_ptr()); auto k_lin_results_ptr = static_cast<half*>(input_lin_results.data_ptr()) + head_dim; auto v_lin_results_ptr = static_cast<half*>(input_lin_results.data_ptr()) + 2*head_dim; auto q_lin_grads_ptr = static_cast<half*>(input_lin_output_grads.data_ptr()); auto k_lin_grads_ptr = static_cast<half*>(input_lin_output_grads.data_ptr()) + head_dim; auto v_lin_grads_ptr = static_cast<half*>(input_lin_output_grads.data_ptr()) + 2*head_dim; char a_layout_n{'n'}; char a_layout_t{'t'}; char b_layout_n{'n'}; char b_layout_t{'t'}; TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Output Linear Dgrad TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(output_weights.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(output_grads.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_lin_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Output Linear Wgrad TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, embed_dim, batches, static_cast<const void*>(&alpha), static_cast<const 
void*>(matmul2_results.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(output_grads.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_weight_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); auto output_bias_grads = output_grads.view({-1, embed_dim}) .sum(0, false); // MatMul2 Dgrad1 gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim, batch_stride, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, beta, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Matmul2 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, alpha, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, static_cast<const half*>(dropout_results.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, v_lin_grads_ptr, lead_dim, batch_stride, attn_batches); // Apply Dropout Mask and Scale by Dropout Probability // Softmax Grad dispatch_masked_scale_softmax_backward_stream<half, half, float,false>( static_cast<half*>(matmul2_grads.data_ptr()), static_cast<half*>(matmul2_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); // Matmul1 Dgrad1 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, scale, k_lin_results_ptr, lead_dim, batch_stride, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, q_lin_grads_ptr, lead_dim, batch_stride, attn_batches); // Matmul1 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, scale, q_lin_results_ptr, lead_dim, batch_stride, static_cast<half*>(matmul2_grads.data_ptr()), 
k_seq_len, k_seq_len*q_seq_len, beta, k_lin_grads_ptr, lead_dim, batch_stride, attn_batches); // Input Linear Dgrad TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches, output_lin_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(input_lin_output_grads.data_ptr()), //static_cast<const void*>(q_lin_grads_ptr), HIP_R_16F, output_lin_dim, static_cast<const void*>(&beta), static_cast<void*>(input_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear Wgrad TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, output_lin_dim, batches, static_cast<const void*>(&alpha), static_cast<const void*>(inputs.data_ptr()), HIP_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), HIP_R_16F, output_lin_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_grads.data_ptr()), HIP_R_16F, embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); auto input_bias_grads = input_lin_output_grads.view({-1, output_lin_dim}).sum(0, false); TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_grads, input_weight_grads, output_weight_grads, input_bias_grads, output_bias_grads }; } } // end namespace cublas_gemmex } // end namespace self } // end namespace multihead_attn
3cc044671fdccad9ca4129fcede2b9e0b72f90f7.cu
#include <vector> #include <math.h> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include "strided_batched_gemm.h" #include "softmax.h" #include "dropout.h" #include "layer_norm.h" // symbol to be automatically resolved by PyTorch libs extern THCState *state; namespace multihead_attn { namespace self_bias { namespace cublas_gemmex { std::vector<torch::Tensor> fwd_cuda( bool use_time_mask, bool is_training, int heads, torch::Tensor const& inputs, torch::Tensor const& input_weights, torch::Tensor const& output_weights, torch::Tensor const& input_biases, torch::Tensor const& output_biases, const uint8_t* pad_mask, float dropout_prob ) { const int embed_dim = inputs.size(2); const int sequences = inputs.size(1); const int q_seq_len = inputs.size(0); const int k_seq_len = q_seq_len; const int batches = sequences * q_seq_len; const int head_dim = embed_dim / heads; const int output_lin_dim = 3 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim = attn_batches * 3 * head_dim; const int batch_stride = 3 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta_zero = 0.0; const float beta_one = 1.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // There is no reason to use more than one stream as every kernel is // sequentially dependent cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); // 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code) auto act_options = inputs.options().requires_grad(false); auto mask_options = act_options.dtype(torch::kUInt8); torch::Tensor input_lin_results = torch::empty({q_seq_len, sequences, output_lin_dim}, act_options); torch::Tensor 
softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options); torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options); torch::Tensor outputs = torch::empty_like(inputs, act_options); // Input Linear Results Pointers to Q, K, and V of interviewed activations void* q_lin_results_ptr = static_cast<void*>(input_lin_results.data_ptr()); void* k_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_results.data_ptr()) + head_dim); void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_results.data_ptr()) + 2*head_dim); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); char a_layout_t{'t'}; char a_layout_n{'n'}; char b_layout_n{'n'}; TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Input Linear Fwd input_lin_results.copy_(input_biases); TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, output_lin_dim, batches, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(inputs.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta_one), q_lin_results_ptr, CUDA_R_16F, output_lin_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size) gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, scale, static_cast<const half*>(k_lin_results_ptr), lead_dim, batch_stride, static_cast<const half*>(q_lin_results_ptr), lead_dim, batch_stride, beta_zero, static_cast<half*>(softmax_results_ptr), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Padded Softmax bool softmax_success = false; if 
(pad_mask == nullptr) { softmax_success = dispatch_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(softmax_results_ptr), k_seq_len, k_seq_len, attn_batches*q_seq_len); } else { if (use_time_mask) { softmax_success = dispatch_time_masked_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(softmax_results_ptr), pad_mask, k_seq_len, k_seq_len, attn_batches*q_seq_len, q_seq_len); } else { softmax_success = dispatch_masked_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(softmax_results_ptr), pad_mask, k_seq_len, k_seq_len, attn_batches*q_seq_len, attn_batches*q_seq_len/sequences); } } if (is_training) { //use at:: function so that C++ version generates the same random mask as python version auto dropout_tuple = at::_fused_dropout(softmax_results, 1.0f-dropout_prob); dropout_results = std::get<0>(dropout_tuple); dropout_mask = std::get<1>(dropout_tuple); } // Matmul2 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim, batch_stride, (is_training) ? 
static_cast<const half*>(dropout_results.data_ptr()) : static_cast<const half*>(softmax_results.data_ptr()) , k_seq_len, k_seq_len*q_seq_len, beta_zero, static_cast<half*>(matmul2_results.data_ptr()), head_dim*attn_batches, head_dim, attn_batches); outputs.copy_(output_biases); // Output Linear TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, embed_dim, batches, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(output_weights.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(matmul2_results.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta_one), static_cast<void*>(outputs.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, //CUBLAS_GEMM_ALGO1_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_lin_results, softmax_results, dropout_results, dropout_mask, matmul2_results, outputs }; } std::vector<torch::Tensor> bwd_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& matmul2_results, torch::Tensor const& dropout_results, torch::Tensor const& softmax_results, torch::Tensor const& input_lin_results, torch::Tensor const& inputs, torch::Tensor const& input_weights, torch::Tensor const& output_weights, torch::Tensor const& dropout_mask, float dropout_prob ) { const int embed_dim = inputs.size(2); const int sequences = inputs.size(1); const int q_seq_len = inputs.size(0); const int k_seq_len = q_seq_len; const int batches = sequences * q_seq_len; const int head_dim = embed_dim / heads; const int output_lin_dim = 3 * embed_dim; const int attn_batches = heads * sequences; const int lead_dim = attn_batches * 3 * head_dim; const int batch_stride = 3 * head_dim; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; const float alpha = 1.0; const float beta = 0.0; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); // TODO: Streams can be used in Backprop but I haven't added more than one // in my 
first attempt to create the code cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); // Output Tensor Allocations torch::Tensor input_grads = torch::empty_like(inputs); torch::Tensor input_weight_grads = torch::empty_like(input_weights); torch::Tensor output_weight_grads = torch::empty_like(output_weights); // Intermediate Tensor Allocations at::Tensor output_lin_grads = torch::empty_like(matmul2_results); at::Tensor matmul2_grads = torch::empty_like(dropout_results); at::Tensor input_lin_output_grads = torch::empty_like(input_lin_results); auto q_lin_results_ptr = static_cast<half*>(input_lin_results.data_ptr()); auto k_lin_results_ptr = static_cast<half*>(input_lin_results.data_ptr()) + head_dim; auto v_lin_results_ptr = static_cast<half*>(input_lin_results.data_ptr()) + 2*head_dim; auto q_lin_grads_ptr = static_cast<half*>(input_lin_output_grads.data_ptr()); auto k_lin_grads_ptr = static_cast<half*>(input_lin_output_grads.data_ptr()) + head_dim; auto v_lin_grads_ptr = static_cast<half*>(input_lin_output_grads.data_ptr()) + 2*head_dim; char a_layout_n{'n'}; char a_layout_t{'t'}; char b_layout_n{'n'}; char b_layout_t{'t'}; TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); // Output Linear Dgrad TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches, embed_dim, static_cast<const void*>(&alpha), static_cast<const void*>(output_weights.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(output_grads.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_lin_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Output Linear Wgrad TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, embed_dim, batches, static_cast<const void*>(&alpha), static_cast<const void*>(matmul2_results.data_ptr()), CUDA_R_16F, 
embed_dim, static_cast<const void*>(output_grads.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(&beta), static_cast<void*>(output_weight_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); auto output_bias_grads = output_grads.view({-1, embed_dim}) .sum(0, false); // MatMul2 Dgrad1 gemm_switch_fp32accum( state, a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha, static_cast<const half*>(v_lin_results_ptr), lead_dim, batch_stride, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, beta, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, attn_batches); // Matmul2 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, alpha, static_cast<const half*>(output_lin_grads.data_ptr()), head_dim*attn_batches, head_dim, static_cast<const half*>(dropout_results.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, v_lin_grads_ptr, lead_dim, batch_stride, attn_batches); // Apply Dropout Mask and Scale by Dropout Probability // Softmax Grad dispatch_masked_scale_softmax_backward_stream<half, half, float,false>( static_cast<half*>(matmul2_grads.data_ptr()), static_cast<half*>(matmul2_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); // Matmul1 Dgrad1 gemm_switch_fp32accum( state, a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, scale, k_lin_results_ptr, lead_dim, batch_stride, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, q_lin_grads_ptr, lead_dim, batch_stride, attn_batches); // Matmul1 Dgrad2 gemm_switch_fp32accum( state, a_layout_n, b_layout_t, head_dim, k_seq_len, q_seq_len, scale, q_lin_results_ptr, lead_dim, batch_stride, static_cast<half*>(matmul2_grads.data_ptr()), k_seq_len, k_seq_len*q_seq_len, beta, 
k_lin_grads_ptr, lead_dim, batch_stride, attn_batches); // Input Linear Dgrad TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches, output_lin_dim, static_cast<const void*>(&alpha), static_cast<const void*>(input_weights.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(input_lin_output_grads.data_ptr()), //static_cast<const void*>(q_lin_grads_ptr), CUDA_R_16F, output_lin_dim, static_cast<const void*>(&beta), static_cast<void*>(input_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, //CUBLAS_GEMM_ALGO10_TENSOR_OP)); CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // Input Linear Wgrad TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, output_lin_dim, batches, static_cast<const void*>(&alpha), static_cast<const void*>(inputs.data_ptr()), CUDA_R_16F, embed_dim, static_cast<const void*>(q_lin_grads_ptr), CUDA_R_16F, output_lin_dim, static_cast<const void*>(&beta), static_cast<void*>(input_weight_grads.data_ptr()), CUDA_R_16F, embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); auto input_bias_grads = input_lin_output_grads.view({-1, output_lin_dim}).sum(0, false); TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); return { input_grads, input_weight_grads, output_weight_grads, input_bias_grads, output_bias_grads }; } } // end namespace cublas_gemmex } // end namespace self } // end namespace multihead_attn
4114ca3259f4794445c9a3f6b864df2ecc304344.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_list_utilities.hpp> #include <cudf_test/type_lists.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> struct DispatcherTest : public cudf::test::BaseFixture { }; template <typename T> struct TypedDispatcherTest : public DispatcherTest { }; TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes); namespace { template <typename Expected> struct type_tester { template <typename Dispatched> bool operator()() { return std::is_same<Expected, Dispatched>::value; } }; } // namespace TYPED_TEST(TypedDispatcherTest, TypeToId) { EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, type_tester<TypeParam>{})); } namespace { struct verify_dispatched_type { template <typename T> __host__ __device__ bool operator()(cudf::type_id id) { return id == cudf::type_to_id<T>(); } }; __global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result) { if (0 == threadIdx.x + blockIdx.x * blockDim.x) *d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id); } } // namespace TYPED_TEST(TypedDispatcherTest, 
DeviceDispatch) { auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1); hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, 0, cudf::type_to_id<TypeParam>(), result.data()); CUDA_TRY(hipDeviceSynchronize()); EXPECT_EQ(true, result.front_element(rmm::cuda_stream_default)); } struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> { }; INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids)); TEST_P(IdDispatcherTest, IdToType) { auto t = GetParam(); EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t)); } template <typename T> struct TypedDoubleDispatcherTest : public DispatcherTest { }; TYPED_TEST_CASE(TypedDoubleDispatcherTest, cudf::test::AllTypes); namespace { template <typename Expected1, typename Expected2> struct two_type_tester { template <typename Dispatched1, typename Dispatched2> bool operator()() { return std::is_same<Expected1, Dispatched1>::value && std::is_same<Expected2, Dispatched2>::value; } }; } // namespace TYPED_TEST(TypedDoubleDispatcherTest, TypeToId) { EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, cudf::data_type{cudf::type_to_id<TypeParam>()}, two_type_tester<TypeParam, TypeParam>{})); } namespace { struct verify_double_dispatched_type { template <typename T1, typename T2> __host__ __device__ bool operator()(cudf::type_id id1, cudf::type_id id2) { return id1 == cudf::type_to_id<T1>() && id2 == cudf::type_to_id<T2>(); } }; __global__ void double_dispatch_test_kernel(cudf::type_id id1, cudf::type_id id2, bool* d_result) { if (0 == threadIdx.x + blockIdx.x * blockDim.x) *d_result = cudf::double_type_dispatcher( cudf::data_type{id1}, cudf::data_type{id2}, verify_double_dispatched_type{}, id1, id2); } } // namespace TYPED_TEST(TypedDoubleDispatcherTest, DeviceDoubleDispatch) { auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1); 
hipLaunchKernelGGL(( double_dispatch_test_kernel), dim3(1), dim3(1), 0, 0, cudf::type_to_id<TypeParam>(), cudf::type_to_id<TypeParam>(), result.data()); CUDA_TRY(hipDeviceSynchronize()); EXPECT_EQ(true, result.front_element(rmm::cuda_stream_default)); } struct IdDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> { }; INSTANTIATE_TEST_CASE_P(TestAllIds, IdDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids)); TEST_P(IdDoubleDispatcherTest, IdToType) { // Test double-dispatch of all types using the same type for both dispatches auto t = GetParam(); EXPECT_TRUE(cudf::double_type_dispatcher( cudf::data_type{t}, cudf::data_type{t}, verify_double_dispatched_type{}, t, t)); } struct IdFixedDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> { }; INSTANTIATE_TEST_CASE_P(TestAllIds, IdFixedDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids)); TEST_P(IdFixedDoubleDispatcherTest, IdToType) { // Test double-dispatch of all types against one fixed type, in each direction auto t = GetParam(); EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{t}, cudf::data_type{cudf::type_to_id<float>()}, verify_double_dispatched_type{}, t, cudf::type_to_id<float>())); EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<float>()}, cudf::data_type{t}, verify_double_dispatched_type{}, cudf::type_to_id<float>(), t)); } CUDF_TEST_PROGRAM_MAIN()
4114ca3259f4794445c9a3f6b864df2ecc304344.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_list_utilities.hpp> #include <cudf_test/type_lists.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> struct DispatcherTest : public cudf::test::BaseFixture { }; template <typename T> struct TypedDispatcherTest : public DispatcherTest { }; TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes); namespace { template <typename Expected> struct type_tester { template <typename Dispatched> bool operator()() { return std::is_same<Expected, Dispatched>::value; } }; } // namespace TYPED_TEST(TypedDispatcherTest, TypeToId) { EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, type_tester<TypeParam>{})); } namespace { struct verify_dispatched_type { template <typename T> __host__ __device__ bool operator()(cudf::type_id id) { return id == cudf::type_to_id<T>(); } }; __global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result) { if (0 == threadIdx.x + blockIdx.x * blockDim.x) *d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id); } } // namespace TYPED_TEST(TypedDispatcherTest, DeviceDispatch) { auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1); 
dispatch_test_kernel<<<1, 1>>>(cudf::type_to_id<TypeParam>(), result.data()); CUDA_TRY(cudaDeviceSynchronize()); EXPECT_EQ(true, result.front_element(rmm::cuda_stream_default)); } struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> { }; INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids)); TEST_P(IdDispatcherTest, IdToType) { auto t = GetParam(); EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t)); } template <typename T> struct TypedDoubleDispatcherTest : public DispatcherTest { }; TYPED_TEST_CASE(TypedDoubleDispatcherTest, cudf::test::AllTypes); namespace { template <typename Expected1, typename Expected2> struct two_type_tester { template <typename Dispatched1, typename Dispatched2> bool operator()() { return std::is_same<Expected1, Dispatched1>::value && std::is_same<Expected2, Dispatched2>::value; } }; } // namespace TYPED_TEST(TypedDoubleDispatcherTest, TypeToId) { EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, cudf::data_type{cudf::type_to_id<TypeParam>()}, two_type_tester<TypeParam, TypeParam>{})); } namespace { struct verify_double_dispatched_type { template <typename T1, typename T2> __host__ __device__ bool operator()(cudf::type_id id1, cudf::type_id id2) { return id1 == cudf::type_to_id<T1>() && id2 == cudf::type_to_id<T2>(); } }; __global__ void double_dispatch_test_kernel(cudf::type_id id1, cudf::type_id id2, bool* d_result) { if (0 == threadIdx.x + blockIdx.x * blockDim.x) *d_result = cudf::double_type_dispatcher( cudf::data_type{id1}, cudf::data_type{id2}, verify_double_dispatched_type{}, id1, id2); } } // namespace TYPED_TEST(TypedDoubleDispatcherTest, DeviceDoubleDispatch) { auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1); double_dispatch_test_kernel<<<1, 1>>>( cudf::type_to_id<TypeParam>(), cudf::type_to_id<TypeParam>(), result.data()); 
CUDA_TRY(cudaDeviceSynchronize()); EXPECT_EQ(true, result.front_element(rmm::cuda_stream_default)); } struct IdDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> { }; INSTANTIATE_TEST_CASE_P(TestAllIds, IdDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids)); TEST_P(IdDoubleDispatcherTest, IdToType) { // Test double-dispatch of all types using the same type for both dispatches auto t = GetParam(); EXPECT_TRUE(cudf::double_type_dispatcher( cudf::data_type{t}, cudf::data_type{t}, verify_double_dispatched_type{}, t, t)); } struct IdFixedDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> { }; INSTANTIATE_TEST_CASE_P(TestAllIds, IdFixedDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids)); TEST_P(IdFixedDoubleDispatcherTest, IdToType) { // Test double-dispatch of all types against one fixed type, in each direction auto t = GetParam(); EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{t}, cudf::data_type{cudf::type_to_id<float>()}, verify_double_dispatched_type{}, t, cudf::type_to_id<float>())); EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<float>()}, cudf::data_type{t}, verify_double_dispatched_type{}, cudf::type_to_id<float>(), t)); } CUDF_TEST_PROGRAM_MAIN()
8539c39bc407534980e763ea0dff09b5a1af1dba.hip
// !!! This is a file automatically generated by hipify!!! /* This is the Porter stemming algorithm, coded up as thread-safe ANSI C by the author. It may be be regarded as cononical, in that it follows the algorithm presented in Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14, no. 3, pp 130-137, only differing from it at the points maked --DEPARTURE-- below. See also http://www.tartarus.org/~martin/PorterStemmer The algorithm as described in the paper could be exactly replicated by adjusting the points of DEPARTURE, but this is barely necessary, because (a) the points of DEPARTURE are definitely improvements, and (b) no encoding of the Porter stemmer I have seen is anything like as exact as this version, even with the points of DEPARTURE! You can compile it on Unix with 'gcc -O3 -o stem stem.c' after which 'stem' takes a list of inputs and sends the stemmed equivalent to stdout. The algorithm as encoded here is particularly fast. Release 2 (the more old-fashioned, non-thread-safe version may be regarded as release 1.) */ #include <stdio.h> #include <stdlib.h> /* for malloc, free */ #include <ctype.h> /* for isupper, islower, tolower */ #include <string.h> /* for memcmp, memmove */ #include <hip/hip_runtime.h> #include <pthread.h> #include <limits.h> #include <float.h> #include <math.h> #include <sys/time.h> #include "../../utils/timer.h" /* You will probably want to move the following declarations to a central header file. */ struct stemmer; extern struct stemmer *create_stemmer(void); extern void free_stemmer(struct stemmer *z); extern int stem(struct stemmer *z, char *b, int k); /* The main part of the stemming algorithm starts here. 
*/ #define TRUE 1 #define FALSE 0 #define INC 32 /* size units in which s is increased */ /* stemmer is a structure for a few local bits of data, */ struct stemmer { // char *b; /* buffer for word to be stemmed */ char b[INC + 1]; /* buffer for word to be stemmed */ int k; /* offset to the end of the string */ int j; /* a general offset into the string */ }; /* Member b is a buffer holding a word to be stemmed. The letters are in b[0], b[1] ... ending at b[z->k]. Member k is readjusted downwards as the stemming progresses. Zero termination is not in fact used in the algorithm. Note that only lower case sequences are stemmed. Forcing to lower case should be done before stem(...) is called. Typical usage is: struct stemmer * z = create_stemmer(); char b[] = "pencils"; int res = stem(z, b, 6); /- stem the 7 characters of b[0] to b[6]. The result, res, will be 5 (the 's' is removed). -/ free_stemmer(z); */ extern struct stemmer *create_stemmer(void) { return (struct stemmer *)malloc(sizeof(struct stemmer)); /* assume malloc succeeds */ } extern void free_stemmer(struct stemmer *z) { free(z); } /* cons(z, i) is TRUE <=> b[i] is a consonant. ('b' means 'z->b', but here and below we drop 'z->' in comments. */ __host__ __device__ static int cons1(struct stemmer *z, int i) { switch (z->b[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': return FALSE; default: return TRUE; } } __host__ __device__ static int cons(struct stemmer *z, int i) { switch (z->b[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': return FALSE; case 'y': return (i == 0) ? TRUE : !cons1(z, i - 1); default: return TRUE; } } /* m(z) measures the number of consonant sequences between 0 and j. if c is a consonant sequence and v a vowel sequence, and <..> indicates arbitrary presence, <c><v> gives 0 <c>vc<v> gives 1 <c>vcvc<v> gives 2 <c>vcvcvc<v> gives 3 .... 
*/ __host__ __device__ static int m(struct stemmer *z) { int n = 0; int i = 0; int j = z->j; while (TRUE) { if (i > j) return n; if (!cons(z, i)) break; i++; } i++; while (TRUE) { while (TRUE) { if (i > j) return n; if (cons(z, i)) break; i++; } i++; n++; while (TRUE) { if (i > j) return n; if (!cons(z, i)) break; i++; } i++; } } /* vowelinstem(z) is TRUE <=> 0,...j contains a vowel */ __host__ __device__ static int vowelinstem(struct stemmer *z) { int j = z->j; int i; for (i = 0; i <= j; i++) if (!cons(z, i)) return TRUE; return FALSE; } /* doublec(z, j) is TRUE <=> j,(j-1) contain a double consonant. */ __host__ __device__ static int doublec(struct stemmer *z, int j) { char *b = z->b; if (j < 1) return FALSE; if (b[j] != b[j - 1]) return FALSE; return cons(z, j); } /* cvc(z, i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant and also if the second c is not w,x or y. this is used when trying to restore an e at the end of a short word. e.g. cav(e), lov(e), hop(e), crim(e), but snow, box, tray. */ __host__ __device__ static int cvc(struct stemmer *z, int i) { if (i < 2 || !cons(z, i) || cons(z, i - 1) || !cons(z, i - 2)) return FALSE; { int ch = z->b[i]; if (ch == 'w' || ch == 'x' || ch == 'y') return FALSE; } return TRUE; } /* ends(z, s) is TRUE <=> 0,...k ends with the string s. 
*/ __host__ __device__ static int memcmp1(const void *buffer1, const void *buffer2, int count) { if (!count) return (0); while (--count && *(char *)buffer1 == *(char *)buffer2) { buffer1 = (char *)buffer1 + 1; buffer2 = (char *)buffer2 + 1; } return (*((unsigned char *)buffer1) - *((unsigned char *)buffer2)); } __host__ __device__ static int ends(struct stemmer *z, char *s) { int length = s[0]; char *b = z->b; int k = z->k; if (s[length] != b[k]) return FALSE; /* tiny speed-up */ if (length > k + 1) return FALSE; if (memcmp1(b + k - length + 1, s + 1, length) != 0) return FALSE; z->j = k - length; return TRUE; } /* setto(z, s) sets (j+1),...k to the characters in the string s, readjusting k. */ __host__ __device__ void memmove1(void *dst, const void *src, int count) { char *dst_t; char *src_t; if ((unsigned char *)dst <= (unsigned char *)src || (unsigned char *)dst >= ((unsigned char *)src + count)) { dst_t = (char *)dst; src_t = (char *)src; while (count--) { *dst_t++ = *src_t++; } } else { dst_t = (char *)dst + count - 1; src_t = (char *)src + count - 1; while (count--) { *dst_t-- = *src_t--; } } } __host__ __device__ static void setto(struct stemmer *z, char *s) { int length = s[0]; int j = z->j; memmove1(z->b + j + 1, s + 1, length); z->k = j + length; } /* r(z, s) is used further down. */ __host__ __device__ static void r(struct stemmer *z, char *s) { if (m(z) > 0) setto(z, s); } /* step1ab(z) gets rid of plurals and -ed or -ing. e.g. caresses -> caress ponies -> poni ties -> ti caress -> caress cats -> cat feed -> feed agreed -> agree disabled -> disable matting -> mat mating -> mate meeting -> meet milling -> mill messing -> mess meetings -> meet */ /* In stem(z, b, k), b is a char pointer, and the string to be stemmed is from b[0] to b[k] inclusive. Possibly b[k+1] == '\0', but it is not important. The stemmer adjusts the characters b[0] ... b[k] and returns the new end-point of the string, k'. Stemming never increases word length, so 0 <= k' <= k. 
*/ __host__ __device__ static void step1ab(struct stemmer *z) { char *b = z->b; if (b[z->k] == 's') { if (ends(z, "\04" "sses")) z->k -= 2; else if (ends(z, "\03" "ies")) setto(z, "\01" "i"); else if (b[z->k - 1] != 's') z->k--; } if (ends(z, "\03" "eed")) { if (m(z) > 0) z->k--; } else if ((ends(z, "\02" "ed") || ends(z, "\03" "ing")) && vowelinstem(z)) { z->k = z->j; if (ends(z, "\02" "at")) setto(z, "\03" "ate"); else if (ends(z, "\02" "bl")) setto(z, "\03" "ble"); else if (ends(z, "\02" "iz")) setto(z, "\03" "ize"); else if (doublec(z, z->k)) { z->k--; { int ch = b[z->k]; if (ch == 'l' || ch == 's' || ch == 'z') z->k++; } } else if (m(z) == 1 && cvc(z, z->k)) setto(z, "\01" "e"); } } /* step1c(z) turns terminal y to i when there is another vowel in the stem. */ __host__ __device__ static void step1c(struct stemmer *z) { if (ends(z, "\01" "y") && vowelinstem(z)) z->b[z->k] = 'i'; } /* step2(z) maps double suffices to single ones. so -ization ( = -ize plus -ation) maps to -ize etc. note that the string before the suffix must give m(z) > 0. 
*/ __host__ __device__ static void step2(struct stemmer *z) { switch (z->b[z->k - 1]) { case 'a': if (ends(z, "\07" "ational")) { r(z, "\03" "ate"); break; } if (ends(z, "\06" "tional")) { r(z, "\04" "tion"); break; } break; case 'c': if (ends(z, "\04" "enci")) { r(z, "\04" "ence"); break; } if (ends(z, "\04" "anci")) { r(z, "\04" "ance"); break; } break; case 'e': if (ends(z, "\04" "izer")) { r(z, "\03" "ize"); break; } break; case 'l': if (ends(z, "\03" "bli")) { r(z, "\03" "ble"); break; } /*-DEPARTURE-*/ /* To match the published algorithm, replace this line with case 'l': if (ends(z, "\04" "abli")) { r(z, "\04" "able"); break; } */ if (ends(z, "\04" "alli")) { r(z, "\02" "al"); break; } if (ends(z, "\05" "entli")) { r(z, "\03" "ent"); break; } if (ends(z, "\03" "eli")) { r(z, "\01" "e"); break; } if (ends(z, "\05" "ousli")) { r(z, "\03" "ous"); break; } break; case 'o': if (ends(z, "\07" "ization")) { r(z, "\03" "ize"); break; } if (ends(z, "\05" "ation")) { r(z, "\03" "ate"); break; } if (ends(z, "\04" "ator")) { r(z, "\03" "ate"); break; } break; case 's': if (ends(z, "\05" "alism")) { r(z, "\02" "al"); break; } if (ends(z, "\07" "iveness")) { r(z, "\03" "ive"); break; } if (ends(z, "\07" "fulness")) { r(z, "\03" "ful"); break; } if (ends(z, "\07" "ousness")) { r(z, "\03" "ous"); break; } break; case 't': if (ends(z, "\05" "aliti")) { r(z, "\02" "al"); break; } if (ends(z, "\05" "iviti")) { r(z, "\03" "ive"); break; } if (ends(z, "\06" "biliti")) { r(z, "\03" "ble"); break; } break; case 'g': if (ends(z, "\04" "logi")) { r(z, "\03" "log"); break; } /*-DEPARTURE-*/ /* To match the published algorithm, delete this line */ } } /* step3(z) deals with -ic-, -full, -ness etc. similar strategy to step2. 
*/ __host__ __device__ static void step3(struct stemmer *z) { switch (z->b[z->k]) { case 'e': if (ends(z, "\05" "icate")) { r(z, "\02" "ic"); break; } if (ends(z, "\05" "ative")) { r(z, "\00" ""); break; } if (ends(z, "\05" "alize")) { r(z, "\02" "al"); break; } break; case 'i': if (ends(z, "\05" "iciti")) { r(z, "\02" "ic"); break; } break; case 'l': if (ends(z, "\04" "ical")) { r(z, "\02" "ic"); break; } if (ends(z, "\03" "ful")) { r(z, "\00" ""); break; } break; case 's': if (ends(z, "\04" "ness")) { r(z, "\00" ""); break; } break; } } /* step4(z) takes off -ant, -ence etc., in context <c>vcvc<v>. */ __host__ __device__ static void step4(struct stemmer *z) { switch (z->b[z->k - 1]) { case 'a': if (ends(z, "\02" "al")) break; return; case 'c': if (ends(z, "\04" "ance")) break; if (ends(z, "\04" "ence")) break; return; case 'e': if (ends(z, "\02" "er")) break; return; case 'i': if (ends(z, "\02" "ic")) break; return; case 'l': if (ends(z, "\04" "able")) break; if (ends(z, "\04" "ible")) break; return; case 'n': if (ends(z, "\03" "ant")) break; if (ends(z, "\05" "ement")) break; if (ends(z, "\04" "ment")) break; if (ends(z, "\03" "ent")) break; return; case 'o': if (ends(z, "\03" "ion") && (z->b[z->j] == 's' || z->b[z->j] == 't')) break; if (ends(z, "\02" "ou")) break; return; /* takes care of -ous */ case 's': if (ends(z, "\03" "ism")) break; return; case 't': if (ends(z, "\03" "ate")) break; if (ends(z, "\03" "iti")) break; return; case 'u': if (ends(z, "\03" "ous")) break; return; case 'v': if (ends(z, "\03" "ive")) break; return; case 'z': if (ends(z, "\03" "ize")) break; return; default: return; } if (m(z) > 1) z->k = z->j; } /* step5(z) removes a final -e if m(z) > 1, and changes -ll to -l if m(z) > 1. 
*/ __host__ __device__ static void step5(struct stemmer *z) { char *b = z->b; z->j = z->k; if (b[z->k] == 'e') { int a = m(z); if (a > 1 || a == 1 && !cvc(z, z->k - 1)) z->k--; } if (b[z->k] == 'l' && doublec(z, z->k) && m(z) > 1) z->k--; } /* In stem(z, b, k), b is a char pointer, and the string to be stemmed is from b[0] to b[k] inclusive. Possibly b[k+1] == '\0', but it is not important. The stemmer adjusts the characters b[0] ... b[k] and returns the new end-point of the string, k'. Stemming never increases word length, so 0 <= k' <= k. */ __global__ void stem_gpu(struct stemmer *stem_list, int words) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < words) { if (stem_list[tid].k <= 1) { return; } step1ab(&(stem_list[tid])); step1c(&(stem_list[tid])); step2(&(stem_list[tid])); step3(&(stem_list[tid])); step4(&(stem_list[tid])); step5(&(stem_list[tid])); stem_list[tid].b[stem_list[tid].k + 1] = 0; } } /*--------------------stemmer definition ends here------------------------*/ #define ARRAYSIZE 1000000 #define A_INC 10000 static int a_max = ARRAYSIZE; static int i_max = INC; /* maximum offset in s */ struct stemmer *stem_list; struct stemmer *gpu_stem_list; #define LETTER(ch) (isupper(ch) || islower(ch)) int load_data(struct stemmer *stem_list, FILE *f) { int a_size = 0; while (TRUE) { int ch = getc(f); if (ch == EOF) return a_size; char *s = (char *)malloc(i_max + 1); if (LETTER(ch)) { int i = 0; while (TRUE) { if (i == i_max) { i_max += INC; s = (char *)realloc(s, i_max + 1); } ch = tolower(ch); /* forces lower case */ stem_list[a_size].b[i] = ch; s[i] = ch; i++; ch = getc(f); if (!LETTER(ch)) { ungetc(ch, f); break; } } stem_list[a_size].k = i - 1; if (a_size == a_max) { a_max += A_INC; stem_list = (struct stemmer *)realloc(stem_list, a_max * sizeof(struct stemmer)); } a_size += 1; } } } int main(int argc, char *argv[]) { if (argc < 2) { fprintf(stderr, "[ERROR] Invalid arguments provided.\n\n"); fprintf(stderr, "Usage: %s [INPUT FILE]\n\n", 
argv[0]); exit(0); } /* Timing */ STATS_INIT("kernel", "gpu_porter_stemming"); PRINT_STAT_STRING("abrv", "gpu_stemmer"); hipEvent_t eStart, eStop; float cuda_elapsedTime; // allocate data FILE *f; f = fopen(argv[1], "r"); if (f == 0) { fprintf(stderr, "File %s not found\n", argv[1]); exit(1); } hipHostMalloc((void **)&stem_list, ARRAYSIZE* sizeof(struct stemmer)); int words = load_data(stem_list, f); PRINT_STAT_INT("words", words); fclose(f); hipEventCreate(&eStart); hipEventCreate(&eStop); hipMalloc((void **)&gpu_stem_list, words * sizeof(struct stemmer)); hipEventRecord(eStart, 0); hipMemcpy(gpu_stem_list, stem_list, words * sizeof(struct stemmer), hipMemcpyHostToDevice); hipEventRecord(eStop, 0); hipEventSynchronize(eStop); hipEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("host_to_device", cuda_elapsedTime); hipEventRecord(eStart, 0); dim3 block(256); dim3 grid; grid.x = ceil(words * 1.0 / block.x); hipEventRecord(eStart, 0); stem_gpu << <grid, block>>> (gpu_stem_list, words); hipEventRecord(eStop, 0); hipEventSynchronize(eStop); hipEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("gpu_stemmer", cuda_elapsedTime); hipEventRecord(eStart, 0); hipMemcpy(stem_list, gpu_stem_list, words * sizeof(struct stemmer), hipMemcpyDeviceToHost); hipEventRecord(eStop, 0); hipEventSynchronize(eStop); hipEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("device_to_host", cuda_elapsedTime); hipEventDestroy(eStart); hipEventDestroy(eStop); STATS_END(); #ifdef TESTING f = fopen("../input/stem_porter.gpu", "w"); for (int i = 0; i < words; ++i) fprintf(f, "%s\n", stem_list[i].b); fclose(f); #endif hipHostFree(stem_list); hipFree(gpu_stem_list); return 0; }
8539c39bc407534980e763ea0dff09b5a1af1dba.cu
/* This is the Porter stemming algorithm, coded up as thread-safe ANSI C by the author. It may be be regarded as cononical, in that it follows the algorithm presented in Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14, no. 3, pp 130-137, only differing from it at the points maked --DEPARTURE-- below. See also http://www.tartarus.org/~martin/PorterStemmer The algorithm as described in the paper could be exactly replicated by adjusting the points of DEPARTURE, but this is barely necessary, because (a) the points of DEPARTURE are definitely improvements, and (b) no encoding of the Porter stemmer I have seen is anything like as exact as this version, even with the points of DEPARTURE! You can compile it on Unix with 'gcc -O3 -o stem stem.c' after which 'stem' takes a list of inputs and sends the stemmed equivalent to stdout. The algorithm as encoded here is particularly fast. Release 2 (the more old-fashioned, non-thread-safe version may be regarded as release 1.) */ #include <stdio.h> #include <stdlib.h> /* for malloc, free */ #include <ctype.h> /* for isupper, islower, tolower */ #include <string.h> /* for memcmp, memmove */ #include <cuda_runtime.h> #include <pthread.h> #include <limits.h> #include <float.h> #include <math.h> #include <sys/time.h> #include "../../utils/timer.h" /* You will probably want to move the following declarations to a central header file. */ struct stemmer; extern struct stemmer *create_stemmer(void); extern void free_stemmer(struct stemmer *z); extern int stem(struct stemmer *z, char *b, int k); /* The main part of the stemming algorithm starts here. 
*/ #define TRUE 1 #define FALSE 0 #define INC 32 /* size units in which s is increased */ /* stemmer is a structure for a few local bits of data, */ struct stemmer { // char *b; /* buffer for word to be stemmed */ char b[INC + 1]; /* buffer for word to be stemmed */ int k; /* offset to the end of the string */ int j; /* a general offset into the string */ }; /* Member b is a buffer holding a word to be stemmed. The letters are in b[0], b[1] ... ending at b[z->k]. Member k is readjusted downwards as the stemming progresses. Zero termination is not in fact used in the algorithm. Note that only lower case sequences are stemmed. Forcing to lower case should be done before stem(...) is called. Typical usage is: struct stemmer * z = create_stemmer(); char b[] = "pencils"; int res = stem(z, b, 6); /- stem the 7 characters of b[0] to b[6]. The result, res, will be 5 (the 's' is removed). -/ free_stemmer(z); */ extern struct stemmer *create_stemmer(void) { return (struct stemmer *)malloc(sizeof(struct stemmer)); /* assume malloc succeeds */ } extern void free_stemmer(struct stemmer *z) { free(z); } /* cons(z, i) is TRUE <=> b[i] is a consonant. ('b' means 'z->b', but here and below we drop 'z->' in comments. */ __host__ __device__ static int cons1(struct stemmer *z, int i) { switch (z->b[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': return FALSE; default: return TRUE; } } __host__ __device__ static int cons(struct stemmer *z, int i) { switch (z->b[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': return FALSE; case 'y': return (i == 0) ? TRUE : !cons1(z, i - 1); default: return TRUE; } } /* m(z) measures the number of consonant sequences between 0 and j. if c is a consonant sequence and v a vowel sequence, and <..> indicates arbitrary presence, <c><v> gives 0 <c>vc<v> gives 1 <c>vcvc<v> gives 2 <c>vcvcvc<v> gives 3 .... 
*/ __host__ __device__ static int m(struct stemmer *z) { int n = 0; int i = 0; int j = z->j; while (TRUE) { if (i > j) return n; if (!cons(z, i)) break; i++; } i++; while (TRUE) { while (TRUE) { if (i > j) return n; if (cons(z, i)) break; i++; } i++; n++; while (TRUE) { if (i > j) return n; if (!cons(z, i)) break; i++; } i++; } } /* vowelinstem(z) is TRUE <=> 0,...j contains a vowel */ __host__ __device__ static int vowelinstem(struct stemmer *z) { int j = z->j; int i; for (i = 0; i <= j; i++) if (!cons(z, i)) return TRUE; return FALSE; } /* doublec(z, j) is TRUE <=> j,(j-1) contain a double consonant. */ __host__ __device__ static int doublec(struct stemmer *z, int j) { char *b = z->b; if (j < 1) return FALSE; if (b[j] != b[j - 1]) return FALSE; return cons(z, j); } /* cvc(z, i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant and also if the second c is not w,x or y. this is used when trying to restore an e at the end of a short word. e.g. cav(e), lov(e), hop(e), crim(e), but snow, box, tray. */ __host__ __device__ static int cvc(struct stemmer *z, int i) { if (i < 2 || !cons(z, i) || cons(z, i - 1) || !cons(z, i - 2)) return FALSE; { int ch = z->b[i]; if (ch == 'w' || ch == 'x' || ch == 'y') return FALSE; } return TRUE; } /* ends(z, s) is TRUE <=> 0,...k ends with the string s. 
*/ __host__ __device__ static int memcmp1(const void *buffer1, const void *buffer2, int count) { if (!count) return (0); while (--count && *(char *)buffer1 == *(char *)buffer2) { buffer1 = (char *)buffer1 + 1; buffer2 = (char *)buffer2 + 1; } return (*((unsigned char *)buffer1) - *((unsigned char *)buffer2)); } __host__ __device__ static int ends(struct stemmer *z, char *s) { int length = s[0]; char *b = z->b; int k = z->k; if (s[length] != b[k]) return FALSE; /* tiny speed-up */ if (length > k + 1) return FALSE; if (memcmp1(b + k - length + 1, s + 1, length) != 0) return FALSE; z->j = k - length; return TRUE; } /* setto(z, s) sets (j+1),...k to the characters in the string s, readjusting k. */ __host__ __device__ void memmove1(void *dst, const void *src, int count) { char *dst_t; char *src_t; if ((unsigned char *)dst <= (unsigned char *)src || (unsigned char *)dst >= ((unsigned char *)src + count)) { dst_t = (char *)dst; src_t = (char *)src; while (count--) { *dst_t++ = *src_t++; } } else { dst_t = (char *)dst + count - 1; src_t = (char *)src + count - 1; while (count--) { *dst_t-- = *src_t--; } } } __host__ __device__ static void setto(struct stemmer *z, char *s) { int length = s[0]; int j = z->j; memmove1(z->b + j + 1, s + 1, length); z->k = j + length; } /* r(z, s) is used further down. */ __host__ __device__ static void r(struct stemmer *z, char *s) { if (m(z) > 0) setto(z, s); } /* step1ab(z) gets rid of plurals and -ed or -ing. e.g. caresses -> caress ponies -> poni ties -> ti caress -> caress cats -> cat feed -> feed agreed -> agree disabled -> disable matting -> mat mating -> mate meeting -> meet milling -> mill messing -> mess meetings -> meet */ /* In stem(z, b, k), b is a char pointer, and the string to be stemmed is from b[0] to b[k] inclusive. Possibly b[k+1] == '\0', but it is not important. The stemmer adjusts the characters b[0] ... b[k] and returns the new end-point of the string, k'. Stemming never increases word length, so 0 <= k' <= k. 
*/ __host__ __device__ static void step1ab(struct stemmer *z) { char *b = z->b; if (b[z->k] == 's') { if (ends(z, "\04" "sses")) z->k -= 2; else if (ends(z, "\03" "ies")) setto(z, "\01" "i"); else if (b[z->k - 1] != 's') z->k--; } if (ends(z, "\03" "eed")) { if (m(z) > 0) z->k--; } else if ((ends(z, "\02" "ed") || ends(z, "\03" "ing")) && vowelinstem(z)) { z->k = z->j; if (ends(z, "\02" "at")) setto(z, "\03" "ate"); else if (ends(z, "\02" "bl")) setto(z, "\03" "ble"); else if (ends(z, "\02" "iz")) setto(z, "\03" "ize"); else if (doublec(z, z->k)) { z->k--; { int ch = b[z->k]; if (ch == 'l' || ch == 's' || ch == 'z') z->k++; } } else if (m(z) == 1 && cvc(z, z->k)) setto(z, "\01" "e"); } } /* step1c(z) turns terminal y to i when there is another vowel in the stem. */ __host__ __device__ static void step1c(struct stemmer *z) { if (ends(z, "\01" "y") && vowelinstem(z)) z->b[z->k] = 'i'; } /* step2(z) maps double suffices to single ones. so -ization ( = -ize plus -ation) maps to -ize etc. note that the string before the suffix must give m(z) > 0. 
*/ __host__ __device__ static void step2(struct stemmer *z) { switch (z->b[z->k - 1]) { case 'a': if (ends(z, "\07" "ational")) { r(z, "\03" "ate"); break; } if (ends(z, "\06" "tional")) { r(z, "\04" "tion"); break; } break; case 'c': if (ends(z, "\04" "enci")) { r(z, "\04" "ence"); break; } if (ends(z, "\04" "anci")) { r(z, "\04" "ance"); break; } break; case 'e': if (ends(z, "\04" "izer")) { r(z, "\03" "ize"); break; } break; case 'l': if (ends(z, "\03" "bli")) { r(z, "\03" "ble"); break; } /*-DEPARTURE-*/ /* To match the published algorithm, replace this line with case 'l': if (ends(z, "\04" "abli")) { r(z, "\04" "able"); break; } */ if (ends(z, "\04" "alli")) { r(z, "\02" "al"); break; } if (ends(z, "\05" "entli")) { r(z, "\03" "ent"); break; } if (ends(z, "\03" "eli")) { r(z, "\01" "e"); break; } if (ends(z, "\05" "ousli")) { r(z, "\03" "ous"); break; } break; case 'o': if (ends(z, "\07" "ization")) { r(z, "\03" "ize"); break; } if (ends(z, "\05" "ation")) { r(z, "\03" "ate"); break; } if (ends(z, "\04" "ator")) { r(z, "\03" "ate"); break; } break; case 's': if (ends(z, "\05" "alism")) { r(z, "\02" "al"); break; } if (ends(z, "\07" "iveness")) { r(z, "\03" "ive"); break; } if (ends(z, "\07" "fulness")) { r(z, "\03" "ful"); break; } if (ends(z, "\07" "ousness")) { r(z, "\03" "ous"); break; } break; case 't': if (ends(z, "\05" "aliti")) { r(z, "\02" "al"); break; } if (ends(z, "\05" "iviti")) { r(z, "\03" "ive"); break; } if (ends(z, "\06" "biliti")) { r(z, "\03" "ble"); break; } break; case 'g': if (ends(z, "\04" "logi")) { r(z, "\03" "log"); break; } /*-DEPARTURE-*/ /* To match the published algorithm, delete this line */ } } /* step3(z) deals with -ic-, -full, -ness etc. similar strategy to step2. 
*/ __host__ __device__ static void step3(struct stemmer *z) { switch (z->b[z->k]) { case 'e': if (ends(z, "\05" "icate")) { r(z, "\02" "ic"); break; } if (ends(z, "\05" "ative")) { r(z, "\00" ""); break; } if (ends(z, "\05" "alize")) { r(z, "\02" "al"); break; } break; case 'i': if (ends(z, "\05" "iciti")) { r(z, "\02" "ic"); break; } break; case 'l': if (ends(z, "\04" "ical")) { r(z, "\02" "ic"); break; } if (ends(z, "\03" "ful")) { r(z, "\00" ""); break; } break; case 's': if (ends(z, "\04" "ness")) { r(z, "\00" ""); break; } break; } } /* step4(z) takes off -ant, -ence etc., in context <c>vcvc<v>. */ __host__ __device__ static void step4(struct stemmer *z) { switch (z->b[z->k - 1]) { case 'a': if (ends(z, "\02" "al")) break; return; case 'c': if (ends(z, "\04" "ance")) break; if (ends(z, "\04" "ence")) break; return; case 'e': if (ends(z, "\02" "er")) break; return; case 'i': if (ends(z, "\02" "ic")) break; return; case 'l': if (ends(z, "\04" "able")) break; if (ends(z, "\04" "ible")) break; return; case 'n': if (ends(z, "\03" "ant")) break; if (ends(z, "\05" "ement")) break; if (ends(z, "\04" "ment")) break; if (ends(z, "\03" "ent")) break; return; case 'o': if (ends(z, "\03" "ion") && (z->b[z->j] == 's' || z->b[z->j] == 't')) break; if (ends(z, "\02" "ou")) break; return; /* takes care of -ous */ case 's': if (ends(z, "\03" "ism")) break; return; case 't': if (ends(z, "\03" "ate")) break; if (ends(z, "\03" "iti")) break; return; case 'u': if (ends(z, "\03" "ous")) break; return; case 'v': if (ends(z, "\03" "ive")) break; return; case 'z': if (ends(z, "\03" "ize")) break; return; default: return; } if (m(z) > 1) z->k = z->j; } /* step5(z) removes a final -e if m(z) > 1, and changes -ll to -l if m(z) > 1. 
*/ __host__ __device__ static void step5(struct stemmer *z) { char *b = z->b; z->j = z->k; if (b[z->k] == 'e') { int a = m(z); if (a > 1 || a == 1 && !cvc(z, z->k - 1)) z->k--; } if (b[z->k] == 'l' && doublec(z, z->k) && m(z) > 1) z->k--; } /* In stem(z, b, k), b is a char pointer, and the string to be stemmed is from b[0] to b[k] inclusive. Possibly b[k+1] == '\0', but it is not important. The stemmer adjusts the characters b[0] ... b[k] and returns the new end-point of the string, k'. Stemming never increases word length, so 0 <= k' <= k. */ __global__ void stem_gpu(struct stemmer *stem_list, int words) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < words) { if (stem_list[tid].k <= 1) { return; } step1ab(&(stem_list[tid])); step1c(&(stem_list[tid])); step2(&(stem_list[tid])); step3(&(stem_list[tid])); step4(&(stem_list[tid])); step5(&(stem_list[tid])); stem_list[tid].b[stem_list[tid].k + 1] = 0; } } /*--------------------stemmer definition ends here------------------------*/ #define ARRAYSIZE 1000000 #define A_INC 10000 static int a_max = ARRAYSIZE; static int i_max = INC; /* maximum offset in s */ struct stemmer *stem_list; struct stemmer *gpu_stem_list; #define LETTER(ch) (isupper(ch) || islower(ch)) int load_data(struct stemmer *stem_list, FILE *f) { int a_size = 0; while (TRUE) { int ch = getc(f); if (ch == EOF) return a_size; char *s = (char *)malloc(i_max + 1); if (LETTER(ch)) { int i = 0; while (TRUE) { if (i == i_max) { i_max += INC; s = (char *)realloc(s, i_max + 1); } ch = tolower(ch); /* forces lower case */ stem_list[a_size].b[i] = ch; s[i] = ch; i++; ch = getc(f); if (!LETTER(ch)) { ungetc(ch, f); break; } } stem_list[a_size].k = i - 1; if (a_size == a_max) { a_max += A_INC; stem_list = (struct stemmer *)realloc(stem_list, a_max * sizeof(struct stemmer)); } a_size += 1; } } } int main(int argc, char *argv[]) { if (argc < 2) { fprintf(stderr, "[ERROR] Invalid arguments provided.\n\n"); fprintf(stderr, "Usage: %s [INPUT FILE]\n\n", 
argv[0]); exit(0); } /* Timing */ STATS_INIT("kernel", "gpu_porter_stemming"); PRINT_STAT_STRING("abrv", "gpu_stemmer"); cudaEvent_t eStart, eStop; float cuda_elapsedTime; // allocate data FILE *f; f = fopen(argv[1], "r"); if (f == 0) { fprintf(stderr, "File %s not found\n", argv[1]); exit(1); } cudaMallocHost((void **)&stem_list, ARRAYSIZE* sizeof(struct stemmer)); int words = load_data(stem_list, f); PRINT_STAT_INT("words", words); fclose(f); cudaEventCreate(&eStart); cudaEventCreate(&eStop); cudaMalloc((void **)&gpu_stem_list, words * sizeof(struct stemmer)); cudaEventRecord(eStart, 0); cudaMemcpy(gpu_stem_list, stem_list, words * sizeof(struct stemmer), cudaMemcpyHostToDevice); cudaEventRecord(eStop, 0); cudaEventSynchronize(eStop); cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("host_to_device", cuda_elapsedTime); cudaEventRecord(eStart, 0); dim3 block(256); dim3 grid; grid.x = ceil(words * 1.0 / block.x); cudaEventRecord(eStart, 0); stem_gpu << <grid, block>>> (gpu_stem_list, words); cudaEventRecord(eStop, 0); cudaEventSynchronize(eStop); cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("gpu_stemmer", cuda_elapsedTime); cudaEventRecord(eStart, 0); cudaMemcpy(stem_list, gpu_stem_list, words * sizeof(struct stemmer), cudaMemcpyDeviceToHost); cudaEventRecord(eStop, 0); cudaEventSynchronize(eStop); cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("device_to_host", cuda_elapsedTime); cudaEventDestroy(eStart); cudaEventDestroy(eStop); STATS_END(); #ifdef TESTING f = fopen("../input/stem_porter.gpu", "w"); for (int i = 0; i < words; ++i) fprintf(f, "%s\n", stem_list[i].b); fclose(f); #endif cudaFreeHost(stem_list); cudaFree(gpu_stem_list); return 0; }
09d644a11fb622f8a4ae8898bf5e2d8686413484.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #define CUDAERROR 6 int main(int argc, char** argv) { hipError_t i; printf("hipSuccess = %d\n", hipSuccess); printf("hipErrorMemoryAllocation = %d\n", hipErrorMemoryAllocation); printf("hipErrorLaunchTimeOut = %d\n", hipErrorLaunchTimeOut); return 0; }
09d644a11fb622f8a4ae8898bf5e2d8686413484.cu
#include <stdio.h> #define CUDAERROR 6 int main(int argc, char** argv) { cudaError_t i; printf("cudaSuccess = %d\n", cudaSuccess); printf("cudaErrorMemoryAllocation = %d\n", cudaErrorMemoryAllocation); printf("cudaErrorLaunchTimeout = %d\n", cudaErrorLaunchTimeout); return 0; }
4100af434feaca4b5f93843877407a571d3849bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include "paddle/operators/accuracy_op.h" #include "paddle/platform/cuda_helper.h" #include "paddle/platform/gpu_info.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <int BlockSize> __global__ void AccuracyCudaKernel(const int N, const int D, const int64_t* Xdata, const int64_t* labeldata, int* correct_data, float* accuracy, int* total_data) { int count = 0; __shared__ int total[BlockSize]; // support only 1 block for (int i = threadIdx.x; i < (N); i += BlockSize) { for (int j = 0; j < D; ++j) { if (Xdata[i * D + j] == labeldata[i]) { ++count; break; } } } total[threadIdx.x] = count; __syncthreads(); // reduce the count with init value 0, and output accuracy. 
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0); if (threadIdx.x == 0) { *correct_data = result; *accuracy = static_cast<float>(result) / static_cast<float>(N); *total_data = N; } } template <typename T> class AccuracyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use GPUPlace."); auto* inference = ctx.Input<Tensor>("Out"); auto* indices = ctx.Input<Tensor>("Indices"); auto* label = ctx.Input<Tensor>("Label"); auto* accuracy = ctx.Output<Tensor>("Accuracy"); auto* correct = ctx.Output<Tensor>("Correct"); auto* total = ctx.Output<Tensor>("Total"); // FIXME(typhoonzero): only support indices currently // if add support for output values, how to detect the data type? const int64_t* indices_data = indices->data<int64_t>(); const int64_t* label_data = label->data<int64_t>(); int* correct_data = correct->mutable_data<int>(ctx.GetPlace()); int* total_data = total->mutable_data<int>(ctx.GetPlace()); float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace()); int num_samples = static_cast<int>(inference->dims()[0]); size_t infer_width = inference->dims()[1]; auto stream = ctx.cuda_device_context().stream(); platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream); if (num_samples == 0) { return; } hipLaunchKernelGGL(( AccuracyCudaKernel< PADDLE_CUDA_NUM_THREADS>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, num_samples, infer_width, indices_data, label_data, correct_data, accuracy_data, total_data); } }; } // namespace operators } // namespace paddle // FIXME(typhoonzero): types of T is for inference data. // label data is always int64 REGISTER_OP_CUDA_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel<float>, paddle::operators::AccuracyOpCUDAKernel<double>);
4100af434feaca4b5f93843877407a571d3849bc.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include "paddle/operators/accuracy_op.h" #include "paddle/platform/cuda_helper.h" #include "paddle/platform/gpu_info.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <int BlockSize> __global__ void AccuracyCudaKernel(const int N, const int D, const int64_t* Xdata, const int64_t* labeldata, int* correct_data, float* accuracy, int* total_data) { int count = 0; __shared__ int total[BlockSize]; // support only 1 block for (int i = threadIdx.x; i < (N); i += BlockSize) { for (int j = 0; j < D; ++j) { if (Xdata[i * D + j] == labeldata[i]) { ++count; break; } } } total[threadIdx.x] = count; __syncthreads(); // reduce the count with init value 0, and output accuracy. 
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0); if (threadIdx.x == 0) { *correct_data = result; *accuracy = static_cast<float>(result) / static_cast<float>(N); *total_data = N; } } template <typename T> class AccuracyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use GPUPlace."); auto* inference = ctx.Input<Tensor>("Out"); auto* indices = ctx.Input<Tensor>("Indices"); auto* label = ctx.Input<Tensor>("Label"); auto* accuracy = ctx.Output<Tensor>("Accuracy"); auto* correct = ctx.Output<Tensor>("Correct"); auto* total = ctx.Output<Tensor>("Total"); // FIXME(typhoonzero): only support indices currently // if add support for output values, how to detect the data type? const int64_t* indices_data = indices->data<int64_t>(); const int64_t* label_data = label->data<int64_t>(); int* correct_data = correct->mutable_data<int>(ctx.GetPlace()); int* total_data = total->mutable_data<int>(ctx.GetPlace()); float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace()); int num_samples = static_cast<int>(inference->dims()[0]); size_t infer_width = inference->dims()[1]; auto stream = ctx.cuda_device_context().stream(); platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream); if (num_samples == 0) { return; } AccuracyCudaKernel< PADDLE_CUDA_NUM_THREADS><<<1, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( num_samples, infer_width, indices_data, label_data, correct_data, accuracy_data, total_data); } }; } // namespace operators } // namespace paddle // FIXME(typhoonzero): types of T is for inference data. // label data is always int64 REGISTER_OP_CUDA_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel<float>, paddle::operators::AccuracyOpCUDAKernel<double>);
df85138a2a8cb7374c2084449b41edbc0d422269.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_gemv_batched_strided_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl" using ThreadBlockShape = cutlass::gemm::GemmShape<1, 128, 16>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 2>; using GemvKernel = cutlass::gemm::kernel::DefaultGemv< ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>; template void megdnn::cuda::cutlass_wrapper:: cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>( BatchedGemmCoord const& problem_size, const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a, const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b, typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c, hipStream_t stream); #pragma GCC diagnostic pop #endif
df85138a2a8cb7374c2084449b41edbc0d422269.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_gemv_batched_strided_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl" using ThreadBlockShape = cutlass::gemm::GemmShape<1, 128, 16>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 2>; using GemvKernel = cutlass::gemm::kernel::DefaultGemv< ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>; template void megdnn::cuda::cutlass_wrapper:: cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>( BatchedGemmCoord const& problem_size, const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a, const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b, typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c, cudaStream_t stream); #pragma GCC diagnostic pop #endif
537da581d9ff701353baad73cf4213222bdd35ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "DamierMath.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void damier(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ void damier(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t) { DamierMath damierMath = DamierMath(n); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; double x; double y; int pixelI;// in [0,h[ int pixelJ;// in [0,w[ int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) // (i,j) domaine ecran // (x,y) domaine math domaineMath.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) damierMath.colorXY(&color, x, y, t); // update color ptrDevPixels[s] = color; s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End 
*| \*---------------------------------------------------------------------*/
537da581d9ff701353baad73cf4213222bdd35ba.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "DamierMath.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void damier(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ void damier(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t) { DamierMath damierMath = DamierMath(n); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; double x; double y; int pixelI;// in [0,h[ int pixelJ;// in [0,w[ int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) // (i,j) domaine ecran // (x,y) domaine math domaineMath.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) damierMath.colorXY(&color, x, y, t); // update color ptrDevPixels[s] = color; s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
360966f61f0758430dfdfa9502e76645a6b3b95e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE #define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/operations.cu", line) #include "awkward/kernels/operations.h" #include <stdio.h> template <typename T, typename C> __global__ void cuda_ListArray_num( C *tonum, const T *fromstarts, const T *fromstops ) { int64_t block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int64_t thread_id = block_id * blockDim.x + threadIdx.x; int64_t start = fromstarts[thread_id]; int64_t stop = fromstops[thread_id]; tonum[thread_id] = (C) (stop - start); } ERROR awkward_ListArray32_num_64( int64_t* tonum, const int32_t* fromstarts, const int32_t* fromstops, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); threads_per_block = dim3(length, 1, 1); } hipLaunchKernelGGL(( cuda_ListArray_num<int32_t, int64_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, tonum, fromstarts, fromstops); hipDeviceSynchronize(); return success(); } ERROR awkward_ListArrayU32_num_64( int64_t* tonum, const uint32_t* fromstarts, const uint32_t* fromstops, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); threads_per_block = dim3(length, 1, 1); } hipLaunchKernelGGL(( cuda_ListArray_num<uint32_t, int64_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, tonum, fromstarts, fromstops); hipDeviceSynchronize(); return success(); } ERROR awkward_ListArray64_num_64( int64_t* tonum, const int64_t* fromstarts, const int64_t* fromstops, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; 
if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); threads_per_block = dim3(length, 1, 1); } hipLaunchKernelGGL(( cuda_ListArray_num<int64_t , int64_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, tonum, fromstarts, fromstops); hipDeviceSynchronize(); return success(); } template <typename T> __global__ void cuda_RegularArray_num( T* tonum, int64_t size, int64_t length) { int64_t block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int64_t thread_id = block_id * blockDim.x + threadIdx.x; if (thread_id < length) { tonum[thread_id] = size; } } ERROR awkward_RegularArray_num_64( int64_t* tonum, int64_t size, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); threads_per_block = dim3(length, 1, 1); } hipLaunchKernelGGL(( cuda_RegularArray_num<int64_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, tonum, size, length); hipDeviceSynchronize(); return success(); }
360966f61f0758430dfdfa9502e76645a6b3b95e.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE #define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/operations.cu", line) #include "awkward/kernels/operations.h" #include <stdio.h> template <typename T, typename C> __global__ void cuda_ListArray_num( C *tonum, const T *fromstarts, const T *fromstops ) { int64_t block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int64_t thread_id = block_id * blockDim.x + threadIdx.x; int64_t start = fromstarts[thread_id]; int64_t stop = fromstops[thread_id]; tonum[thread_id] = (C) (stop - start); } ERROR awkward_ListArray32_num_64( int64_t* tonum, const int32_t* fromstarts, const int32_t* fromstops, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); threads_per_block = dim3(length, 1, 1); } cuda_ListArray_num<int32_t, int64_t><<<blocks_per_grid, threads_per_block>>>( tonum, fromstarts, fromstops); cudaDeviceSynchronize(); return success(); } ERROR awkward_ListArrayU32_num_64( int64_t* tonum, const uint32_t* fromstarts, const uint32_t* fromstops, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); threads_per_block = dim3(length, 1, 1); } cuda_ListArray_num<uint32_t, int64_t><<<blocks_per_grid, threads_per_block>>>( tonum, fromstarts, fromstops); cudaDeviceSynchronize(); return success(); } ERROR awkward_ListArray64_num_64( int64_t* tonum, const int64_t* fromstarts, const int64_t* fromstops, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); 
threads_per_block = dim3(length, 1, 1); } cuda_ListArray_num<int64_t , int64_t><<<blocks_per_grid, threads_per_block>>>( tonum, fromstarts, fromstops); cudaDeviceSynchronize(); return success(); } template <typename T> __global__ void cuda_RegularArray_num( T* tonum, int64_t size, int64_t length) { int64_t block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int64_t thread_id = block_id * blockDim.x + threadIdx.x; if (thread_id < length) { tonum[thread_id] = size; } } ERROR awkward_RegularArray_num_64( int64_t* tonum, int64_t size, int64_t length) { dim3 blocks_per_grid; dim3 threads_per_block; if (length > 1024) { blocks_per_grid = dim3(ceil((length) / 1024.0), 1, 1); threads_per_block = dim3(1024, 1, 1); } else { blocks_per_grid = dim3(1, 1, 1); threads_per_block = dim3(length, 1, 1); } cuda_RegularArray_num<int64_t><<<blocks_per_grid, threads_per_block>>>( tonum, size, length); cudaDeviceSynchronize(); return success(); }
8d2574be5b58c6ec19c102a6cb78b32e4c6f6086.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // matrix_ops.cu // // Created by Hovden Group on 8/17/2020. // Copyright 2020 Jonathan Schwartz. All rights reserved. // #include "matrix_ops.h" #include "shared.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/transform_reduce.h> #include <cmath> // What's the best block size? 8? 16? How can we calculate this? #define BLKXSIZE 8 #define MAX(x,y) (x>y?x:y) #define MIN(x,y) (x<y?x:y) #define ABS(x) (x>0?x:-x) #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) ) __global__ void difference_kernel(float *output, float *vol1, float *vol2, int nx, int ny, int nz) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int ijk = (ny*nz)*i + nz*j + k; if ((i < nx) && (j < ny) && (k < nz)) { output[ijk] = (vol1[ijk] - vol2[ijk]) * (vol1[ijk] - vol2[ijk]); } } __global__ void cuda_positivity_kernel(float *vol, int nx, int ny, int nz) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int ijk = (ny*nz)*i + nz*j + k; if ((i < nx) && (j < ny) && (k < nz)) { if (vol[ijk] < 0.0f) { vol[ijk] = 0.0f; } } } __global__ void cuda_background_kernel(float *vol, int value, int nx, int ny, int nz) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int ijk = (ny*nz)*i + nz*j + k; if ((i < nx) && (j < ny) && (k < nz)) { if (vol[ijk] == 0.0f) { vol[ijk] = value; } } } // MAIN HOST FUNCTION // float cuda_norm(float *input, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { hipSetDevice(gpuIndex); hipError_t err = hipGetLastError(); // Ignore errors caused by calling hipSetDevice multiple times if (err != hipSuccess && err != 
hipErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_input; float norm; /*allocate space for volume on device*/ hipMalloc((void**)&d_input,volSize*sizeof(float)); hipMemcpy(d_input,input,volSize*sizeof(float),hipMemcpyHostToDevice); // Measure Norm of Input Volume square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> input_vec(d_input, d_input + volSize); norm = thrust::transform_reduce(input_vec.begin(), input_vec.end(), unary_op, 0.0f, binary_op); hipDeviceSynchronize(); hipPeekAtLastError(); hipFree(d_input); return norm; } // MAIN HOST FUNCTION // float cuda_sum(float *input, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { hipSetDevice(gpuIndex); hipError_t err = hipGetLastError(); // Ignore errors caused by calling hipSetDevice multiple times if (err != hipSuccess && err != hipErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_input; float sum; /*allocate space for volume on device*/ hipMalloc((void**)&d_input,volSize*sizeof(float)); hipMemcpy(d_input,input,volSize*sizeof(float),hipMemcpyHostToDevice); // Sum up all the Elements thrust::plus<float> binary_op; thrust::device_vector<float> input_vec(d_input, d_input + volSize); sum = thrust::reduce(input_vec.begin(), input_vec.end(), 0.0f, binary_op); hipDeviceSynchronize(); hipPeekAtLastError(); hipFree(d_input); return sum; } float cuda_rmse(float *recon, float *original, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { hipSetDevice(gpuIndex); hipError_t err = hipGetLastError(); // Ignore errors caused by calling hipSetDevice multiple times if (err != hipSuccess && err != hipErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_recon, *d_original, *d_diff; float rmse; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); /*allocate space for volume on device*/ 
hipMalloc((void**)&d_recon,volSize*sizeof(float)); hipMalloc((void**)&d_original,volSize*sizeof(float)); hipMalloc((void**)&d_diff,volSize*sizeof(float)); hipMemcpy(d_recon,recon,volSize*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(d_original,original,volSize*sizeof(float),hipMemcpyHostToDevice); hipMemset(d_diff, 0.0f, volSize*sizeof(float)); hipLaunchKernelGGL(( difference_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_diff, d_recon, d_original, nx, ny, nz); hipDeviceSynchronize(); hipPeekAtLastError(); // Sum up all the Elements thrust::plus<float> binary_op; thrust::device_vector<float> diff_vec(d_diff, d_diff + volSize); rmse = thrust::reduce(diff_vec.begin(), diff_vec.end(), 0.0f, binary_op); hipDeviceSynchronize(); hipPeekAtLastError(); hipFree(d_recon); hipFree(d_original); hipFree(d_diff); return rmse; // return std::sqrt(rmse/(nx*ny*nz)); } float cuda_euclidean_dist(float *vol1, float *vol2, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { hipSetDevice(gpuIndex); hipError_t err = hipGetLastError(); // Ignore errors caused by calling hipSetDevice multiple times if (err != hipSuccess && err != hipErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_vol1, *d_vol2, *d_diff; float L2; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); /*allocate space for volume on device*/ hipMalloc((void**)&d_vol1,volSize*sizeof(float)); hipMalloc((void**)&d_vol2,volSize*sizeof(float)); hipMalloc((void**)&d_diff,volSize*sizeof(float)); hipMemcpy(d_vol1,vol1,volSize*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(d_vol2,vol2,volSize*sizeof(float),hipMemcpyHostToDevice); hipMemset(d_diff, 0.0f, volSize*sizeof(float)); hipLaunchKernelGGL(( difference_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_diff, d_vol1, d_vol2, nx, ny, nz); hipDeviceSynchronize(); hipPeekAtLastError(); // Sum up all the Elements thrust::plus<float> binary_op; 
thrust::device_vector<float> diff_vec(d_diff, d_diff + volSize); L2 = thrust::reduce(diff_vec.begin(), diff_vec.end(), 0.0f, binary_op); hipDeviceSynchronize(); hipPeekAtLastError(); hipFree(d_vol1); hipFree(d_vol2); hipFree(d_diff); hipDeviceSynchronize(); return L2; } void cuda_positivity(float *recon, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { hipSetDevice(gpuIndex); hipError_t err = hipGetLastError(); } int volSize = nx * ny * nz; float *d_recon; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); /*allocate space for volume on device*/ hipMalloc((void**)&d_recon,volSize*sizeof(float)); hipMemcpy(d_recon, recon, volSize*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_positivity_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_recon, nx, ny, nz); hipDeviceSynchronize(); hipPeekAtLastError(); hipMemcpy(recon, d_recon, volSize*sizeof(float), hipMemcpyDeviceToHost); hipFree(d_recon); hipDeviceSynchronize(); } void cuda_set_background(float *vol, int value, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { hipSetDevice(gpuIndex); hipError_t err = hipGetLastError(); } int volSize = nx * ny * nz; float *d_vol; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); // allocate space for volume on device hipMalloc((void**)&d_vol,volSize*sizeof(float)); hipMemcpy(d_vol, vol, volSize*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_background_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_vol, value, nx, ny, nz); hipDeviceSynchronize(); hipPeekAtLastError(); hipMemcpy(vol, d_vol, volSize*sizeof(float), hipMemcpyDeviceToHost); hipFree(d_vol); hipDeviceSynchronize(); }
8d2574be5b58c6ec19c102a6cb78b32e4c6f6086.cu
// // matrix_ops.cu // // Created by Hovden Group on 8/17/2020. // Copyright © 2020 Jonathan Schwartz. All rights reserved. // #include "matrix_ops.h" #include "shared.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/transform_reduce.h> #include <cmath> // What's the best block size? 8? 16? How can we calculate this? #define BLKXSIZE 8 #define MAX(x,y) (x>y?x:y) #define MIN(x,y) (x<y?x:y) #define ABS(x) (x>0?x:-x) #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) ) __global__ void difference_kernel(float *output, float *vol1, float *vol2, int nx, int ny, int nz) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int ijk = (ny*nz)*i + nz*j + k; if ((i < nx) && (j < ny) && (k < nz)) { output[ijk] = (vol1[ijk] - vol2[ijk]) * (vol1[ijk] - vol2[ijk]); } } __global__ void cuda_positivity_kernel(float *vol, int nx, int ny, int nz) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int ijk = (ny*nz)*i + nz*j + k; if ((i < nx) && (j < ny) && (k < nz)) { if (vol[ijk] < 0.0f) { vol[ijk] = 0.0f; } } } __global__ void cuda_background_kernel(float *vol, int value, int nx, int ny, int nz) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int ijk = (ny*nz)*i + nz*j + k; if ((i < nx) && (j < ny) && (k < nz)) { if (vol[ijk] == 0.0f) { vol[ijk] = value; } } } // MAIN HOST FUNCTION // float cuda_norm(float *input, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { cudaSetDevice(gpuIndex); cudaError_t err = cudaGetLastError(); // Ignore errors caused by calling cudaSetDevice multiple times if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_input; 
float norm; /*allocate space for volume on device*/ cudaMalloc((void**)&d_input,volSize*sizeof(float)); cudaMemcpy(d_input,input,volSize*sizeof(float),cudaMemcpyHostToDevice); // Measure Norm of Input Volume square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> input_vec(d_input, d_input + volSize); norm = thrust::transform_reduce(input_vec.begin(), input_vec.end(), unary_op, 0.0f, binary_op); cudaDeviceSynchronize(); cudaPeekAtLastError(); cudaFree(d_input); return norm; } // MAIN HOST FUNCTION // float cuda_sum(float *input, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { cudaSetDevice(gpuIndex); cudaError_t err = cudaGetLastError(); // Ignore errors caused by calling cudaSetDevice multiple times if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_input; float sum; /*allocate space for volume on device*/ cudaMalloc((void**)&d_input,volSize*sizeof(float)); cudaMemcpy(d_input,input,volSize*sizeof(float),cudaMemcpyHostToDevice); // Sum up all the Elements thrust::plus<float> binary_op; thrust::device_vector<float> input_vec(d_input, d_input + volSize); sum = thrust::reduce(input_vec.begin(), input_vec.end(), 0.0f, binary_op); cudaDeviceSynchronize(); cudaPeekAtLastError(); cudaFree(d_input); return sum; } float cuda_rmse(float *recon, float *original, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { cudaSetDevice(gpuIndex); cudaError_t err = cudaGetLastError(); // Ignore errors caused by calling cudaSetDevice multiple times if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_recon, *d_original, *d_diff; float rmse; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); /*allocate space for volume on device*/ cudaMalloc((void**)&d_recon,volSize*sizeof(float)); 
cudaMalloc((void**)&d_original,volSize*sizeof(float)); cudaMalloc((void**)&d_diff,volSize*sizeof(float)); cudaMemcpy(d_recon,recon,volSize*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_original,original,volSize*sizeof(float),cudaMemcpyHostToDevice); cudaMemset(d_diff, 0.0f, volSize*sizeof(float)); difference_kernel<<<dimGrid,dimBlock>>>(d_diff, d_recon, d_original, nx, ny, nz); cudaDeviceSynchronize(); cudaPeekAtLastError(); // Sum up all the Elements thrust::plus<float> binary_op; thrust::device_vector<float> diff_vec(d_diff, d_diff + volSize); rmse = thrust::reduce(diff_vec.begin(), diff_vec.end(), 0.0f, binary_op); cudaDeviceSynchronize(); cudaPeekAtLastError(); cudaFree(d_recon); cudaFree(d_original); cudaFree(d_diff); return rmse; // return std::sqrt(rmse/(nx*ny*nz)); } float cuda_euclidean_dist(float *vol1, float *vol2, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { cudaSetDevice(gpuIndex); cudaError_t err = cudaGetLastError(); // Ignore errors caused by calling cudaSetDevice multiple times if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess) return false; } int volSize = nx * ny * nz; float *d_vol1, *d_vol2, *d_diff; float L2; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); /*allocate space for volume on device*/ cudaMalloc((void**)&d_vol1,volSize*sizeof(float)); cudaMalloc((void**)&d_vol2,volSize*sizeof(float)); cudaMalloc((void**)&d_diff,volSize*sizeof(float)); cudaMemcpy(d_vol1,vol1,volSize*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_vol2,vol2,volSize*sizeof(float),cudaMemcpyHostToDevice); cudaMemset(d_diff, 0.0f, volSize*sizeof(float)); difference_kernel<<<dimGrid,dimBlock>>>(d_diff, d_vol1, d_vol2, nx, ny, nz); cudaDeviceSynchronize(); cudaPeekAtLastError(); // Sum up all the Elements thrust::plus<float> binary_op; thrust::device_vector<float> diff_vec(d_diff, d_diff + volSize); L2 = 
thrust::reduce(diff_vec.begin(), diff_vec.end(), 0.0f, binary_op); cudaDeviceSynchronize(); cudaPeekAtLastError(); cudaFree(d_vol1); cudaFree(d_vol2); cudaFree(d_diff); cudaDeviceSynchronize(); return L2; } void cuda_positivity(float *recon, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { cudaSetDevice(gpuIndex); cudaError_t err = cudaGetLastError(); } int volSize = nx * ny * nz; float *d_recon; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); /*allocate space for volume on device*/ cudaMalloc((void**)&d_recon,volSize*sizeof(float)); cudaMemcpy(d_recon, recon, volSize*sizeof(float), cudaMemcpyHostToDevice); cuda_positivity_kernel<<<dimGrid,dimBlock>>>(d_recon, nx, ny, nz); cudaDeviceSynchronize(); cudaPeekAtLastError(); cudaMemcpy(recon, d_recon, volSize*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_recon); cudaDeviceSynchronize(); } void cuda_set_background(float *vol, int value, int nx, int ny, int nz, int gpuIndex) { // Set GPU Index if (gpuIndex != -1) { cudaSetDevice(gpuIndex); cudaError_t err = cudaGetLastError(); } int volSize = nx * ny * nz; float *d_vol; // Block dim3 dimBlock(BLKXSIZE,BLKXSIZE, BLKXSIZE); // Grid dim3 dimGrid(idivup(nx,BLKXSIZE), idivup(ny,BLKXSIZE), idivup(nz,BLKXSIZE)); // allocate space for volume on device cudaMalloc((void**)&d_vol,volSize*sizeof(float)); cudaMemcpy(d_vol, vol, volSize*sizeof(float), cudaMemcpyHostToDevice); cuda_background_kernel<<<dimGrid,dimBlock>>>(d_vol, value, nx, ny, nz); cudaDeviceSynchronize(); cudaPeekAtLastError(); cudaMemcpy(vol, d_vol, volSize*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_vol); cudaDeviceSynchronize(); }
9ee8f45f8cf8e27f075de3efd1c26175e0c47a10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Authors: Anant Shah and Abhishek Nair Roll No: EE16B105 and EE16B060 */ /* Code to implement the convolution operation on images with a regular sparsity pattern. */ /* --------------------------- CONSTRAINTS ---------------------------------- Input Tensor Format : N x H x W x C Input Tensor Size Limit : 512 x 512 x C Batch Size : Multiple of 2 */ /* --------------------------- LIBRARIES ---------------------------------- */ #include<stdio.h> #include<cuda.h> /* --------------------------- GLOBAL PARAMETERS -------------------------- */ #define BLOCK_SIZE 16 #define BLOCK_DIM_Z 4 #define TILE_WIDTH 16 /* Macro to check for errors in calls to the CUDA API */ #define checkError(e) { \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(0); \ } \ } /* -------------------------- CUDA KERNELS ----------------------------------- */ __device__ bool checksparse(int *d_mask,int cx,int cy,int ni,int H,int W,int K_H,int K_W,int S_BLOCK){ /* Kernel Function to check if an element in the tensor lies within a sparse block or not. We do this by comparing the realtive position of the element in the mask matrix */ /* coordinates of this thread in the mask array */ int s_x = cx/S_BLOCK; /* cx is the x-coordinate of the element in the image */ int s_y = cy/S_BLOCK; /* cy is the y-coordinate of the element in the image */ int x = 0; int y = 0; /* Check if all the elements surrounding the center element are zero by comparing it with the mask values. 
We check those surrounding elements which are valid for convolution with the kernel */ for( int l=-(K_H-1)/2; l <= (K_H-1)/2; l++ ) for( int p=-(K_W-1)/2; p <= (K_W-1)/2; p++ ){ x = cx + l; y = cy + p; s_x = x/S_BLOCK; /* x-coordinate of block in the mask which the element belongs to */ s_y = y/S_BLOCK; /* y-coordinate of block in the mask which the element belongs to */ if( d_mask[ni*(H/S_BLOCK)*(W/S_BLOCK)+s_x*(W/S_BLOCK)+s_y] == 1 ){ return false; } } return true; } /* ***************************************************************************************** */ __global__ void im2col(float *d_tensor, int *d_mask, float *d_mat, unsigned int *row_address, int* d_row_map, int num_images,int N,int H,int W,int C,int K_N,int K_H,int K_W,int S_BLOCK){ /* Kernel Function to convert the sparse tensor to a matrix. We create the matrix such that all the surrounding elements required for convolution are unrolled into a row. We ignore those elements which are completely surrounded by zeros. These element are found with the help of the checksparse() function. Once we store the elements in a matrix, we need a map which stores the coordinates and image id from which that row was produced. This map will be useful when we need to convert the matrix back into the tensor form after multiplication. 
*/ int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int gx = bx*blockDim.x+tx; int gy = by*blockDim.y+ty; int gz = bz*blockDim.z+tz; //checking for validity of thread if(gz<num_images){ if( (gx < H-K_H+1) && (gy < W-K_W+1) ){ int centerx = gx + (K_H-1)/2; /* Corresponding x-coordinate in the acutal tensor */ int centery = gy + (K_W-1)/2; /* Corresponding y-coordinate in the actual tensor */ /* Check if the element needs to be added into the matrix or not */ if( !checksparse(d_mask,centerx,centery,gz,H,W,K_H,K_W,S_BLOCK) ){ unsigned int row_index = atomicAdd( row_address, 1 ); /* Update the number of rows in the matrix */ int col_index = 0; /* Initialize the column index to 0 as the elements are being added along a row */ /* Load the corresponding elements required for convolution into the matrix */ for( int l=-(K_H-1)/2; l <= (K_H-1)/2; l++ ) for( int p=-(K_W-1)/2; p <= (K_W-1)/2; p++ ) for( int q=0; q < C; q++){ /* mat_val = mask_val?mat_val:0 */ d_mat[row_index*K_H*K_W*C+col_index] = d_mask[gz*(H/S_BLOCK)*(W/S_BLOCK)+((int)((centerx+l)/S_BLOCK))*(W/S_BLOCK)+((int)((centery+p)/S_BLOCK))]?d_tensor[gz*H*W*C+(centerx+l)*W*C+(centery+p)*C+q]:0; col_index += 1; } d_row_map[row_index*3+0] = gx; /* Store the original x-coordinate corresponding to a row into a map */ d_row_map[row_index*3+1] = gy; /* Store the original y-coordinate corresponding to a row into a map */ d_row_map[row_index*3+2] = gz; /* Store the image corresponding to a row in a map */ } } } } /* ***************************************************************************************** */ __global__ void col2im(float *o_mat, int *d_row_map, unsigned int *row_num, float *op_img,int H,int W,int K_N,int K_H,int K_W){ /* Kernel Function to convert the matrix multiplication output back to the tensor. 
We will be utilizing the map genereated earlier to reorder the elements appropiately */ int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int gx = bx*blockDim.x+tx; int gy = by*blockDim.y+ty; int ystride = blockDim.y*gridDim.y; /* Iterate over the relevant rows in the output to store it into the output tensor */ while( gy < *row_num ){ //validity check if( gx < K_N ){ op_img[d_row_map[gy*3+2]*(H-K_H+1)*(W-K_W+1)*(K_N) + d_row_map[gy*3+0]*(W-K_W+1)*(K_N) + d_row_map[gy*3+1]*(K_N) + gx] = o_mat[gy*K_N+gx]; } gy += ystride; } } /* ***************************************************************************************** */ __global__ void sparsematmul(float *image_mat,float *kernel_mat,unsigned int *width,int num_kernels,float *output_mat,int K_H,int K_W,int C){ /* Kernel Function to perform matrix multiplication. We utilize shared memory and tiling to perform the matrix multiplication. */ int tx = threadIdx.x; /*Thread-ID in the x-direction */ int ty = threadIdx.y; /*Thread-ID in the y-direction */ __shared__ float image_mat_s[TILE_WIDTH][TILE_WIDTH]; /* Shared memory to be used by threads in a block */ __shared__ float kernel_mat_s[TILE_WIDTH][TILE_WIDTH]; /* Shared memory to be used by therads in a block */ int row = blockIdx.y*blockDim.y + ty; /* row in the output matrix */ int col = blockIdx.x*blockDim.x + tx; /* column in the output matrix */ float pSum = 0.0; for(int m=0;m<(K_W*K_H*C+TILE_WIDTH-1)/TILE_WIDTH;m++){ /* Load Pahse : Load elements cooperatively into the shared memeory */ if(row<*width && (m*TILE_WIDTH+tx)<(K_W*K_H*C) ) image_mat_s[ty][tx] = image_mat[row*K_W*K_H*C+m*TILE_WIDTH+tx]; if( (ty+m*TILE_WIDTH)<(K_W*K_H*C) && col<num_kernels ) kernel_mat_s[ty][tx] = kernel_mat[(ty+m*TILE_WIDTH)*num_kernels+col]; /* This is assuming that the tile is a sqaure */ __syncthreads(); for(int i=0;i<TILE_WIDTH;i++){ pSum += image_mat_s[ty][i]*kernel_mat_s[i][tx]; } __syncthreads(); image_mat_s[ty][tx] = 0.0; /* Setting the 
elements in the shared memory back to 0. This takes care of the corner cases where junk values are stored */ kernel_mat_s[ty][tx] = 0.0; } if(row<*width && col<num_kernels) output_mat[row*num_kernels+col] = pSum; /* Load the result into the output matrix */ } /* ----------------------------- Utility Functions ------------------------------------------- */ void fillTensor(float *tensor,int N,int H,int W,int C){ /* Utility function to fill the tensor */ float (*ctensor)[H][W][C] = (float (*)[H][W][C])tensor; for(int i=0;i<N;i++){ for(int j=0;j<H;j++){ for(int k=0;k<W;k++){ for(int l=0;l<C;l++){ ctensor[i][j][k][l] = i*W*H*C+j*W*C+k*C+l;//(i*l+0.2*j+0.4*k+2*l); } } } } } /* ***************************************************************************************** */ void fillKernel(float *h_kernel_mat,int K_N,int K_H,int K_W,int C){ /* Utility function to fill the kernel */ for(int i=0;i<K_H*K_W*C;i++){ for(int j=0;j<K_N;j++){ if(j%2 == 0) h_kernel_mat[i*K_N+j] = 1;//(i*j+0.2*i+0.3*j); else h_kernel_mat[i*K_N+j] = 1; } } } /* ***************************************************************************************** */ void fillMask(int *mask,int N,int H,int W,int S_BLOCK,int sparsity_perc){ /* Utility function to define the mask. 
Based on the sparsity, the first x elements in each image are set to 0 */ for(int i=0;i<N;i++){ for(int j=0;j<(H/S_BLOCK);j++){ for(int k=0;k<(W/S_BLOCK);k++){ if((j+k) < ((float)sparsity_perc/100)*((H/S_BLOCK)*(W/S_BLOCK))) mask[i*(H/S_BLOCK)*(W/S_BLOCK)+j*(W/S_BLOCK)+k] = 0; else mask[i*(H/S_BLOCK)*(W/S_BLOCK)+j*(W/S_BLOCK)+k] = 1; } } } } /* ***************************************************************************************** */ void print_tensor(float *h_out_tensor,int N,int H,int W,int K_N,int K_H,int K_W){ /* Utility function to print the output tensor to a file */ FILE *fp = fopen("Block_Sparse_Convolution_Output","w"); for(int i=0;i<N;i++){ for(int j=0;j<(H-K_H+1);j++){ for(int k=0;k<(W-K_W+1);k++){ for(int l=0;l<K_N;l++){ fprintf(fp,"%4.4f ",h_out_tensor[i*(H-K_H+1)*(W-K_W+1)*K_N + j*(W-K_W+1)*K_N + k*K_N + l]); } } } fprintf(fp,"\n"); } fclose(fp); } int main(int argc,char **argv){ if(argc!=10){ printf("error : Invalid number of arguments\n"); printf("Format : ./exec num_images img_height img_width img_channels kernel_height kernel_width num_kernels sparsity_percentage sparsity_block_size\n"); exit(EXIT_FAILURE); } int N = atoi(argv[1]); /* Number of images */ int H = atoi(argv[2]); /* Height of one image */ int W = atoi(argv[3]); /* Width of one image */ int C = atoi(argv[4]); /* Number of channels in the image */ int K_H = atoi(argv[5]); /* Height of the kernel */ int K_W = atoi(argv[6]); /* Widht of the kernel */ int K_N = atoi(argv[7]); /* Number of kernels */ int sparsity_perc = atoi(argv[8]); /* Sparsity percentage in the mask */ int S_BLOCK = atoi(argv[9]); /* Size of a sparse block */ float *tensor; /* Pointer to the tensor on the host */ checkError(hipHostMalloc( (void **) &tensor, sizeof(float)*N*H*W*C, 0 )); /* Allocating pinned memory on the CPU so as to facilitate streams */ float *kernel = (float *)malloc(sizeof(float)*K_N*K_H*K_W*C); /* Allocate memory for the kernel on the CPU */ float *h_out_tensor = (float 
*)malloc(sizeof(float)*N*(W-K_W+1)*(H-K_H+1)*K_N) ; // A pointer to the output tensor on the host int num_images = 2; /* Number of images handled by each stream */ if( (H%S_BLOCK != 0) && (W%S_BLOCK != 0)){ printf("Invalid dimensions for input and sparsity blocks. Input tensor dimensions must be integral multiples of sparsity block dimensions\n"); exit(EXIT_FAILURE); } if( N%num_images!=0 ){ printf("error : Number of images is not a multiple of that handled by each stream. Please enter a mulitple of %d as number of images \n",num_images); exit(EXIT_FAILURE); } int *mask; /* Define a pointer to the mask */ checkError( hipHostMalloc((void **)&mask, sizeof(float)*N*(H/S_BLOCK)*(W/S_BLOCK), 0)); /* Allocate pinned memory for the mask to facilitate streams */ fillTensor(tensor,N,H,W,C); /* Fill the tensor from the syntehtic input */ fillKernel(kernel,K_N,K_H,K_W,C); /* Fill the kernel from the synthetic input */ fillMask(mask,N,H,W,S_BLOCK,sparsity_perc); /* Fill the mask array */ float *d_tensor; /* A pointer to the tensor on the device */ float *d_kernel; /* A pointer to the kernel on the device */ float *d_ker_mat; //the matrix for the kernel generated by im2col_ker int *d_mask; /* A pointer to the mask on the device */ float *d_mat; // d_mat is the matrix used for matrix multiplication int *d_row_map; /* A pointer to the map array; which holds the coordinate ids for each row in the matrix */ float *d_out_mat; // Output matrix float *d_out_tensor; // Pointer to the output tensor on the device unsigned int *row_address; /* A pointer to keep a track of how many rows have been added to the matrix per stream */ int NUM_STREAMS = (N+num_images-1)/num_images; /* Number of streams defined to handle all the images */ hipEvent_t start,stop; /* CUDA events to time the program */ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); /*********** Allocate memory on the device for each of the matrices ***************/ checkError(hipMalloc((void **)&d_tensor, 
sizeof(float)*N*H*W*C )); checkError(hipMalloc((void **)&d_kernel, sizeof(float)*K_N*K_H*K_W*C)); checkError(hipMalloc((void **)&d_mask, sizeof(int)*N*(H/S_BLOCK)*(W/S_BLOCK) )); //allocating data for the entire dense matrix //need to find a way to know how many columns there will be in the matrix checkError(hipMalloc((void **)&d_mat, sizeof(float)*K_H*K_W*C*(H-K_H+1)*(W-K_W+1)*N)); checkError(hipMalloc((void **)&d_row_map, sizeof(int)*(H-K_H+1)*(W-K_W+1)*N*3)); checkError(hipMalloc((void **)&d_out_tensor,sizeof(float)*N*(H-K_H+1)*(W-K_W+1)*K_N)); //allocating memory for N row_addresses checkError(hipMalloc((void **)&row_address, sizeof(unsigned int)*NUM_STREAMS)); checkError(hipMalloc((void **)&d_ker_mat, sizeof(float)*K_N*K_H*K_W*C)); checkError(hipMemcpy( d_kernel, kernel, sizeof(float)*K_N*K_H*K_W*C, hipMemcpyHostToDevice )); checkError(hipMemset( d_mat, 0, sizeof(float)*K_H*K_W*C*(H-K_H+1)*(W-K_W+1)*N )); checkError(hipMemset( row_address, 0, sizeof(unsigned int)*NUM_STREAMS )); checkError(hipMalloc( (void **)&d_out_mat, sizeof(float)*K_N*(H-K_H+1)*(W-K_W+1)*N)); /**********************************************************************************/ dim3 threads(BLOCK_SIZE, BLOCK_SIZE, BLOCK_DIM_Z); /* Block dimensions for the im2col operation */ dim3 matthreads(BLOCK_SIZE,BLOCK_SIZE,(num_images+BLOCK_DIM_Z-1)/BLOCK_DIM_Z); /* Grid dimensions for the im2col operation */ dim3 blocks((H+BLOCK_SIZE-1)/BLOCK_SIZE,(W+BLOCK_SIZE-1)/BLOCK_SIZE); /*Grid definition for the im2col kernel operation */ dim3 matblocks((K_N+BLOCK_SIZE-1)/BLOCK_SIZE,((H-K_H+1)*(W-K_W+1)*num_images+BLOCK_SIZE-1)/BLOCK_SIZE); // 2-D grid dimension to perform the matrix multiplication hipStream_t streams[NUM_STREAMS]; /* Declaring a set of CUDA streams */ for( int i=0; i<NUM_STREAMS; i++ ) hipStreamCreate(&streams[i]); /* Initializing a set of streams to work on a set of each image */ if(N<num_images) num_images = N; /* To take care of the case of illegal memory access if the number of images are 
less than that assigned to each stream */ /**************** Initialize a set of offsets for the streams *****************/ int offset = 0; int mask_offset = 0; int mat_offset = 0; int map_offset = 0; int o_offset = 0; int om_offset = 0; /*****************************************************************************/ for(int j=0; j<NUM_STREAMS; j++){ /* Initialize a set of off-sets for each stream */ offset = j*H*W*C*num_images; mask_offset = j*(H/S_BLOCK)*(W/S_BLOCK)*num_images; mat_offset = K_H*K_W*C*(H-K_H+1)*(W-K_W+1)*j*num_images; map_offset = 3*(H-K_H+1)*(W-K_W+1)*j*num_images; o_offset = (H-K_H+1)*(W-K_W+1)*K_N*j*num_images; om_offset = K_N*(H-K_H+1)*(W-K_W+1)*j*num_images; /* Asynchronously copy the images and kernels to the device */ checkError(hipMemcpyAsync( &d_tensor[offset], &tensor[offset], sizeof(float)*H*W*C*num_images, hipMemcpyHostToDevice, streams[j] )); checkError(hipMemcpyAsync( &d_mask[mask_offset], &mask[mask_offset], sizeof(int)*(H/S_BLOCK)*(W/S_BLOCK)*num_images, hipMemcpyHostToDevice, streams[j] )); /* Transform the tensor into the matrix form */ hipLaunchKernelGGL(( im2col), dim3(blocks), dim3(threads), 0, streams[j], &d_tensor[offset],&d_mask[mask_offset], &d_mat[mat_offset], &row_address[j], &d_row_map[map_offset], num_images,N,H,W,C,K_N,K_H,K_W,S_BLOCK); hipError_t error = hipGetLastError(); if( error != hipSuccess ) { printf("Error in launching kernel1 in stream %d\n",j); } checkError(error); /* Perform the matrix multiplication */ hipLaunchKernelGGL(( sparsematmul), dim3(matblocks),dim3(matthreads), 0, streams[j], &d_mat[mat_offset], d_kernel, &row_address[j], K_N, &d_out_mat[om_offset],K_H,K_W,C); error = hipGetLastError(); if( error != hipSuccess ) { printf("Error in launching kernel 2 in stream %d\n",j); } checkError(error); /* Convert the matrix multiplication output back to the tensor */ hipLaunchKernelGGL(( col2im), dim3(matblocks),dim3(matthreads), 0 , streams[j], &d_out_mat[om_offset], &d_row_map[map_offset], &row_address[j], 
&d_out_tensor[o_offset],H,W,K_N,K_H,K_W); error = hipGetLastError(); if( error != hipSuccess ) { printf("Error in launching kernel 3 in stream %d\n",j); } checkError(error); /* Asynchronously copy the tensor from the device to the host */ checkError(hipMemcpyAsync(&h_out_tensor[o_offset], &d_out_tensor[o_offset], sizeof(float)*(H-K_H+1)*(W-K_W+1)*K_N*num_images, hipMemcpyDeviceToHost,streams[j])); } hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); print_tensor(h_out_tensor,N,H,W,K_N,K_H,K_W); float run_time = 0.0; hipEventElapsedTime(&run_time,start,stop); //printf("%d %d %d %d %d %d %d %d %.4f\n",N,H,W,C,K_N,K_H,K_W,sparsity_perc,run_time); printf("Output : Block_Sparse_Convolution_Output\nRunning Time : %f (ms) \nConfiguration Parameters ::\nBatch Size : %d\nImage Height : %d\nImage Width : %d\nNo. of Channels : %d\nKernel Height : %d\nKernel Width : %d\nNo. of Kernels : %d\nPercentage Sparsity : %d\n ",run_time,N,H,W,C,K_H,K_W,K_N,sparsity_perc); for( int i=0; i<NUM_STREAMS; i++ ) hipStreamDestroy(streams[i]); return 0; }
9ee8f45f8cf8e27f075de3efd1c26175e0c47a10.cu
/* Authors: Anant Shah and Abhishek Nair Roll No: EE16B105 and EE16B060 */ /* Code to implement the convolution operation on images with a regular sparsity pattern. */ /* --------------------------- CONSTRAINTS ---------------------------------- Input Tensor Format : N x H x W x C Input Tensor Size Limit : 512 x 512 x C Batch Size : Multiple of 2 */ /* --------------------------- LIBRARIES ---------------------------------- */ #include<stdio.h> #include<cuda.h> /* --------------------------- GLOBAL PARAMETERS -------------------------- */ #define BLOCK_SIZE 16 #define BLOCK_DIM_Z 4 #define TILE_WIDTH 16 /* Macro to check for errors in calls to the CUDA API */ #define checkError(e) { \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(0); \ } \ } /* -------------------------- CUDA KERNELS ----------------------------------- */ __device__ bool checksparse(int *d_mask,int cx,int cy,int ni,int H,int W,int K_H,int K_W,int S_BLOCK){ /* Kernel Function to check if an element in the tensor lies within a sparse block or not. We do this by comparing the realtive position of the element in the mask matrix */ /* coordinates of this thread in the mask array */ int s_x = cx/S_BLOCK; /* cx is the x-coordinate of the element in the image */ int s_y = cy/S_BLOCK; /* cy is the y-coordinate of the element in the image */ int x = 0; int y = 0; /* Check if all the elements surrounding the center element are zero by comparing it with the mask values. 
We check those surrounding elements which are valid for convolution with the kernel */ for( int l=-(K_H-1)/2; l <= (K_H-1)/2; l++ ) for( int p=-(K_W-1)/2; p <= (K_W-1)/2; p++ ){ x = cx + l; y = cy + p; s_x = x/S_BLOCK; /* x-coordinate of block in the mask which the element belongs to */ s_y = y/S_BLOCK; /* y-coordinate of block in the mask which the element belongs to */ if( d_mask[ni*(H/S_BLOCK)*(W/S_BLOCK)+s_x*(W/S_BLOCK)+s_y] == 1 ){ return false; } } return true; } /* ***************************************************************************************** */ __global__ void im2col(float *d_tensor, int *d_mask, float *d_mat, unsigned int *row_address, int* d_row_map, int num_images,int N,int H,int W,int C,int K_N,int K_H,int K_W,int S_BLOCK){ /* Kernel Function to convert the sparse tensor to a matrix. We create the matrix such that all the surrounding elements required for convolution are unrolled into a row. We ignore those elements which are completely surrounded by zeros. These element are found with the help of the checksparse() function. Once we store the elements in a matrix, we need a map which stores the coordinates and image id from which that row was produced. This map will be useful when we need to convert the matrix back into the tensor form after multiplication. 
*/ int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int gx = bx*blockDim.x+tx; int gy = by*blockDim.y+ty; int gz = bz*blockDim.z+tz; //checking for validity of thread if(gz<num_images){ if( (gx < H-K_H+1) && (gy < W-K_W+1) ){ int centerx = gx + (K_H-1)/2; /* Corresponding x-coordinate in the acutal tensor */ int centery = gy + (K_W-1)/2; /* Corresponding y-coordinate in the actual tensor */ /* Check if the element needs to be added into the matrix or not */ if( !checksparse(d_mask,centerx,centery,gz,H,W,K_H,K_W,S_BLOCK) ){ unsigned int row_index = atomicAdd( row_address, 1 ); /* Update the number of rows in the matrix */ int col_index = 0; /* Initialize the column index to 0 as the elements are being added along a row */ /* Load the corresponding elements required for convolution into the matrix */ for( int l=-(K_H-1)/2; l <= (K_H-1)/2; l++ ) for( int p=-(K_W-1)/2; p <= (K_W-1)/2; p++ ) for( int q=0; q < C; q++){ /* mat_val = mask_val?mat_val:0 */ d_mat[row_index*K_H*K_W*C+col_index] = d_mask[gz*(H/S_BLOCK)*(W/S_BLOCK)+((int)((centerx+l)/S_BLOCK))*(W/S_BLOCK)+((int)((centery+p)/S_BLOCK))]?d_tensor[gz*H*W*C+(centerx+l)*W*C+(centery+p)*C+q]:0; col_index += 1; } d_row_map[row_index*3+0] = gx; /* Store the original x-coordinate corresponding to a row into a map */ d_row_map[row_index*3+1] = gy; /* Store the original y-coordinate corresponding to a row into a map */ d_row_map[row_index*3+2] = gz; /* Store the image corresponding to a row in a map */ } } } } /* ***************************************************************************************** */ __global__ void col2im(float *o_mat, int *d_row_map, unsigned int *row_num, float *op_img,int H,int W,int K_N,int K_H,int K_W){ /* Kernel Function to convert the matrix multiplication output back to the tensor. 
We will be utilizing the map genereated earlier to reorder the elements appropiately */ int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int gx = bx*blockDim.x+tx; int gy = by*blockDim.y+ty; int ystride = blockDim.y*gridDim.y; /* Iterate over the relevant rows in the output to store it into the output tensor */ while( gy < *row_num ){ //validity check if( gx < K_N ){ op_img[d_row_map[gy*3+2]*(H-K_H+1)*(W-K_W+1)*(K_N) + d_row_map[gy*3+0]*(W-K_W+1)*(K_N) + d_row_map[gy*3+1]*(K_N) + gx] = o_mat[gy*K_N+gx]; } gy += ystride; } } /* ***************************************************************************************** */ __global__ void sparsematmul(float *image_mat,float *kernel_mat,unsigned int *width,int num_kernels,float *output_mat,int K_H,int K_W,int C){ /* Kernel Function to perform matrix multiplication. We utilize shared memory and tiling to perform the matrix multiplication. */ int tx = threadIdx.x; /*Thread-ID in the x-direction */ int ty = threadIdx.y; /*Thread-ID in the y-direction */ __shared__ float image_mat_s[TILE_WIDTH][TILE_WIDTH]; /* Shared memory to be used by threads in a block */ __shared__ float kernel_mat_s[TILE_WIDTH][TILE_WIDTH]; /* Shared memory to be used by therads in a block */ int row = blockIdx.y*blockDim.y + ty; /* row in the output matrix */ int col = blockIdx.x*blockDim.x + tx; /* column in the output matrix */ float pSum = 0.0; for(int m=0;m<(K_W*K_H*C+TILE_WIDTH-1)/TILE_WIDTH;m++){ /* Load Pahse : Load elements cooperatively into the shared memeory */ if(row<*width && (m*TILE_WIDTH+tx)<(K_W*K_H*C) ) image_mat_s[ty][tx] = image_mat[row*K_W*K_H*C+m*TILE_WIDTH+tx]; if( (ty+m*TILE_WIDTH)<(K_W*K_H*C) && col<num_kernels ) kernel_mat_s[ty][tx] = kernel_mat[(ty+m*TILE_WIDTH)*num_kernels+col]; /* This is assuming that the tile is a sqaure */ __syncthreads(); for(int i=0;i<TILE_WIDTH;i++){ pSum += image_mat_s[ty][i]*kernel_mat_s[i][tx]; } __syncthreads(); image_mat_s[ty][tx] = 0.0; /* Setting the 
elements in the shared memory back to 0. This takes care of the corner cases where junk values are stored */ kernel_mat_s[ty][tx] = 0.0; } if(row<*width && col<num_kernels) output_mat[row*num_kernels+col] = pSum; /* Load the result into the output matrix */ } /* ----------------------------- Utility Functions ------------------------------------------- */ void fillTensor(float *tensor,int N,int H,int W,int C){ /* Utility function to fill the tensor */ float (*ctensor)[H][W][C] = (float (*)[H][W][C])tensor; for(int i=0;i<N;i++){ for(int j=0;j<H;j++){ for(int k=0;k<W;k++){ for(int l=0;l<C;l++){ ctensor[i][j][k][l] = i*W*H*C+j*W*C+k*C+l;//(i*l+0.2*j+0.4*k+2*l); } } } } } /* ***************************************************************************************** */ void fillKernel(float *h_kernel_mat,int K_N,int K_H,int K_W,int C){ /* Utility function to fill the kernel */ for(int i=0;i<K_H*K_W*C;i++){ for(int j=0;j<K_N;j++){ if(j%2 == 0) h_kernel_mat[i*K_N+j] = 1;//(i*j+0.2*i+0.3*j); else h_kernel_mat[i*K_N+j] = 1; } } } /* ***************************************************************************************** */ void fillMask(int *mask,int N,int H,int W,int S_BLOCK,int sparsity_perc){ /* Utility function to define the mask. 
Based on the sparsity, the first x elements in each image are set to 0 */ for(int i=0;i<N;i++){ for(int j=0;j<(H/S_BLOCK);j++){ for(int k=0;k<(W/S_BLOCK);k++){ if((j+k) < ((float)sparsity_perc/100)*((H/S_BLOCK)*(W/S_BLOCK))) mask[i*(H/S_BLOCK)*(W/S_BLOCK)+j*(W/S_BLOCK)+k] = 0; else mask[i*(H/S_BLOCK)*(W/S_BLOCK)+j*(W/S_BLOCK)+k] = 1; } } } } /* ***************************************************************************************** */ void print_tensor(float *h_out_tensor,int N,int H,int W,int K_N,int K_H,int K_W){ /* Utility function to print the output tensor to a file */ FILE *fp = fopen("Block_Sparse_Convolution_Output","w"); for(int i=0;i<N;i++){ for(int j=0;j<(H-K_H+1);j++){ for(int k=0;k<(W-K_W+1);k++){ for(int l=0;l<K_N;l++){ fprintf(fp,"%4.4f ",h_out_tensor[i*(H-K_H+1)*(W-K_W+1)*K_N + j*(W-K_W+1)*K_N + k*K_N + l]); } } } fprintf(fp,"\n"); } fclose(fp); } int main(int argc,char **argv){ if(argc!=10){ printf("error : Invalid number of arguments\n"); printf("Format : ./exec num_images img_height img_width img_channels kernel_height kernel_width num_kernels sparsity_percentage sparsity_block_size\n"); exit(EXIT_FAILURE); } int N = atoi(argv[1]); /* Number of images */ int H = atoi(argv[2]); /* Height of one image */ int W = atoi(argv[3]); /* Width of one image */ int C = atoi(argv[4]); /* Number of channels in the image */ int K_H = atoi(argv[5]); /* Height of the kernel */ int K_W = atoi(argv[6]); /* Widht of the kernel */ int K_N = atoi(argv[7]); /* Number of kernels */ int sparsity_perc = atoi(argv[8]); /* Sparsity percentage in the mask */ int S_BLOCK = atoi(argv[9]); /* Size of a sparse block */ float *tensor; /* Pointer to the tensor on the host */ checkError(cudaHostAlloc( (void **) &tensor, sizeof(float)*N*H*W*C, 0 )); /* Allocating pinned memory on the CPU so as to facilitate streams */ float *kernel = (float *)malloc(sizeof(float)*K_N*K_H*K_W*C); /* Allocate memory for the kernel on the CPU */ float *h_out_tensor = (float 
*)malloc(sizeof(float)*N*(W-K_W+1)*(H-K_H+1)*K_N) ; // A pointer to the output tensor on the host int num_images = 2; /* Number of images handled by each stream */ if( (H%S_BLOCK != 0) && (W%S_BLOCK != 0)){ printf("Invalid dimensions for input and sparsity blocks. Input tensor dimensions must be integral multiples of sparsity block dimensions\n"); exit(EXIT_FAILURE); } if( N%num_images!=0 ){ printf("error : Number of images is not a multiple of that handled by each stream. Please enter a mulitple of %d as number of images \n",num_images); exit(EXIT_FAILURE); } int *mask; /* Define a pointer to the mask */ checkError( cudaHostAlloc((void **)&mask, sizeof(float)*N*(H/S_BLOCK)*(W/S_BLOCK), 0)); /* Allocate pinned memory for the mask to facilitate streams */ fillTensor(tensor,N,H,W,C); /* Fill the tensor from the syntehtic input */ fillKernel(kernel,K_N,K_H,K_W,C); /* Fill the kernel from the synthetic input */ fillMask(mask,N,H,W,S_BLOCK,sparsity_perc); /* Fill the mask array */ float *d_tensor; /* A pointer to the tensor on the device */ float *d_kernel; /* A pointer to the kernel on the device */ float *d_ker_mat; //the matrix for the kernel generated by im2col_ker int *d_mask; /* A pointer to the mask on the device */ float *d_mat; // d_mat is the matrix used for matrix multiplication int *d_row_map; /* A pointer to the map array; which holds the coordinate ids for each row in the matrix */ float *d_out_mat; // Output matrix float *d_out_tensor; // Pointer to the output tensor on the device unsigned int *row_address; /* A pointer to keep a track of how many rows have been added to the matrix per stream */ int NUM_STREAMS = (N+num_images-1)/num_images; /* Number of streams defined to handle all the images */ cudaEvent_t start,stop; /* CUDA events to time the program */ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); /*********** Allocate memory on the device for each of the matrices ***************/ checkError(cudaMalloc((void **)&d_tensor, 
sizeof(float)*N*H*W*C )); checkError(cudaMalloc((void **)&d_kernel, sizeof(float)*K_N*K_H*K_W*C)); checkError(cudaMalloc((void **)&d_mask, sizeof(int)*N*(H/S_BLOCK)*(W/S_BLOCK) )); //allocating data for the entire dense matrix //need to find a way to know how many columns there will be in the matrix checkError(cudaMalloc((void **)&d_mat, sizeof(float)*K_H*K_W*C*(H-K_H+1)*(W-K_W+1)*N)); checkError(cudaMalloc((void **)&d_row_map, sizeof(int)*(H-K_H+1)*(W-K_W+1)*N*3)); checkError(cudaMalloc((void **)&d_out_tensor,sizeof(float)*N*(H-K_H+1)*(W-K_W+1)*K_N)); //allocating memory for N row_addresses checkError(cudaMalloc((void **)&row_address, sizeof(unsigned int)*NUM_STREAMS)); checkError(cudaMalloc((void **)&d_ker_mat, sizeof(float)*K_N*K_H*K_W*C)); checkError(cudaMemcpy( d_kernel, kernel, sizeof(float)*K_N*K_H*K_W*C, cudaMemcpyHostToDevice )); checkError(cudaMemset( d_mat, 0, sizeof(float)*K_H*K_W*C*(H-K_H+1)*(W-K_W+1)*N )); checkError(cudaMemset( row_address, 0, sizeof(unsigned int)*NUM_STREAMS )); checkError(cudaMalloc( (void **)&d_out_mat, sizeof(float)*K_N*(H-K_H+1)*(W-K_W+1)*N)); /**********************************************************************************/ dim3 threads(BLOCK_SIZE, BLOCK_SIZE, BLOCK_DIM_Z); /* Block dimensions for the im2col operation */ dim3 matthreads(BLOCK_SIZE,BLOCK_SIZE,(num_images+BLOCK_DIM_Z-1)/BLOCK_DIM_Z); /* Grid dimensions for the im2col operation */ dim3 blocks((H+BLOCK_SIZE-1)/BLOCK_SIZE,(W+BLOCK_SIZE-1)/BLOCK_SIZE); /*Grid definition for the im2col kernel operation */ dim3 matblocks((K_N+BLOCK_SIZE-1)/BLOCK_SIZE,((H-K_H+1)*(W-K_W+1)*num_images+BLOCK_SIZE-1)/BLOCK_SIZE); // 2-D grid dimension to perform the matrix multiplication cudaStream_t streams[NUM_STREAMS]; /* Declaring a set of CUDA streams */ for( int i=0; i<NUM_STREAMS; i++ ) cudaStreamCreate(&streams[i]); /* Initializing a set of streams to work on a set of each image */ if(N<num_images) num_images = N; /* To take care of the case of illegal memory access if the number 
of images are less than that assigned to each stream */ /**************** Initialize a set of offsets for the streams *****************/ int offset = 0; int mask_offset = 0; int mat_offset = 0; int map_offset = 0; int o_offset = 0; int om_offset = 0; /*****************************************************************************/ for(int j=0; j<NUM_STREAMS; j++){ /* Initialize a set of off-sets for each stream */ offset = j*H*W*C*num_images; mask_offset = j*(H/S_BLOCK)*(W/S_BLOCK)*num_images; mat_offset = K_H*K_W*C*(H-K_H+1)*(W-K_W+1)*j*num_images; map_offset = 3*(H-K_H+1)*(W-K_W+1)*j*num_images; o_offset = (H-K_H+1)*(W-K_W+1)*K_N*j*num_images; om_offset = K_N*(H-K_H+1)*(W-K_W+1)*j*num_images; /* Asynchronously copy the images and kernels to the device */ checkError(cudaMemcpyAsync( &d_tensor[offset], &tensor[offset], sizeof(float)*H*W*C*num_images, cudaMemcpyHostToDevice, streams[j] )); checkError(cudaMemcpyAsync( &d_mask[mask_offset], &mask[mask_offset], sizeof(int)*(H/S_BLOCK)*(W/S_BLOCK)*num_images, cudaMemcpyHostToDevice, streams[j] )); /* Transform the tensor into the matrix form */ im2col<<<blocks, threads, 0, streams[j]>>>(&d_tensor[offset],&d_mask[mask_offset], &d_mat[mat_offset], &row_address[j], &d_row_map[map_offset], num_images,N,H,W,C,K_N,K_H,K_W,S_BLOCK); cudaError_t error = cudaGetLastError(); if( error != cudaSuccess ) { printf("Error in launching kernel1 in stream %d\n",j); } checkError(error); /* Perform the matrix multiplication */ sparsematmul<<<matblocks,matthreads, 0, streams[j]>>>(&d_mat[mat_offset], d_kernel, &row_address[j], K_N, &d_out_mat[om_offset],K_H,K_W,C); error = cudaGetLastError(); if( error != cudaSuccess ) { printf("Error in launching kernel 2 in stream %d\n",j); } checkError(error); /* Convert the matrix multiplication output back to the tensor */ col2im<<<matblocks,matthreads, 0 , streams[j]>>>(&d_out_mat[om_offset], &d_row_map[map_offset], &row_address[j], &d_out_tensor[o_offset],H,W,K_N,K_H,K_W); error = cudaGetLastError(); 
if( error != cudaSuccess ) { printf("Error in launching kernel 3 in stream %d\n",j); } checkError(error); /* Asynchronously copy the tensor from the device to the host */ checkError(cudaMemcpyAsync(&h_out_tensor[o_offset], &d_out_tensor[o_offset], sizeof(float)*(H-K_H+1)*(W-K_W+1)*K_N*num_images, cudaMemcpyDeviceToHost,streams[j])); } cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); print_tensor(h_out_tensor,N,H,W,K_N,K_H,K_W); float run_time = 0.0; cudaEventElapsedTime(&run_time,start,stop); //printf("%d %d %d %d %d %d %d %d %.4f\n",N,H,W,C,K_N,K_H,K_W,sparsity_perc,run_time); printf("Output : Block_Sparse_Convolution_Output\nRunning Time : %f (ms) \nConfiguration Parameters ::\nBatch Size : %d\nImage Height : %d\nImage Width : %d\nNo. of Channels : %d\nKernel Height : %d\nKernel Width : %d\nNo. of Kernels : %d\nPercentage Sparsity : %d\n ",run_time,N,H,W,C,K_H,K_W,K_N,sparsity_perc); for( int i=0; i<NUM_STREAMS; i++ ) cudaStreamDestroy(streams[i]); return 0; }
b97a356278a099305dcd45b796c3878470b16892.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2020 ETH Zurich. All Rights Reserved. #include "from_mesh.h" #include "drivers/mesh.h" #include "kernels/api.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/rigid_object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/rigid/operations.h> #include <mirheo/core/utils/kernel_launch.h> namespace mirheo { BounceFromMesh::BounceFromMesh(const MirState *state, const std::string& name, VarBounceKernel varBounceKernel) : Bouncer(state, name), varBounceKernel_(varBounceKernel) {} BounceFromMesh::~BounceFromMesh() = default; void BounceFromMesh::setup(ObjectVector *ov) { Bouncer::setup(ov); // If the object is rigid, we need to collect the forces into the RigidMotion rov_ = dynamic_cast<RigidObjectVector*> (ov); // for NON-rigid objects: // // old positions HAVE to be known when the mesh travels to other ranks // shift HAS be applied as well // // for Rigid: // old motions HAVE to be there and communicated and shifted if (rov_ == nullptr) ov->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::Active, DataManager::ShiftMode::Active); else ov->requireDataPerObject<RigidMotion> (channel_names::oldMotions, DataManager::PersistenceMode::Active, DataManager::ShiftMode::Active); } void BounceFromMesh::setPrerequisites(ParticleVector *pv) { // do not set it to persistent because bounce happens after integration pv->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::None, DataManager::ShiftMode::Active); } std::vector<std::string> BounceFromMesh::getChannelsToBeExchanged() const { if (rov_) return {channel_names::motions, channel_names::oldMotions}; else return {channel_names::oldPositions}; } std::vector<std::string> BounceFromMesh::getChannelsToBeSentBack() const { if (rov_) return {channel_names::motions}; else // return {channel_names::forces}; return {}; 
} void BounceFromMesh::exec(ParticleVector *pv, CellList *cl, ParticleVectorLocality locality, hipStream_t stream) { auto activeOV = ov_->get(locality); debug("Bouncing %d '%s' particles from %d '%s' objects (%s)", pv->local()->size(), pv->getCName(), activeOV->getNumObjects(), ov_->getCName(), getParticleVectorLocalityStr(locality).c_str()); ov_->findExtentAndCOM(stream, locality); const int totalTriangles = ov_->mesh->getNtriangles() * activeOV->getNumObjects(); // Set maximum possible number of _coarse_ and _fine_ collisions with triangles // In case of crash, the estimate should be increased const int maxCoarseCollisions = static_cast<int>(coarseCollisionsPerTri_ * static_cast<real>(totalTriangles)); coarseTable_.collisionTable.resize_anew(maxCoarseCollisions); coarseTable_.nCollisions.clear(stream); mesh_bounce_kernels::TriangleTable devCoarseTable { maxCoarseCollisions, coarseTable_.nCollisions.devPtr(), coarseTable_.collisionTable.devPtr() }; const int maxFineCollisions = static_cast<int>(fineCollisionsPerTri_ * static_cast<real>(totalTriangles)); fineTable_.collisionTable.resize_anew(maxFineCollisions); fineTable_.nCollisions.clear(stream); mesh_bounce_kernels::TriangleTable devFineTable { maxFineCollisions, fineTable_.nCollisions.devPtr(), fineTable_.collisionTable.devPtr() }; // Setup collision times array. 
For speed and simplicity initial time will be 0, // and after the collisions detected its i-th element will be t_i-1.0_r, where 0 <= t_i <= 1 // is the collision time, or 0 if no collision with the particle found collisionTimes_.resize_anew(pv->local()->size()); collisionTimes_.clear(stream); const int nthreads = 128; // FIXME this is a hack if (rov_) { if (locality == ParticleVectorLocality::Local) rov_->local()->getMeshForces(stream)->clear(stream); else rov_->halo()-> getMeshForces(stream)->clear(stream); } OVviewWithNewOldVertices vertexView(ov_, activeOV, stream); PVviewWithOldParticles pvView(pv, pv->local()); // Step 1, find all the candidate collisions SAFE_KERNEL_LAUNCH( mesh_bounce_kernels::findBouncesInMesh, getNblocks(totalTriangles, nthreads), nthreads, 0, stream, vertexView, pvView, ov_->mesh.get(), cl->cellInfo(), devCoarseTable ); coarseTable_.nCollisions.downloadFromDevice(stream); debug("Found %d triangle collision candidates", coarseTable_.nCollisions[0]); if (coarseTable_.nCollisions[0] > maxCoarseCollisions) die("Found too many triangle collision candidates (coarse) (%d, max %d)," "something may be broken or you need to increase the estimate", coarseTable_.nCollisions[0], maxCoarseCollisions); // Step 2, filter the candidates SAFE_KERNEL_LAUNCH( mesh_bounce_kernels::refineCollisions, getNblocks(coarseTable_.nCollisions[0], nthreads), nthreads, 0, stream, vertexView, pvView, ov_->mesh.get(), coarseTable_.nCollisions[0], devCoarseTable.indices, devFineTable, collisionTimes_.devPtr() ); fineTable_.nCollisions.downloadFromDevice(stream); debug("Found %d precise triangle collisions", fineTable_.nCollisions[0]); if (fineTable_.nCollisions[0] > maxFineCollisions) die("Found too many triangle collisions (precise) (%d, max %d)," "something may be broken or you need to increase the estimate", fineTable_.nCollisions[0], maxFineCollisions); // Step 3, resolve the collisions mpark::visit([&](auto& bounceKernel) { bounceKernel.update(rng_); 
SAFE_KERNEL_LAUNCH( mesh_bounce_kernels::performBouncingTriangle, getNblocks(fineTable_.nCollisions[0], nthreads), nthreads, 0, stream, vertexView, pvView, ov_->mesh.get(), fineTable_.nCollisions[0], devFineTable.indices, collisionTimes_.devPtr(), getState()->dt, bounceKernel ); }, varBounceKernel_); if (rov_) { // make a fake view with vertices instead of particles ROVview view(rov_, rov_->get(locality)); view.objSize = ov_->mesh->getNvertices(); view.size = view.nObjects * view.objSize; view.positions = vertexView.vertices; view.forces = vertexView.vertexForces; rigid_operations::collectRigidForces(view, stream); } } } // namespace mirheo
b97a356278a099305dcd45b796c3878470b16892.cu
// Copyright 2020 ETH Zurich. All Rights Reserved. #include "from_mesh.h" #include "drivers/mesh.h" #include "kernels/api.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/rigid_object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/rigid/operations.h> #include <mirheo/core/utils/kernel_launch.h> namespace mirheo { BounceFromMesh::BounceFromMesh(const MirState *state, const std::string& name, VarBounceKernel varBounceKernel) : Bouncer(state, name), varBounceKernel_(varBounceKernel) {} BounceFromMesh::~BounceFromMesh() = default; void BounceFromMesh::setup(ObjectVector *ov) { Bouncer::setup(ov); // If the object is rigid, we need to collect the forces into the RigidMotion rov_ = dynamic_cast<RigidObjectVector*> (ov); // for NON-rigid objects: // // old positions HAVE to be known when the mesh travels to other ranks // shift HAS be applied as well // // for Rigid: // old motions HAVE to be there and communicated and shifted if (rov_ == nullptr) ov->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::Active, DataManager::ShiftMode::Active); else ov->requireDataPerObject<RigidMotion> (channel_names::oldMotions, DataManager::PersistenceMode::Active, DataManager::ShiftMode::Active); } void BounceFromMesh::setPrerequisites(ParticleVector *pv) { // do not set it to persistent because bounce happens after integration pv->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::None, DataManager::ShiftMode::Active); } std::vector<std::string> BounceFromMesh::getChannelsToBeExchanged() const { if (rov_) return {channel_names::motions, channel_names::oldMotions}; else return {channel_names::oldPositions}; } std::vector<std::string> BounceFromMesh::getChannelsToBeSentBack() const { if (rov_) return {channel_names::motions}; else // return {channel_names::forces}; return {}; } void BounceFromMesh::exec(ParticleVector *pv, CellList 
*cl, ParticleVectorLocality locality, cudaStream_t stream) { auto activeOV = ov_->get(locality); debug("Bouncing %d '%s' particles from %d '%s' objects (%s)", pv->local()->size(), pv->getCName(), activeOV->getNumObjects(), ov_->getCName(), getParticleVectorLocalityStr(locality).c_str()); ov_->findExtentAndCOM(stream, locality); const int totalTriangles = ov_->mesh->getNtriangles() * activeOV->getNumObjects(); // Set maximum possible number of _coarse_ and _fine_ collisions with triangles // In case of crash, the estimate should be increased const int maxCoarseCollisions = static_cast<int>(coarseCollisionsPerTri_ * static_cast<real>(totalTriangles)); coarseTable_.collisionTable.resize_anew(maxCoarseCollisions); coarseTable_.nCollisions.clear(stream); mesh_bounce_kernels::TriangleTable devCoarseTable { maxCoarseCollisions, coarseTable_.nCollisions.devPtr(), coarseTable_.collisionTable.devPtr() }; const int maxFineCollisions = static_cast<int>(fineCollisionsPerTri_ * static_cast<real>(totalTriangles)); fineTable_.collisionTable.resize_anew(maxFineCollisions); fineTable_.nCollisions.clear(stream); mesh_bounce_kernels::TriangleTable devFineTable { maxFineCollisions, fineTable_.nCollisions.devPtr(), fineTable_.collisionTable.devPtr() }; // Setup collision times array. 
For speed and simplicity initial time will be 0, // and after the collisions detected its i-th element will be t_i-1.0_r, where 0 <= t_i <= 1 // is the collision time, or 0 if no collision with the particle found collisionTimes_.resize_anew(pv->local()->size()); collisionTimes_.clear(stream); const int nthreads = 128; // FIXME this is a hack if (rov_) { if (locality == ParticleVectorLocality::Local) rov_->local()->getMeshForces(stream)->clear(stream); else rov_->halo()-> getMeshForces(stream)->clear(stream); } OVviewWithNewOldVertices vertexView(ov_, activeOV, stream); PVviewWithOldParticles pvView(pv, pv->local()); // Step 1, find all the candidate collisions SAFE_KERNEL_LAUNCH( mesh_bounce_kernels::findBouncesInMesh, getNblocks(totalTriangles, nthreads), nthreads, 0, stream, vertexView, pvView, ov_->mesh.get(), cl->cellInfo(), devCoarseTable ); coarseTable_.nCollisions.downloadFromDevice(stream); debug("Found %d triangle collision candidates", coarseTable_.nCollisions[0]); if (coarseTable_.nCollisions[0] > maxCoarseCollisions) die("Found too many triangle collision candidates (coarse) (%d, max %d)," "something may be broken or you need to increase the estimate", coarseTable_.nCollisions[0], maxCoarseCollisions); // Step 2, filter the candidates SAFE_KERNEL_LAUNCH( mesh_bounce_kernels::refineCollisions, getNblocks(coarseTable_.nCollisions[0], nthreads), nthreads, 0, stream, vertexView, pvView, ov_->mesh.get(), coarseTable_.nCollisions[0], devCoarseTable.indices, devFineTable, collisionTimes_.devPtr() ); fineTable_.nCollisions.downloadFromDevice(stream); debug("Found %d precise triangle collisions", fineTable_.nCollisions[0]); if (fineTable_.nCollisions[0] > maxFineCollisions) die("Found too many triangle collisions (precise) (%d, max %d)," "something may be broken or you need to increase the estimate", fineTable_.nCollisions[0], maxFineCollisions); // Step 3, resolve the collisions mpark::visit([&](auto& bounceKernel) { bounceKernel.update(rng_); 
SAFE_KERNEL_LAUNCH( mesh_bounce_kernels::performBouncingTriangle, getNblocks(fineTable_.nCollisions[0], nthreads), nthreads, 0, stream, vertexView, pvView, ov_->mesh.get(), fineTable_.nCollisions[0], devFineTable.indices, collisionTimes_.devPtr(), getState()->dt, bounceKernel ); }, varBounceKernel_); if (rov_) { // make a fake view with vertices instead of particles ROVview view(rov_, rov_->get(locality)); view.objSize = ov_->mesh->getNvertices(); view.size = view.nObjects * view.objSize; view.positions = vertexView.vertices; view.forces = vertexView.vertexForces; rigid_operations::collectRigidForces(view, stream); } } } // namespace mirheo
4537cc4b519ff5c7470ee7466bffd90854e74567.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <MemoryHandleInternal.h> #include <MathEngineCommon.h> #include <CudaDevice.h> #include <Kernels/CudaBlasKernels.h> namespace NeoML { void CCudaMathEngine::SetVectorToMatrixRows(const CFloatHandle& resultHandle, int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle) { ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, matrixHeight, matrixWidth); hipLaunchKernelGGL(( SetVectorToMatrixRowsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle)); } void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrix, int height, int width, const CConstIntHandle& indices, const CConstFloatHandle& vector) { ASSERT_EXPR( matrix.GetMathEngine() == this ); ASSERT_EXPR( indices.GetMathEngine() == this ); ASSERT_EXPR( vector.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, 
height, AddVectorToMatrixElementsCombine); hipLaunchKernelGGL(( AddVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix), height, width, GetRaw(indices), GetRaw(vector)); } void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrixHandle, int height, int width, const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle, const CConstFloatHandle& vectorHandle, int vectorSize) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this ); ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, vectorSize, AddVectorToMatrixElementsMulCombine); hipLaunchKernelGGL(( AddVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrixHandle), height, width, GetRaw(rowIndicesHandle), GetRaw(columnIndicesHandle), GetRaw(vectorHandle), vectorSize); } // Assigns the values: matrix[rowIndices[i], columnIndices[i]] = vector[i]. 
void CCudaMathEngine::setVectorToMatrixElements( const CFloatHandle& matrixHandle, int height, int width,
	const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle,
	const CConstFloatHandle& vectorHandle, int vectorSize )
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the vectorSize index pairs
	int blockCount;
	int threadCount;
	getCudaTaskGrid( blockCount, threadCount, vectorSize, SetVectorToMatrixElementsMulCombine );
	hipLaunchKernelGGL(( SetVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( matrixHandle ), height, width, GetRaw( rowIndicesHandle ), GetRaw( columnIndicesHandle ), GetRaw( vectorHandle ), vectorSize );
}

// Gathers one element per matrix row into the result vector.
// vectorSize must be at least height (one output slot per row).
// NOTE(review): presumably result[i] += matrix[i, indices[i]] - confirm against AddMatrixElementsToVectorKernel.
void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width,
	const CConstIntHandle& indices, const CFloatHandle& result, int vectorSize)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	ASSERT_EXPR(vectorSize >= height);
	SetCudaDevice( device->DeviceNumber );

	int blockCount;
	int threadCount;
	getCudaTaskGrid(blockCount, threadCount, height, AddMatrixElementsToVectorCombine);
	hipLaunchKernelGGL(( AddMatrixElementsToVectorKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix), height, width, GetRaw(indices), GetRaw(result));
}

// Gathers matrix elements addressed by (rowIndices[i], columnIndices[i]) into the result vector.
void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width,
	const CConstIntHandle& rowIndices, const CConstIntHandle& columnIndices,
	const CFloatHandle& result, int vectorSize)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( rowIndices.GetMathEngine() == this );
	ASSERT_EXPR( columnIndices.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int blockCount;
	int threadCount;
	getCudaTaskGrid(blockCount, threadCount, vectorSize, AddMatrixElementsToVectorMulCombine);
	hipLaunchKernelGGL(( AddMatrixElementsToVectorKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix), height, width, GetRaw(rowIndices), GetRaw(columnIndices), GetRaw(result), vectorSize);
}

// Scatters one element per matrix row into the result matrix (index-addressed column per row).
void CCudaMathEngine::AddMatrixElementsToMatrix(const CConstFloatHandle& matrix, int height, int width,
	const CFloatHandle& result, const CConstIntHandle& indices)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int blockCount;
	int threadCount;
	getCudaTaskGrid(blockCount, threadCount, height, AddMatrixElementsToMatrixCombine);
	hipLaunchKernelGGL(( AddMatrixElementsToMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix), height, width, GetRaw(result), GetRaw(indices));
}

// result = matrix + diag(diagMatrix): adds a diagonal matrix (stored as a vector) to a full matrix.
void CCudaMathEngine::AddDiagMatrixToMatrix( const CConstFloatHandle& diagMatrix, const CConstFloatHandle& matrix,
	int height, int width, const CFloatHandle& result )
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	ASSERT_EXPR( diagMatrix.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// widthNorm: ceil-divide the width so each thread combines several columns
	const int widthNorm = ( width + AddDiagMatrixToMatrixCombine - 1 ) / AddDiagMatrixToMatrixCombine;
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D( blockCount, threadCount, height, widthNorm );
	hipLaunchKernelGGL(( AddDiagMatrixToMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( diagMatrix ), GetRaw( matrix ), height, width, widthNorm, GetRaw( result ) );
}

// Batch version: adds the vector to every row of each of the batchSize matrices.
void CCudaMathEngine::AddVectorToMatrixRows(int batchSize,
	const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle, int matrixHeight,
	int matrixWidth, const CConstFloatHandle& vectorHandle)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR(
	resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// widthNorm: columns per thread after combining, rounded up to warp-aligned size
	int widthNorm = (matrixWidth + BatchAddVectorToMatrixRowsCombine - 1) / BatchAddVectorToMatrixRowsCombine;
	widthNorm = alignXSizeForWarp(widthNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, device->ThreadMax3DCountX, blockCount, threadCount, batchSize * matrixHeight, widthNorm);
	hipLaunchKernelGGL(( AddVectorToMatrixRowsKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle));
}

// result = matrix with the vector (matrixHeight elements) added to every column. Float version.
void CCudaMathEngine::AddVectorToMatrixColumns( const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle )
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, matrixWidth );
	hipLaunchKernelGGL(( AddVectorToMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle) );
}

// Integer version of AddVectorToMatrixColumns.
void CCudaMathEngine::AddVectorToMatrixColumns( const CConstIntHandle& matrixHandle, const CIntHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstIntHandle& vectorHandle )
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, matrixWidth );
	hipLaunchKernelGGL(( AddVectorToMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth,
	GetRaw(vectorHandle) );
}

// result = matrix with the vector subtracted from every column.
void CCudaMathEngine::SubVectorFromMatrixColumns(const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D(blockCount, threadCount, matrixHeight, matrixWidth);
	hipLaunchKernelGGL(( SubVectorFromMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle));
}

// Sums each matrix over its rows and adds the row-sums into result, for batchSize matrices.
void CCudaMathEngine::SumMatrixRowsAdd( int batchSize, const CFloatHandle& resultHandle,
	const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth )
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// height: number of row-groups after combining SumMatrixRowsAddCombineCount rows per thread
	const int height = ( matrixHeight + SumMatrixRowsAddCombineCount - 1 ) / SumMatrixRowsAddCombineCount;
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid3D( blockCount, threadCount, batchSize, height, matrixWidth );
	hipLaunchKernelGGL(( SumMatrixRowsAddKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(resultHandle), GetRaw(matrixHandle), matrixHeight, matrixWidth );
}

// Sums each matrix row (i.e. reduces over columns) into the result vector.
void CCudaMathEngine::SumMatrixColumns(const CFloatHandle& resultHandle, const CConstFloatHandle& matrixHandle,
	int matrixHeight, int matrixWidth)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );

	// Shared implementation; isNeg == false means plain (non-negated) sum
	sumMatrixColumnsKernelFunc(resultHandle, GetRaw(matrixHandle), matrixHeight, matrixWidth, false);
}

// Element-wise division of matrix columns by the vector.
void CCudaMathEngine::MatrixColumnsEltwiseDivide( const CConstFloatHandle& matrixHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle,
	const CFloatHandle& resultHandle )
{
	ASSERT_EXPR(
	matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	const int widthNorm = ( matrixWidth + MatrixColumnsEltwiseDivideCombine - 1 ) / MatrixColumnsEltwiseDivideCombine;
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, widthNorm );
	hipLaunchKernelGGL(( MatrixColumnsEltwiseDivideKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( matrixHandle ), matrixHeight, matrixWidth, widthNorm, GetRaw( vectorHandle ), GetRaw( resultHandle ) );
}

// Computes log(sum(exp(row))) for every matrix row; result has one value per row.
// resultSize must be at least height.
void CCudaMathEngine::MatrixLogSumExpByRows(const CConstFloatHandle& matrix, int height, int width, const CFloatHandle& result, int resultSize)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	ASSERT_EXPR(resultSize >= height);
	SetCudaDevice( device->DeviceNumber );

	int widthNorm = (width + MatrixLogSumExpByRowsCombine - 1) / MatrixLogSumExpByRowsCombine;
	widthNorm = alignXSizeForWarp(widthNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
	blockCount.x = 1; // a whole row is reduced inside one block (shared-memory reduction)

	// Dynamic shared memory: one float per thread in the block
	const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
	hipLaunchKernelGGL(( MatrixLogSumExpByRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrix), height, width, GetRaw(result), widthNorm);
}

// Softmax over each matrix row.
void CCudaMathEngine::MatrixSoftmaxByRows(const CConstFloatHandle& matrix, int height, int width, const CFloatHandle& result)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int widthNorm = (width + MatrixSoftmaxByRowsCombine - 1) / MatrixSoftmaxByRowsCombine;
	widthNorm = alignXSizeForWarp(widthNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
	blockCount.x = 1; // a row is handled by a single block

	const int sharedSize = threadCount.x *
	threadCount.y * sizeof(float);
	hipLaunchKernelGGL(( MatrixSoftmaxByRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrix), height, width, GetRaw(result), widthNorm);
}

// Backward ("diff op") of row-wise softmax: combines the forward output (first) with the
// incoming gradient (second) into result. Exact formula lives in the kernel.
void CCudaMathEngine::MatrixSoftmaxDiffOpByRows(const CConstFloatHandle& first, const CConstFloatHandle& second,
	int height, int width, const CFloatHandle& result)
{
	ASSERT_EXPR( first.GetMathEngine() == this );
	ASSERT_EXPR( second.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int widthNorm = (width + MatrixSoftmaxDiffOpByRowsCombine - 1) / MatrixSoftmaxDiffOpByRowsCombine;
	widthNorm = alignXSizeForWarp(widthNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
	blockCount.x = 1;

	const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
	hipLaunchKernelGGL(( MatrixSoftmaxDiffOpByRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(first), GetRaw(second), height, width, GetRaw(result), widthNorm);
}

// Softmax over each matrix column (reduction runs along the height).
void CCudaMathEngine::MatrixSoftmaxByColumns(const CConstFloatHandle& matrix, int height, int width,
	const CFloatHandle& result)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int heightNorm = (height + MatrixSoftmaxByColumnsCombine - 1) / MatrixSoftmaxByColumnsCombine;
	heightNorm = alignXSizeForWarp(heightNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
	blockCount.x = 1; // a column is handled by a single block

	const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
	hipLaunchKernelGGL(( MatrixSoftmaxByColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrix), height, width, GetRaw(result), heightNorm);
}

// Backward of column-wise softmax; see MatrixSoftmaxDiffOpByRows for the row-wise analogue.
void CCudaMathEngine::MatrixSoftmaxDiffOpByColumns(const CConstFloatHandle& first, const CConstFloatHandle& second,
	int height, int width, const CFloatHandle& result)
{
	ASSERT_EXPR( first.GetMathEngine() == this );
	ASSERT_EXPR( second.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int heightNorm = (height + MatrixSoftmaxDiffOpByColumnsCombine - 1) / MatrixSoftmaxDiffOpByColumnsCombine;
	heightNorm = alignXSizeForWarp(heightNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
	blockCount.x = 1;

	const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
	hipLaunchKernelGGL(( MatrixSoftmaxDiffOpByColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(first), GetRaw(second), height, width, GetRaw(result), heightNorm);
}

// Finds the maximum of each row and its column index.
// resultHandle gets the max values, columnIndices their positions; vectorSize >= matrixHeight.
// NOTE(review): columnIndices has no GetMathEngine() assert here, unlike sibling methods - verify intent.
void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth,
	const CFloatHandle& resultHandle, const CIntHandle& columnIndices, int vectorSize)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR(vectorSize >= matrixHeight);
	SetCudaDevice( device->DeviceNumber );

	int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine;
	widthNorm = alignXSizeForWarp(widthNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm);
	blockCount.x = 1; // whole row reduced within one block

	// Shared memory holds a (value, index) pair per thread
	const int sharedSize = threadCount.y * threadCount.x * sizeof(CValueWithIndex);
	hipLaunchKernelGGL(( FindMaxValueWithIndicesInRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrixHandle), matrixHeight, matrixWidth,
	GetRaw(resultHandle), GetRaw(columnIndices), widthNorm);
}

// Finds the maximum of each row (values only, no indices); vectorSize >= matrixHeight.
void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle,
	int matrixHeight, int matrixWidth, const CFloatHandle& resultHandle, int vectorSize)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR(vectorSize >= matrixHeight);
	SetCudaDevice(
device->DeviceNumber ); int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine; widthNorm = alignXSizeForWarp(widthNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm); blockCount.x = 1; const int sharedSize = threadCount.y * threadCount.x * sizeof(float); hipLaunchKernelGGL(( FindMaxValueInRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrixHandle), matrixHeight, matrixWidth, GetRaw(resultHandle), widthNorm); } void CCudaMathEngine::FindMaxValueInColumns( int batchSize, const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth, const CFloatHandle& resultHandle, const CIntHandle& rowIndices, int vectorSize ) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( rowIndices.GetMathEngine() == this ); ASSERT_EXPR( vectorSize >= batchSize * matrixWidth ); SetCudaDevice( device->DeviceNumber ); int heightNorm = ( matrixHeight + FindMaxValueInColumnsCombine - 1 ) / FindMaxValueInColumnsCombine; heightNorm = alignXSizeForWarp( heightNorm ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid3D( blockCount, threadCount, batchSize, matrixWidth, heightNorm ); blockCount.x = 1; const int sharedSize = threadCount.z * threadCount.y * threadCount.x * sizeof( CValueWithIndex ); hipLaunchKernelGGL(( FindMaxValueInColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize, GetRaw( matrixHandle ), matrixHeight, matrixWidth, GetRaw( resultHandle ), GetRaw( rowIndices ), heightNorm ); } void CCudaMathEngine::FindMinValueInColumns( const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth, const CFloatHandle& resultHandle, const CIntHandle& columnIndices ) { SetCudaDevice( device->DeviceNumber ); // Initialize using the first row data VectorCopy( resultHandle, matrixHandle, matrixWidth ); VectorFill( columnIndices, 0, matrixWidth ); int 
blockCount; int threadCount; getCudaTaskGrid( blockCount, threadCount, matrixWidth ); hipLaunchKernelGGL(( FindMinValueInColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( matrixHandle ), matrixHeight, matrixWidth, GetRaw( resultHandle ), GetRaw( columnIndices ) ); } void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstFloatHandle& inputHandle, const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CFloatHandle& outputHandle, int outputChannelsCount) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( outputHandle.GetMathEngine() == this ); vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount); } void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstIntHandle& inputHandle, const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CFloatHandle& outputHandle, int outputChannelsCount) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( outputHandle.GetMathEngine() == this ); vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount); } void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstIntHandle& inputHandle, const CConstIntHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CIntHandle& outputHandle, int outputChannelsCount) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( outputHandle.GetMathEngine() == this ); vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount); } void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int 
	channelCount, const CConstFloatHandle& inputHandle,
	const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( multHandle.GetMathEngine() == this );
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	vectorMultichannelLookupAndAddToTable(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount,
		multHandle, matrixHandle, outputChannelsCount);
}

// Backward of the multichannel lookup, int indices variant; delegates to the template implementation.
void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CConstIntHandle& inputHandle,
	const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( multHandle.GetMathEngine() == this );
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	vectorMultichannelLookupAndAddToTable(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount,
		multHandle, matrixHandle, outputChannelsCount);
}

// Expands bit sets into 0/1 float vectors: each of the batchSize bit sets
// (bitSetSize ints) becomes an outputVectorSize float vector.
void CCudaMathEngine::BitSetBinarization(int batchSize, int bitSetSize,
	const CConstIntHandle& inputHandle, int outputVectorSize, const CFloatHandle& resultHandle)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int blockCount;
	int threadCount;
	getCudaTaskGrid( blockCount, threadCount, batchSize * outputVectorSize );
	hipLaunchKernelGGL(( BitSetBinarizationKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, bitSetSize, GetRaw(inputHandle), outputVectorSize, GetRaw(resultHandle));
}

// Batched product of a lookup-table matrix by a lookup-table vector; one result row per matrix row.
void CCudaMathEngine::MultiplyLookupMatrixByLookupVector(int batchSize, const CLookupMatrix& matrix,
	const CLookupVector& vector, const CFloatHandle& resultHandle, int
	resultSize)
{
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );
	ASSERT_EXPR(matrix.Width() == vector.VectorSize());
	ASSERT_EXPR(resultSize >= batchSize * matrix.Height());

	int widthNorm = (matrix.Width() + MultiplyLookupMatrixByLookupVectorCombine - 1) /
		MultiplyLookupMatrixByLookupVectorCombine;
	widthNorm = alignXSizeForWarp(widthNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Height(), widthNorm);

	// NOTE(review): this condition is always true for a valid grid; the similar code in
	// sumMatrixColumnsKernelFunc uses blockCount.x > 1 - verify which comparison is intended.
	if(blockCount.x > 0) {
		// Several GPUs may take part in adding up one row, need atomic operations
		// Set resultHandle to zeros
		VectorFill(resultHandle, 0, batchSize * matrix.Height());
	}

	const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
	hipLaunchKernelGGL(( MultiplyLookupMatrixByLookupVectorKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize,
		GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize, GetRaw(matrix.Rows), matrix.RowCount,
		GetRaw(vector.Table), vector.Dims.VectorCount, GetRaw(vector.Vector),
		GetRaw(resultHandle), resultSize, widthNorm);
}

// result = matrix^T * vector (overwrite); shared implementation with isAdd == false.
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVector(int batchSize, const CLookupMatrix& matrix,
	const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	multiplyVectorByLookupMatrixImpl(batchSize, matrix, vectorHandle, resultHandle, resultSize, false);
}

// result += matrix^T * vector (accumulate); shared implementation with isAdd == true.
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVectorAndAdd(int batchSize, const CLookupMatrix& matrix,
	const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	multiplyVectorByLookupMatrixImpl(batchSize, matrix, vectorHandle, resultHandle, resultSize, true);
}

void
// Rank-1-style update: scatters first (x) second^T products into the lookup table rows
// selected by indexHandle. Requires vectorSize == second.VectorSize().
CCudaMathEngine::MultiplyVectorByTransposedLookupVectorAndAddToTable(int batchSize,
	const CFloatHandle& table, int vectorCount, int vectorSize, const CConstIntHandle& indexHandle,
	const CConstFloatHandle& firstHandle, int firstSize, const CLookupVector& second)
{
	ASSERT_EXPR( table.GetMathEngine() == this );
	ASSERT_EXPR( indexHandle.GetMathEngine() == this );
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR(vectorSize == second.VectorSize());
	SetCudaDevice( device->DeviceNumber );

	int vectorSizeNorm = (vectorSize + MultiplyVectorByTransposedLookupVectorAndAddToTableCombine - 1) /
		MultiplyVectorByTransposedLookupVectorAndAddToTableCombine;

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D(blockCount, threadCount, batchSize * firstSize, vectorSizeNorm);
	hipLaunchKernelGGL(( MultiplyVectorByTransposedLookupVectorAndAddToTableKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize,
		GetRaw(table), vectorCount, vectorSize, GetRaw(indexHandle),
		GetRaw(firstHandle), firstSize, GetRaw(second.Table), GetRaw(second.Vector), vectorSizeNorm);
}

// result = diag(first) * second, where first is stored as a vector of firstSize diagonal elements.
// The unused trailing int parameter is a result buffer size kept for interface compatibility.
void CCudaMathEngine::MultiplyDiagMatrixByMatrix(const CConstFloatHandle& firstHandle, int firstSize,
	const CConstFloatHandle& secondHandle, int secondWidth,
	const CFloatHandle& resultHandle, int)
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( secondHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D(blockCount, threadCount, firstSize, secondWidth);
	hipLaunchKernelGGL(( MultiplyDiagMatrixByMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(firstHandle), firstSize,
		GetRaw(secondHandle), secondWidth, GetRaw(resultHandle));
}

// Batched variant with a single shared diagonal: multiplies one diagonal matrix by each of
// the batchSize matrices. Trailing unused int is a buffer size kept for interface compatibility.
void CCudaMathEngine::Multiply1DiagMatrixByMatrix(int batchSize, const CConstFloatHandle& firstHandle, int firstSize,
	const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int)
{
	ASSERT_EXPR(
	firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( secondHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	dim3 blockCount;
	dim3 threadCount;
	int batchNorm = (batchSize + Multiply1DiagMatrixByMatrixCombine - 1) / Multiply1DiagMatrixByMatrixCombine;
	getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, batchNorm, firstSize * secondWidth);
	hipLaunchKernelGGL(( Multiply1DiagMatrixByMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(firstHandle), firstSize,
		GetRaw(secondHandle), secondWidth, GetRaw(resultHandle), batchNorm);
}

// Transposes batchSize matrices of (height x medium x width) cells of `channels` elements each. Float version.
void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstFloatHandle& firstHandle,
	int height, int medium, int width, int channels, const CFloatHandle& resultHandle, int resultBufferSize)
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	transposeMatrixImpl(batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize);
}

// Integer version of TransposeMatrix; same shared implementation.
void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstIntHandle& firstHandle,
	int height, int medium, int width, int channels, const CIntHandle& resultHandle, int resultBufferSize)
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	transposeMatrixImpl(batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize);
}

// Batched diag-times-matrix with accumulation into result; reduction runs over the batch dimension.
void CCudaMathEngine::MultiplyDiagMatrixByMatrixAndAdd( int batchSize, const CConstFloatHandle& firstHandle,
	int firstSize, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle )
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( secondHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int batchSizeNorm = ( batchSize + MultiplyDiagMatrixByMatrixAndSumCombine - 1 ) /
		MultiplyDiagMatrixByMatrixAndSumCombine;
	batchSizeNorm = alignXSizeForWarp( batchSizeNorm );

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid3DMinZYX( 1, 1, 512, blockCount, threadCount, firstSize, secondWidth, batchSizeNorm );

	// Dynamic shared memory: one float per thread for the reduction
	int sharedSize = threadCount.x * threadCount.y * threadCount.z * sizeof( float );
	hipLaunchKernelGGL(( MultiplyDiagMatrixByMatrixAndSumKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize, GetRaw( firstHandle ),
		firstSize, GetRaw( secondHandle ), secondWidth, GetRaw( resultHandle ), batchSizeNorm );
}

// Row-wise dot products of two equally sized matrices: result[i] = dot(first[i], second[i]).
// The result is zero-filled first because the kernel accumulates into it.
void CCudaMathEngine::RowMultiplyMatrixByMatrix( const CConstFloatHandle& firstHandle,
	const CConstFloatHandle& secondHandle, int height, int width, const CFloatHandle& resultHandle )
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( secondHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	VectorFill( resultHandle, 0, height );

	int widthNorm = ( width + RowMultiplyMatrixByMatrixCombine - 1 ) / RowMultiplyMatrixByMatrixCombine;

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, height, widthNorm );

	const int sharedSize = threadCount.y * threadCount.x * sizeof( float );
	hipLaunchKernelGGL(( RowMultiplyMatrixByMatrixKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw( firstHandle ),
		GetRaw( secondHandle ), height, width, GetRaw( resultHandle ), widthNorm );
}

// Spreads source rows into result rows according to indexHandle; rows not written
// are filled with fillValue (or zero when the handle is null). Float version.
void CCudaMathEngine::MatrixSpreadRows(const CConstFloatHandle& sourceHandle, int height, int width,
	const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle,
	const CConstFloatHandle& fillValue)
{
	ASSERT_EXPR( indexHandle.GetMathEngine() == this );
	ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this );
	matrixSpreadRowsImpl(GetRaw(sourceHandle), height, width, resultHandle, resultHeight, GetRaw(indexHandle),
	fillValue);
}

// Like MatrixSpreadRows but accumulates into result rows instead of overwriting them.
void CCudaMathEngine::MatrixSpreadRowsAdd(const CConstFloatHandle& sourceHandle, int height, int width,
	const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle)
{
	ASSERT_EXPR( indexHandle.GetMathEngine() == this );
	ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine;
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm);
	hipLaunchKernelGGL(( MatrixSpreadRowsAddKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(sourceHandle), height, width,
		GetRaw(resultHandle), GetRaw(indexHandle), widthNorm);
}

// Integer version of MatrixSpreadRows; same shared template implementation.
void CCudaMathEngine::MatrixSpreadRows(const CConstIntHandle& sourceHandle, int height, int width,
	const CIntHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle,
	const CConstIntHandle& fillValue)
{
	ASSERT_EXPR( indexHandle.GetMathEngine() == this );
	ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this );
	matrixSpreadRowsImpl(GetRaw(sourceHandle), height, width, resultHandle, resultHeight, GetRaw(indexHandle), fillValue);
}

// For each batch element: sums the indexCount table rows selected by its indices
// into one result vector of vectorSize elements.
void CCudaMathEngine::LookupAndSum( const CConstIntHandle& indicesHandle, int batchSize, int indexCount,
	const CConstFloatHandle& tableHandle, int vectorSize, const CFloatHandle& result )
{
	ASSERT_EXPR( indicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( tableHandle.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	dim3 blockCount, threadCount;
	getCudaTaskGrid2D( blockCount, threadCount, batchSize, vectorSize );
	hipLaunchKernelGGL(( LookupAndSumKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( indicesHandle ), batchSize, indexCount,
		GetRaw( tableHandle ), vectorSize, GetRaw(
	result ) );
}

// Scatter-add of the additions vectors into the lookup table at the given indices.
// The table is zeroed first, so the result contains only the accumulated additions.
void CCudaMathEngine::LookupAndAddToTable( const CConstIntHandle& indicesHandle, int batchSize, int indexCount,
	const CConstFloatHandle& additionsHandle, int vectorSize, const CFloatHandle& tableHandle, int vectorCount )
{
	ASSERT_EXPR( indicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( tableHandle.GetMathEngine() == this );
	ASSERT_EXPR( additionsHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	VectorFill( tableHandle, 0.f, vectorSize * vectorCount );

	dim3 blockCount, threadCount;
	getCudaTaskGrid3D( blockCount, threadCount, batchSize, indexCount, vectorSize );
	hipLaunchKernelGGL(( LookupAndAddToTableKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( indicesHandle ), batchSize, indexCount,
		GetRaw( additionsHandle ), vectorSize, GetRaw( tableHandle ) );
}

// One-hot encoding of float-typed class labels: each input value becomes an enumSize 0/1 vector.
void CCudaMathEngine::EnumBinarization(int batchSize, const CConstFloatHandle& inputHandle, int enumSize,
	const CFloatHandle& resultHandle)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int blockCount;
	int threadCount;
	getCudaTaskGrid(blockCount, threadCount, batchSize * enumSize, EnumBinarizationCombine);
	hipLaunchKernelGGL(( EnumBinarizationKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(inputHandle), enumSize, GetRaw(resultHandle));
}

// One-hot encoding of int-typed class labels.
void CCudaMathEngine::EnumBinarization(int batchSize, const CConstIntHandle& inputHandle, int enumSize,
	const CFloatHandle& resultHandle)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int blockCount;
	int threadCount;
	getCudaTaskGrid(blockCount, threadCount, batchSize * enumSize, EnumBinarizationCombine);
	hipLaunchKernelGGL(( EnumBinarizationKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(inputHandle), enumSize, GetRaw(resultHandle));
}

template<class T> void
// Shared implementation behind both TransposeMatrix overloads.
// Treats the data as batchSize blocks of height x medium x width cells of `channels` elements.
CCudaMathEngine::transposeMatrixImpl(int batchSize, const CTypedMemoryHandle<const T>& firstHandle,
	int height, int medium, int width, int channels, const CTypedMemoryHandle<T>& resultHandle, int resultBufferSize)
{
	int size = batchSize * height * medium * width * channels;
	ASSERT_EXPR(resultBufferSize >= size);
	SetCudaDevice( device->DeviceNumber );

	int blockCount;
	int threadCount;
	getCudaTaskGrid(blockCount, threadCount, size, TransposeMatrixCombine);
	hipLaunchKernelGGL(( TransposeMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(firstHandle), height, medium,
		width, channels, GetRaw(resultHandle), size);
}

// Shared implementation of row-sum reduction (SumMatrixColumns); isNeg negates the sums.
// Takes a raw device pointer for the matrix because callers already hold one.
void CCudaMathEngine::sumMatrixColumnsKernelFunc(const CFloatHandle& resultHandle, const float* matrix,
	int matrixHeight, int matrixWidth, bool isNeg)
{
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	int widthNorm = (matrixWidth + SumMatrixColumnsCombine - 1) / SumMatrixColumnsCombine;
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, matrixHeight, widthNorm);

	// Cap the number of X-blocks so that atomic contention per row stays bounded
	int maxAtomicPerX = SumMatrixColumnsMaxAtomic / blockCount.y;
	if(maxAtomicPerX <= 0) {
		maxAtomicPerX = 1;
	}
	if((int)blockCount.x > maxAtomicPerX) {
		blockCount.x = maxAtomicPerX;
	}

	// combine: how many columns each thread folds after the grid was capped
	int totalThreadXCount = threadCount.x * blockCount.x;
	int combine = (matrixWidth + totalThreadXCount - 1) / totalThreadXCount;

	if( blockCount.x > 1 ) {
		// Multiple blocks accumulate into one row, so the result must start from zero
		VectorFill(resultHandle, 0, matrixHeight);
	}

	const int sharedSize = threadCount.y * threadCount.x * sizeof(float);
	hipLaunchKernelGGL(( SumMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(resultHandle), matrix,
		matrixHeight, matrixWidth, isNeg, widthNorm, combine);
}

// Shared implementation behind MultiplyTransposedLookupMatrixByVector(+AndAdd):
// result (+)= matrix^T * vector for batchSize lookup matrices.
void CCudaMathEngine::multiplyVectorByLookupMatrixImpl(int batchSize, const CLookupMatrix& matrix,
	const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize, bool isAdd)
{
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR(
	resultHandle.GetMathEngine() == this );
	ASSERT_EXPR(resultSize >= batchSize * matrix.Width());
	SetCudaDevice( device->DeviceNumber );

	int heightNorm = (matrix.Height() + MultiplyTransposedLookupMatrixByVectorCombine - 1) /
		MultiplyTransposedLookupMatrixByVectorCombine;
	heightNorm = alignXSizeForWarp(heightNorm);

	// X coordinate is Height to allow for warp reduction
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Width(), heightNorm);

	// NOTE(review): blockCount.x > 0 is always true for a valid grid; cf. blockCount.x > 1
	// in sumMatrixColumnsKernelFunc - verify which comparison is intended.
	if(blockCount.x > 0 && !isAdd) {
		// Several GPUs may take part in adding up one column, need atomic operations
		// Set resultHandle to zeros
		VectorFill(resultHandle, 0, batchSize * matrix.Width());
	}

	const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
	hipLaunchKernelGGL(( MultiplyTransposedLookupMatrixByVectorKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize,
		GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize, GetRaw(matrix.Rows), matrix.RowCount,
		GetRaw(vectorHandle), GetRaw(resultHandle), isAdd, heightNorm);
}

// Shared implementation behind both MatrixSpreadRows overloads.
// Pre-fills the result (zero or fillValue), then scatters the source rows by index.
template<class T>
void CCudaMathEngine::matrixSpreadRowsImpl(const T* source, int height, int width,
	CTypedMemoryHandle<T> result, int resultHeight, const int* index, const CTypedMemoryHandle<const T>& fillValue)
{
	SetCudaDevice( device->DeviceNumber );
	// NOTE(review): the two VectorFill calls use different argument orders -
	// presumably different overloads (value vs. handle fill); confirm the signatures.
	if(fillValue.IsNull()) {
		VectorFill( result, 0, resultHeight * width);
	} else {
		VectorFill( result, resultHeight * width, fillValue);
	}
	int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine;
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm);
	hipLaunchKernelGGL(( MatrixSpreadRowsKernel<T>), dim3(blockCount), dim3(threadCount), 0, 0, source, height, width,
		GetRaw( result ), index, widthNorm);
}

// Shared implementation of the multichannel lookup gather.
// The first lookupCount input channels index into their tables; remaining channels are copied through.
template<class TInput, class TLookup>
void CCudaMathEngine::vectorMultichannelLookupAndCopy(int batchSize, int channelCount,
	const CTypedMemoryHandle<const TInput>& inputHandle, const
	CTypedMemoryHandle<const TLookup>* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CTypedMemoryHandle<TLookup>& outputHandle, int outputChannelsCount)
{
	SetCudaDevice( device->DeviceNumber );
	int batchNorm = (batchSize + BatchVectorLookupAndCopyCombineBatch - 1) / BatchVectorLookupAndCopyCombineBatch;

	int outputChannel = 0;
	for(int j = 0; j < lookupCount; ++j) {
		dim3 blockCount;
		dim3 threadCount;
		getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize);
		hipLaunchKernelGGL(( VectorChannelLookupAndCopyKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(inputHandle) + j,
			channelCount, GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize,
			GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm);
		outputChannel += lookupDimensions[j].VectorSize;
	}

	if(lookupCount < channelCount) {
		// Channels without a lookup table are copied to the output unchanged
		dim3 blockCount;
		dim3 threadCount;
		getCudaTaskGrid2D(blockCount, threadCount, batchNorm, channelCount - lookupCount);
		hipLaunchKernelGGL(( BatchVectorChannelCopyKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(inputHandle) + lookupCount,
			channelCount, channelCount - lookupCount, GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm);
	}
}

template<class T> void CCudaMathEngine::vectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CTypedMemoryHandle<const T>& inputHandle, const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount) { SetCudaDevice( device->DeviceNumber ); int batchNorm = (batchSize + BatchVectorLookupAndAddToTableCombine - 1) / BatchVectorLookupAndAddToTableCombine; float mult = multHandle.GetValue(); int outputChannel = 0; for (int j = 0; j < lookupCount; ++j) { dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize);
// Tail of vectorMultichannelLookupAndAddToTable (its definition begins in the previous
// chunk): the per-lookup-table kernel launch inside the loop over lookup tables.
	hipLaunchKernelGGL(( VectorChannelLookupAndAddToTableKernel), dim3(blockCount), dim3(threadCount), 0, 0,
			batchSize, GetRaw(inputHandle) + j, channelCount, GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize,
			mult, GetRaw(matrixHandle) + outputChannel, outputChannelsCount, batchNorm);

		// Advance to the output-channel range of the next lookup table
		outputChannel += lookupDimensions[j].VectorSize;
	}
}

} // namespace NeoML

#endif // NEOML_USE_CUDA
4537cc4b519ff5c7470ee7466bffd90854e74567.cu
/* Copyright © 2017-2020 ABBYY Production LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/

#include <NeoMathEngine/NeoMathEngineDefs.h>

#ifdef NEOML_USE_CUDA

#include <CudaMathEngine.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <CudaDevice.h>
#include <Kernels/CudaBlasKernels.h>

namespace NeoML {

// Writes the vector (length matrixWidth) into every row of the result matrix.
void CCudaMathEngine::SetVectorToMatrixRows(const CFloatHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle)
{
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D grid: one work item per matrix element
	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2D(blockCount, threadCount, matrixHeight, matrixWidth);

	SetVectorToMatrixRowsKernel<<<blockCount, threadCount>>>
		(GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle));
}

// Adds `vector` entries into matrix cells selected by `indices` — presumably one
// indexed column per row (matrix[i][indices[i]] += vector[i]); confirm in the kernel.
void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrix, int height, int width,
	const CConstIntHandle& indices, const CConstFloatHandle& vector)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	ASSERT_EXPR( vector.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the rows, with the per-thread Combine factor
	int blockCount;
	int threadCount;
	getCudaTaskGrid(blockCount, threadCount, height, AddVectorToMatrixElementsCombine);

	// The remaining kernel arguments continue in the next chunk.
	AddVectorToMatrixElementsKernel<<<blockCount, threadCount>>>(GetRaw(matrix),
height, width, GetRaw(indices), GetRaw(vector)); } void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrixHandle, int height, int width, const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle, const CConstFloatHandle& vectorHandle, int vectorSize) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this ); ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, vectorSize, AddVectorToMatrixElementsMulCombine); AddVectorToMatrixElementsKernel<<<blockCount, threadCount>>>(GetRaw(matrixHandle), height, width, GetRaw(rowIndicesHandle), GetRaw(columnIndicesHandle), GetRaw(vectorHandle), vectorSize); } // Assigns the values: matrix[rowIndices[i], columnIndices[i]] = vector[i]. void CCudaMathEngine::setVectorToMatrixElements( const CFloatHandle& matrixHandle, int height, int width, const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle, const CConstFloatHandle& vectorHandle, int vectorSize ) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this ); ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid( blockCount, threadCount, vectorSize, SetVectorToMatrixElementsMulCombine ); SetVectorToMatrixElementsKernel<<<blockCount, threadCount>>>( GetRaw( matrixHandle ), height, width, GetRaw( rowIndicesHandle ), GetRaw( columnIndicesHandle ), GetRaw( vectorHandle ), vectorSize ); } void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width, const CConstIntHandle& indices, const CFloatHandle& result, int vectorSize) { ASSERT_EXPR( 
matrix.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); ASSERT_EXPR( indices.GetMathEngine() == this ); ASSERT_EXPR(vectorSize >= height); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, height, AddMatrixElementsToVectorCombine); AddMatrixElementsToVectorKernel<<<blockCount, threadCount>>>(GetRaw(matrix), height, width, GetRaw(indices), GetRaw(result)); } void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width, const CConstIntHandle& rowIndices, const CConstIntHandle& columnIndices, const CFloatHandle& result, int vectorSize) { ASSERT_EXPR( matrix.GetMathEngine() == this ); ASSERT_EXPR( rowIndices.GetMathEngine() == this ); ASSERT_EXPR( columnIndices.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, vectorSize, AddMatrixElementsToVectorMulCombine); AddMatrixElementsToVectorKernel<<<blockCount, threadCount>>>(GetRaw(matrix), height, width, GetRaw(rowIndices), GetRaw(columnIndices), GetRaw(result), vectorSize); } void CCudaMathEngine::AddMatrixElementsToMatrix(const CConstFloatHandle& matrix, int height, int width, const CFloatHandle& result, const CConstIntHandle& indices) { ASSERT_EXPR( matrix.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); ASSERT_EXPR( indices.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, height, AddMatrixElementsToMatrixCombine); AddMatrixElementsToMatrixKernel<<<blockCount, threadCount>>>(GetRaw(matrix), height, width, GetRaw(result), GetRaw(indices)); } void CCudaMathEngine::AddDiagMatrixToMatrix( const CConstFloatHandle& diagMatrix, const CConstFloatHandle& matrix, int height, int width, const CFloatHandle& result ) { ASSERT_EXPR( matrix.GetMathEngine() == 
this ); ASSERT_EXPR( result.GetMathEngine() == this ); ASSERT_EXPR( diagMatrix.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); const int widthNorm = ( width + AddDiagMatrixToMatrixCombine - 1 ) / AddDiagMatrixToMatrixCombine; dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, height, widthNorm ); AddDiagMatrixToMatrixKernel<<<blockCount, threadCount>>>( GetRaw( diagMatrix ), GetRaw( matrix ), height, width, widthNorm, GetRaw( result ) ); } void CCudaMathEngine::AddVectorToMatrixRows(int batchSize, const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle, int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int widthNorm = (matrixWidth + BatchAddVectorToMatrixRowsCombine - 1) / BatchAddVectorToMatrixRowsCombine; widthNorm = alignXSizeForWarp(widthNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, device->ThreadMax3DCountX, blockCount, threadCount, batchSize * matrixHeight, widthNorm); AddVectorToMatrixRowsKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle)); } void CCudaMathEngine::AddVectorToMatrixColumns( const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle, int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle ) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, matrixWidth ); AddVectorToMatrixColumnsKernel<<<blockCount, threadCount>>> ( GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, 
GetRaw(vectorHandle) ); } void CCudaMathEngine::AddVectorToMatrixColumns( const CConstIntHandle& matrixHandle, const CIntHandle& resultHandle, int matrixHeight, int matrixWidth, const CConstIntHandle& vectorHandle ) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, matrixWidth ); AddVectorToMatrixColumnsKernel<<<blockCount, threadCount>>> ( GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle) ); } void CCudaMathEngine::SubVectorFromMatrixColumns(const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle, int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, matrixHeight, matrixWidth); SubVectorFromMatrixColumnsKernel<<<blockCount, threadCount>>> (GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle)); } void CCudaMathEngine::SumMatrixRowsAdd( int batchSize, const CFloatHandle& resultHandle, const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth ) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); const int height = ( matrixHeight + SumMatrixRowsAddCombineCount - 1 ) / SumMatrixRowsAddCombineCount; dim3 blockCount; dim3 threadCount; getCudaTaskGrid3D( blockCount, threadCount, batchSize, height, matrixWidth ); SumMatrixRowsAddKernel<<<blockCount, threadCount>>> ( batchSize, GetRaw(resultHandle), GetRaw(matrixHandle), matrixHeight, 
matrixWidth ); } void CCudaMathEngine::SumMatrixColumns(const CFloatHandle& resultHandle, const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); sumMatrixColumnsKernelFunc(resultHandle, GetRaw(matrixHandle), matrixHeight, matrixWidth, false); } void CCudaMathEngine::MatrixColumnsEltwiseDivide( const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle ) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); const int widthNorm = ( matrixWidth + MatrixColumnsEltwiseDivideCombine - 1 ) / MatrixColumnsEltwiseDivideCombine; dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, widthNorm ); MatrixColumnsEltwiseDivideKernel<<<blockCount, threadCount>>>( GetRaw( matrixHandle ), matrixHeight, matrixWidth, widthNorm, GetRaw( vectorHandle ), GetRaw( resultHandle ) ); } void CCudaMathEngine::MatrixLogSumExpByRows(const CConstFloatHandle& matrix, int height, int width, const CFloatHandle& result, int resultSize) { ASSERT_EXPR( matrix.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); ASSERT_EXPR(resultSize >= height); SetCudaDevice( device->DeviceNumber ); int widthNorm = (width + MatrixLogSumExpByRowsCombine - 1) / MatrixLogSumExpByRowsCombine; widthNorm = alignXSizeForWarp(widthNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm); blockCount.x = 1; const int sharedSize = threadCount.x * threadCount.y * sizeof(float); MatrixLogSumExpByRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrix), height, width, GetRaw(result), widthNorm); } void CCudaMathEngine::MatrixSoftmaxByRows(const 
CConstFloatHandle& matrix, int height, int width, const CFloatHandle& result) { ASSERT_EXPR( matrix.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int widthNorm = (width + MatrixSoftmaxByRowsCombine - 1) / MatrixSoftmaxByRowsCombine; widthNorm = alignXSizeForWarp(widthNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm); blockCount.x = 1; const int sharedSize = threadCount.x * threadCount.y * sizeof(float); MatrixSoftmaxByRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrix), height, width, GetRaw(result), widthNorm); } void CCudaMathEngine::MatrixSoftmaxDiffOpByRows(const CConstFloatHandle& first, const CConstFloatHandle& second, int height, int width, const CFloatHandle& result) { ASSERT_EXPR( first.GetMathEngine() == this ); ASSERT_EXPR( second.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int widthNorm = (width + MatrixSoftmaxDiffOpByRowsCombine - 1) / MatrixSoftmaxDiffOpByRowsCombine; widthNorm = alignXSizeForWarp(widthNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm); blockCount.x = 1; const int sharedSize = threadCount.x * threadCount.y * sizeof(float); MatrixSoftmaxDiffOpByRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(first), GetRaw(second), height, width, GetRaw(result), widthNorm); } void CCudaMathEngine::MatrixSoftmaxByColumns(const CConstFloatHandle& matrix, int height, int width, const CFloatHandle& result) { ASSERT_EXPR( matrix.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int heightNorm = (height + MatrixSoftmaxByColumnsCombine - 1) / MatrixSoftmaxByColumnsCombine; heightNorm = alignXSizeForWarp(heightNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, 
width, heightNorm); blockCount.x = 1; const int sharedSize = threadCount.x * threadCount.y * sizeof(float); MatrixSoftmaxByColumnsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrix), height, width, GetRaw(result), heightNorm); } void CCudaMathEngine::MatrixSoftmaxDiffOpByColumns(const CConstFloatHandle& first, const CConstFloatHandle& second, int height, int width, const CFloatHandle& result) { ASSERT_EXPR( first.GetMathEngine() == this ); ASSERT_EXPR( second.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int heightNorm = (height + MatrixSoftmaxDiffOpByColumnsCombine - 1) / MatrixSoftmaxDiffOpByColumnsCombine; heightNorm = alignXSizeForWarp(heightNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm); blockCount.x = 1; const int sharedSize = threadCount.x * threadCount.y * sizeof(float); MatrixSoftmaxDiffOpByColumnsKernel<<<blockCount, threadCount, sharedSize>>>( GetRaw(first), GetRaw(second), height, width, GetRaw(result), heightNorm); } void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth, const CFloatHandle& resultHandle, const CIntHandle& columnIndices, int vectorSize) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR(vectorSize >= matrixHeight); SetCudaDevice( device->DeviceNumber ); int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine; widthNorm = alignXSizeForWarp(widthNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm); blockCount.x = 1; const int sharedSize = threadCount.y * threadCount.x * sizeof(CValueWithIndex); FindMaxValueWithIndicesInRowsKernel<<<blockCount, threadCount, sharedSize>>>( GetRaw(matrixHandle), matrixHeight, matrixWidth, GetRaw(resultHandle), GetRaw(columnIndices), 
widthNorm); } void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth, const CFloatHandle& resultHandle, int vectorSize) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR(vectorSize >= matrixHeight); SetCudaDevice( device->DeviceNumber ); int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine; widthNorm = alignXSizeForWarp(widthNorm); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm); blockCount.x = 1; const int sharedSize = threadCount.y * threadCount.x * sizeof(float); FindMaxValueInRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrixHandle), matrixHeight, matrixWidth, GetRaw(resultHandle), widthNorm); } void CCudaMathEngine::FindMaxValueInColumns( int batchSize, const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth, const CFloatHandle& resultHandle, const CIntHandle& rowIndices, int vectorSize ) { ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( rowIndices.GetMathEngine() == this ); ASSERT_EXPR( vectorSize >= batchSize * matrixWidth ); SetCudaDevice( device->DeviceNumber ); int heightNorm = ( matrixHeight + FindMaxValueInColumnsCombine - 1 ) / FindMaxValueInColumnsCombine; heightNorm = alignXSizeForWarp( heightNorm ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid3D( blockCount, threadCount, batchSize, matrixWidth, heightNorm ); blockCount.x = 1; const int sharedSize = threadCount.z * threadCount.y * threadCount.x * sizeof( CValueWithIndex ); FindMaxValueInColumnsKernel<<<blockCount, threadCount, sharedSize>>>( batchSize, GetRaw( matrixHandle ), matrixHeight, matrixWidth, GetRaw( resultHandle ), GetRaw( rowIndices ), heightNorm ); } void CCudaMathEngine::FindMinValueInColumns( const CConstFloatHandle& matrixHandle, int matrixHeight, 
// Tail of FindMinValueInColumns (its signature begins in the previous chunk).
	int matrixWidth, const CFloatHandle& resultHandle, const CIntHandle& columnIndices )
{
	SetCudaDevice( device->DeviceNumber );

	// Initialize using the first row data
	VectorCopy( resultHandle, matrixHandle, matrixWidth );
	VectorFill( columnIndices, 0, matrixWidth );

	// 1D grid: one work item per column
	int blockCount;
	int threadCount;
	getCudaTaskGrid( blockCount, threadCount, matrixWidth );

	FindMinValueInColumnsKernel<<<blockCount, threadCount>>>( GetRaw( matrixHandle ), matrixHeight, matrixWidth,
		GetRaw( resultHandle ), GetRaw( columnIndices ) );
}

// float input / float lookup tables: validate handles, then dispatch to the shared
// template implementation.
void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstFloatHandle& inputHandle,
	const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CFloatHandle& outputHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( outputHandle.GetMathEngine() == this );

	vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle, lookupHandles,
		lookupDimensions, lookupCount, outputHandle, outputChannelsCount);
}

// int input / float lookup tables: same dispatch.
void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstIntHandle& inputHandle,
	const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CFloatHandle& outputHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( outputHandle.GetMathEngine() == this );

	vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle, lookupHandles,
		lookupDimensions, lookupCount, outputHandle, outputChannelsCount);
}

// int input / int lookup tables; the dispatch call continues in the next chunk.
void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstIntHandle& inputHandle,
	const CConstIntHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CIntHandle& outputHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( outputHandle.GetMathEngine() == this );
vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount); } void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CConstFloatHandle& inputHandle, const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( multHandle.GetMathEngine() == this ); ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); vectorMultichannelLookupAndAddToTable(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount, multHandle, matrixHandle, outputChannelsCount); } void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CConstIntHandle& inputHandle, const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( multHandle.GetMathEngine() == this ); ASSERT_EXPR( matrixHandle.GetMathEngine() == this ); vectorMultichannelLookupAndAddToTable(batchSize, channelCount, inputHandle, lookupHandles, lookupDimensions, lookupCount, multHandle, matrixHandle, outputChannelsCount); } void CCudaMathEngine::BitSetBinarization(int batchSize, int bitSetSize, const CConstIntHandle& inputHandle, int outputVectorSize, const CFloatHandle& resultHandle) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid( blockCount, threadCount, batchSize * outputVectorSize ); BitSetBinarizationKernel<<<blockCount, threadCount>>>(batchSize, bitSetSize, GetRaw(inputHandle), outputVectorSize, 
// Tail of BitSetBinarization (its definition begins in the previous chunk): the last
// kernel argument and the closing brace.
GetRaw(resultHandle)); }

// Per-batch product of a lookup-table matrix and a lookup-table vector; exact element
// semantics live in MultiplyLookupMatrixByLookupVectorKernel.
void CCudaMathEngine::MultiplyLookupMatrixByLookupVector(int batchSize, const CLookupMatrix& matrix,
	const CLookupVector& vector, const CFloatHandle& resultHandle, int resultSize)
{
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );
	ASSERT_EXPR(matrix.Width() == vector.VectorSize());
	ASSERT_EXPR(resultSize >= batchSize * matrix.Height());

	// X dimension runs along the matrix width, padded to a warp-aligned size
	int widthNorm = (matrix.Width() + MultiplyLookupMatrixByLookupVectorCombine - 1) / MultiplyLookupMatrixByLookupVectorCombine;
	widthNorm = alignXSizeForWarp(widthNorm);

	dim3 blockCount;
	dim3 threadCount;
	getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Height(), widthNorm);

	// NOTE(review): blockCount.x >= 1 always holds, so this fill runs unconditionally.
	// The sibling sumMatrixColumnsKernelFunc tests `> 1` for the same purpose; if the
	// kernel accumulates atomically in all cases the unconditional zeroing is required
	// and `> 0` is intentional — confirm against the kernel before "fixing" this.
	if(blockCount.x > 0) {
		// Several GPUs may take part in adding up one row, need atomic operations
		// Set resultHandle to zeros
		VectorFill(resultHandle, 0, batchSize * matrix.Height());
	}

	const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
	MultiplyLookupMatrixByLookupVectorKernel<<<blockCount, threadCount, sharedSize>>>(batchSize,
		GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize, GetRaw(matrix.Rows), matrix.RowCount,
		GetRaw(vector.Table), vector.Dims.VectorCount, GetRaw(vector.Vector),
		GetRaw(resultHandle), resultSize, widthNorm);
}

// result = matrix^T * vector, overwriting the result (isAdd == false: the shared
// implementation zero-fills the output before accumulating).
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVector(int batchSize, const CLookupMatrix& matrix,
	const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );

	multiplyVectorByLookupMatrixImpl(batchSize, matrix, vectorHandle, resultHandle, resultSize, false);
}

// result += matrix^T * vector; the delegating call (isAdd == true) continues in the
// next chunk.
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVectorAndAdd(int batchSize, const CLookupMatrix& matrix,
	const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
multiplyVectorByLookupMatrixImpl(batchSize, matrix, vectorHandle, resultHandle, resultSize, true); } void CCudaMathEngine::MultiplyVectorByTransposedLookupVectorAndAddToTable(int batchSize, const CFloatHandle& table, int vectorCount, int vectorSize, const CConstIntHandle& indexHandle, const CConstFloatHandle& firstHandle, int firstSize, const CLookupVector& second) { ASSERT_EXPR( table.GetMathEngine() == this ); ASSERT_EXPR( indexHandle.GetMathEngine() == this ); ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR(vectorSize == second.VectorSize()); SetCudaDevice( device->DeviceNumber ); int vectorSizeNorm = (vectorSize + MultiplyVectorByTransposedLookupVectorAndAddToTableCombine - 1) / MultiplyVectorByTransposedLookupVectorAndAddToTableCombine; dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, batchSize * firstSize, vectorSizeNorm); MultiplyVectorByTransposedLookupVectorAndAddToTableKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(table), vectorCount, vectorSize, GetRaw(indexHandle), GetRaw(firstHandle), firstSize, GetRaw(second.Table), GetRaw(second.Vector), vectorSizeNorm); } void CCudaMathEngine::MultiplyDiagMatrixByMatrix(const CConstFloatHandle& firstHandle, int firstSize, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, firstSize, secondWidth); MultiplyDiagMatrixByMatrixKernel<<<blockCount, threadCount>>> (GetRaw(firstHandle), firstSize, GetRaw(secondHandle), secondWidth, GetRaw(resultHandle)); } void CCudaMathEngine::Multiply1DiagMatrixByMatrix(int batchSize, const CConstFloatHandle& firstHandle, int firstSize, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& 
resultHandle, int) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); dim3 blockCount; dim3 threadCount; int batchNorm = (batchSize + Multiply1DiagMatrixByMatrixCombine - 1) / Multiply1DiagMatrixByMatrixCombine; getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, batchNorm, firstSize * secondWidth); Multiply1DiagMatrixByMatrixKernel<<<blockCount, threadCount>>> (batchSize, GetRaw(firstHandle), firstSize, GetRaw(secondHandle), secondWidth, GetRaw(resultHandle), batchNorm); } void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstFloatHandle& firstHandle, int height, int medium, int width, int channels, const CFloatHandle& resultHandle, int resultBufferSize) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); transposeMatrixImpl(batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize); } void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstIntHandle& firstHandle, int height, int medium, int width, int channels, const CIntHandle& resultHandle, int resultBufferSize) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); transposeMatrixImpl(batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize); } void CCudaMathEngine::MultiplyDiagMatrixByMatrixAndAdd( int batchSize, const CConstFloatHandle& firstHandle, int firstSize, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int batchSizeNorm = ( batchSize + MultiplyDiagMatrixByMatrixAndSumCombine - 1 ) / MultiplyDiagMatrixByMatrixAndSumCombine; 
batchSizeNorm = alignXSizeForWarp( batchSizeNorm ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid3DMinZYX( 1, 1, 512, blockCount, threadCount, firstSize, secondWidth, batchSizeNorm ); int sharedSize = threadCount.x * threadCount.y * threadCount.z * sizeof( float ); MultiplyDiagMatrixByMatrixAndSumKernel<<<blockCount, threadCount, sharedSize>>>( batchSize, GetRaw( firstHandle ), firstSize, GetRaw( secondHandle ), secondWidth, GetRaw( resultHandle ), batchSizeNorm ); } void CCudaMathEngine::RowMultiplyMatrixByMatrix( const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, int height, int width, const CFloatHandle& resultHandle ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); VectorFill( resultHandle, 0, height ); int widthNorm = ( width + RowMultiplyMatrixByMatrixCombine - 1 ) / RowMultiplyMatrixByMatrixCombine; dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, height, widthNorm ); const int sharedSize = threadCount.y * threadCount.x * sizeof( float ); RowMultiplyMatrixByMatrixKernel<<<blockCount, threadCount, sharedSize>>>( GetRaw( firstHandle ), GetRaw( secondHandle ), height, width, GetRaw( resultHandle ), widthNorm ); } void CCudaMathEngine::MatrixSpreadRows(const CConstFloatHandle& sourceHandle, int height, int width, const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle, const CConstFloatHandle& fillValue) { ASSERT_EXPR( indexHandle.GetMathEngine() == this ); ASSERT_EXPR( sourceHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this ); matrixSpreadRowsImpl(GetRaw(sourceHandle), height, width, resultHandle, resultHeight, GetRaw(indexHandle), fillValue); } void CCudaMathEngine::MatrixSpreadRowsAdd(const 
CConstFloatHandle& sourceHandle, int height, int width, const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle) { ASSERT_EXPR( indexHandle.GetMathEngine() == this ); ASSERT_EXPR( sourceHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine; dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm); MatrixSpreadRowsAddKernel<<<blockCount, threadCount>>>(GetRaw(sourceHandle), height, width, GetRaw(resultHandle), GetRaw(indexHandle), widthNorm); } void CCudaMathEngine::MatrixSpreadRows(const CConstIntHandle& sourceHandle, int height, int width, const CIntHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle, const CConstIntHandle& fillValue) { ASSERT_EXPR( indexHandle.GetMathEngine() == this ); ASSERT_EXPR( sourceHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this ); matrixSpreadRowsImpl(GetRaw(sourceHandle), height, width, resultHandle, resultHeight, GetRaw(indexHandle), fillValue); } void CCudaMathEngine::LookupAndSum( const CConstIntHandle& indicesHandle, int batchSize, int indexCount, const CConstFloatHandle& tableHandle, int vectorSize, const CFloatHandle& result ) { ASSERT_EXPR( indicesHandle.GetMathEngine() == this ); ASSERT_EXPR( tableHandle.GetMathEngine() == this ); ASSERT_EXPR( result.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); dim3 blockCount, threadCount; getCudaTaskGrid2D( blockCount, threadCount, batchSize, vectorSize ); LookupAndSumKernel<<<blockCount, threadCount>>>( GetRaw( indicesHandle ), batchSize, indexCount, GetRaw( tableHandle ), vectorSize, GetRaw( result ) ); } void CCudaMathEngine::LookupAndAddToTable( const CConstIntHandle& indicesHandle, int batchSize, int indexCount, const 
CConstFloatHandle& additionsHandle, int vectorSize, const CFloatHandle& tableHandle, int vectorCount ) { ASSERT_EXPR( indicesHandle.GetMathEngine() == this ); ASSERT_EXPR( tableHandle.GetMathEngine() == this ); ASSERT_EXPR( additionsHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); VectorFill( tableHandle, 0.f, vectorSize * vectorCount ); dim3 blockCount, threadCount; getCudaTaskGrid3D( blockCount, threadCount, batchSize, indexCount, vectorSize ); LookupAndAddToTableKernel<<<blockCount, threadCount>>>( GetRaw( indicesHandle ), batchSize, indexCount, GetRaw( additionsHandle ), vectorSize, GetRaw( tableHandle ) ); } void CCudaMathEngine::EnumBinarization(int batchSize, const CConstFloatHandle& inputHandle, int enumSize, const CFloatHandle& resultHandle) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, batchSize * enumSize, EnumBinarizationCombine); EnumBinarizationKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle), enumSize, GetRaw(resultHandle)); } void CCudaMathEngine::EnumBinarization(int batchSize, const CConstIntHandle& inputHandle, int enumSize, const CFloatHandle& resultHandle) { ASSERT_EXPR( inputHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, batchSize * enumSize, EnumBinarizationCombine); EnumBinarizationKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle), enumSize, GetRaw(resultHandle)); } template<class T> void CCudaMathEngine::transposeMatrixImpl(int batchSize, const CTypedMemoryHandle<const T>& firstHandle, int height, int medium, int width, int channels, const CTypedMemoryHandle<T>& resultHandle, int resultBufferSize) { int size = batchSize * height * medium * width * channels; 
ASSERT_EXPR(resultBufferSize >= size); SetCudaDevice( device->DeviceNumber ); int blockCount; int threadCount; getCudaTaskGrid(blockCount, threadCount, size, TransposeMatrixCombine); TransposeMatrixKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(firstHandle), height, medium, width, channels, GetRaw(resultHandle), size); } void CCudaMathEngine::sumMatrixColumnsKernelFunc(const CFloatHandle& resultHandle, const float* matrix, int matrixHeight, int matrixWidth, bool isNeg) { ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); int widthNorm = (matrixWidth + SumMatrixColumnsCombine - 1) / SumMatrixColumnsCombine; dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, matrixHeight, widthNorm); int maxAtomicPerX = SumMatrixColumnsMaxAtomic / blockCount.y; if(maxAtomicPerX <= 0) { maxAtomicPerX = 1; } if((int)blockCount.x > maxAtomicPerX) { blockCount.x = maxAtomicPerX; } int totalThreadXCount = threadCount.x * blockCount.x; int combine = (matrixWidth + totalThreadXCount - 1) / totalThreadXCount; if( blockCount.x > 1 ) { VectorFill(resultHandle, 0, matrixHeight); } const int sharedSize = threadCount.y * threadCount.x * sizeof(float); SumMatrixColumnsKernel<<<blockCount, threadCount, sharedSize>>> (GetRaw(resultHandle), matrix, matrixHeight, matrixWidth, isNeg, widthNorm, combine); } void CCudaMathEngine::multiplyVectorByLookupMatrixImpl(int batchSize, const CLookupMatrix& matrix, const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize, bool isAdd) { ASSERT_EXPR( vectorHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR(resultSize >= batchSize * matrix.Width()); SetCudaDevice( device->DeviceNumber ); int heightNorm = (matrix.Height() + MultiplyTransposedLookupMatrixByVectorCombine - 1) / MultiplyTransposedLookupMatrixByVectorCombine; heightNorm = alignXSizeForWarp(heightNorm); // X coordinate is Height to allow for 
warp reduction dim3 blockCount; dim3 threadCount; getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Width(), heightNorm); if(blockCount.x > 0 && !isAdd) { // Several GPUs may take part in adding up one column, need atomic operations // Set resultHandle to zeros VectorFill(resultHandle, 0, batchSize * matrix.Width()); } const int sharedSize = threadCount.x * threadCount.y * sizeof(float); MultiplyTransposedLookupMatrixByVectorKernel<<<blockCount, threadCount, sharedSize>>>(batchSize, GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize, GetRaw(matrix.Rows), matrix.RowCount, GetRaw(vectorHandle), GetRaw(resultHandle), isAdd, heightNorm); } template<class T> void CCudaMathEngine::matrixSpreadRowsImpl(const T* source, int height, int width, CTypedMemoryHandle<T> result, int resultHeight, const int* index, const CTypedMemoryHandle<const T>& fillValue) { SetCudaDevice( device->DeviceNumber ); if(fillValue.IsNull()) { VectorFill( result, 0, resultHeight * width); } else { VectorFill( result, resultHeight * width, fillValue); } int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine; dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm); MatrixSpreadRowsKernel<T><<<blockCount, threadCount>>>(source, height, width, GetRaw( result ), index, widthNorm); } template<class TInput, class TLookup> void CCudaMathEngine::vectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CTypedMemoryHandle<const TInput>& inputHandle, const CTypedMemoryHandle<const TLookup>* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CTypedMemoryHandle<TLookup>& outputHandle, int outputChannelsCount) { SetCudaDevice( device->DeviceNumber ); int batchNorm = (batchSize + BatchVectorLookupAndCopyCombineBatch - 1) / BatchVectorLookupAndCopyCombineBatch; int outputChannel = 0; for(int j = 0; j < lookupCount; ++j) { dim3 blockCount; dim3 threadCount; 
getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize); VectorChannelLookupAndCopyKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle) + j, channelCount, GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize, GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm); outputChannel += lookupDimensions[j].VectorSize; } if(lookupCount < channelCount) { dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, batchNorm, channelCount - lookupCount); BatchVectorChannelCopyKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle) + lookupCount, channelCount, channelCount - lookupCount, GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm); } } template<class T> void CCudaMathEngine::vectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CTypedMemoryHandle<const T>& inputHandle, const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount, const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount) { SetCudaDevice( device->DeviceNumber ); int batchNorm = (batchSize + BatchVectorLookupAndAddToTableCombine - 1) / BatchVectorLookupAndAddToTableCombine; float mult = multHandle.GetValue(); int outputChannel = 0; for (int j = 0; j < lookupCount; ++j) { dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize); VectorChannelLookupAndAddToTableKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle) + j, channelCount, GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize, mult, GetRaw(matrixHandle) + outputChannel, outputChannelsCount, batchNorm); outputChannel += lookupDimensions[j].VectorSize; } } } // namespace NeoML #endif // NEOML_USE_CUDA
8e32a037e43722129be9c1a8a072d9cea9d09543.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/class_arg_kernel.cuh" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ const char* str; \ hipGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ std::cout << "(CUDART) returned " << hipGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " T data0 = data[0];\n" " for( int i=0; 
i<N-1; ++i ) {\n" " data[0] *= data0;\n" " }\n" "}\n"; TEST(JitifyTest, Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, 
d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } TEST(JitifyTest, DefaultConstructable) { // Same as Simple, but uses default + move constructors of Program, Kernel, // KernelInstantiation, and KernelLauncher classes. static jitify::JitCache kernel_cache; jitify::Program program; program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; jitify::Kernel kernel; kernel = program.kernel("my_kernel"); jitify::KernelInstantiation kernel_inst; kernel_inst = kernel.instantiate(3, type_of(*d_data)); jitify::KernelLauncher kernel_launcher; kernel_launcher = kernel_inst.configure(grid, block); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_launcher.launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); kernel_launcher = kernel_inst.configure_1d_max_occupancy(); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_launcher.launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " "pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " 
}\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using 
jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); 
CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program( constmem_program_source, 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); 
CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, 
{}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( "example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = 
jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(hipMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( hipMemcpy(&h_out[0], d_out, n * sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v2 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v3 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v4("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v5("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v6("bad_program\nNOT CUDA C!"), std::runtime_error); } static const char* const pragma_repl_program_source = R"(my_program template <int N, 
typename T> __global__ void my_kernel(T* data) { if (blockIdx.x != 0 || threadIdx.x != 0) return; T data0 = data[0]; #pragma unroll for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 // Make sure parsing works with comments for (int i = 0; i < N - 1; ++i) data[0] *= data0; // TODO: Add support for block comments. //#pragma unroll 1 /* Make sure parsing works with comments */ //for (int i = 0; i < N - 1; ++i) data[0] *= data0; } )"; TEST(JitifyTest, PragmaReplacement) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(pragma_repl_program_source); typedef float T; T* d_data = nullptr; using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; #if TORCH_HIP_VERSION < 11000 const char* cppstd = "-std=c++98"; #else const char* cppstd = "-std=c++11"; #endif auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); auto program_v2 = jitify::experimental::Program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); } static const char* const cub_program_source = "cub_program\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef hipcub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float sum = 0; 
for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(hipFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float unused_array[3];\n" 
"__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { hipFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(hipFree(d_data)); } static const char* const curand_program_source = 
"curand_program\n"
    "#include <hiprand/hiprand_kernel.h>\n"
    "__global__ void my_kernel() {}\n"
    "\n";

// Checks that the hiprand device headers can be JIT-compiled at all; the
// kernel body is intentionally empty and is never launched here.
TEST(JitifyTest, CuRandKernel) {
  auto program_v2 = jitify::experimental::Program(
      curand_program_source, {},
      // Note: --remove-unused-globals is added to remove huge precomputed
      // arrays that come from CURAND.
      {"-I" CUDA_INC_DIR, "--remove-unused-globals"});
  auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate();
  // TODO: Expand this test to actually call hiprand kernels and check outputs.
}

// First translation unit for the external-linking test: defines the
// __constant__/__device__ symbols and the device function f().
static const char* const linktest_program1_source =
    "linktest_program1\n"
    "__constant__ int c = 5;\n"
    "__device__ int d = 7;\n"
    "__device__ int f(int i) { return i + 11; }\n"
    "\n";

// Second translation unit: declares the same symbols `extern` and consumes
// them from my_kernel, so linking against program1's PTX is required.
static const char* const linktest_program2_source =
    "linktest_program2\n"
    "extern __constant__ int c;\n"
    "extern __device__ int d;\n"
    "extern __device__ int f(int);\n"
    "__global__ void my_kernel(int* data) {\n"
    " *data = f(*data + c + d);\n"
    "}\n"
    "\n";

TEST(JitifyTest, LinkExternalFiles) {
  // presumably forces lazy context/runtime initialization — TODO confirm
  hipFree(0);
  // Ensure temporary file is deleted at the end.
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(hipFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { hipFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(hipFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : public Base 
{};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { hipFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } static const char* const builtin_numeric_limits_program_source = "builtin_numeric_limits_program\n" "#include <limits>\n" "struct MyType {};\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = std::numeric_limits<T>::min();\n" " data[1] = std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericLimitsHeader) { hipFree(0); using namespace 
jitify::experimental; auto program = Program(builtin_numeric_limits_program_source); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } static const char* const builtin_numeric_cuda_std_limits_program_source = "builtin_numeric_cuda_std_limits_program\n" "#include <climits>\n" "#include <limits>\n" "#include <cuda/std/climits>\n" // test fails without this explicit include "#include <cuda/std/limits>\n" "struct MyType {};\n" "namespace cuda {\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "} // namespace cuda\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = cuda::std::numeric_limits<T>::min();\n" " data[1] = cuda::std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericCudaStdLimitsHeader) { hipFree(0); using namespace jitify::experimental; auto program = Program(builtin_numeric_cuda_std_limits_program_source, {}, {"-I" CUDA_INC_DIR}); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } TEST(JitifyTest, ClassKernelArg) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; int h_data; int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); dim3 grid(1); dim3 block(1); jitify::Program program = kernel_cache.program("example_headers/class_arg_kernel.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); { // test that we can pass an arg object to a kernel Arg arg(-1); CHECK_CUDA(program.kernel("class_arg_kernel") 
.instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART(hipDeviceSynchronize()); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg.x, h_data); } { // test that we can pass an arg object rvalue to a kernel int value = -2; CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, Arg(value))); CHECK_CUDART(hipDeviceSynchronize()); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(value, h_data); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-3); // references are passed as pointers since refernces are just pointers from // an ABI point of view CHECK_CUDA(program.kernel("class_arg_ref_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-4); CHECK_CUDA(program.kernel("class_arg_ptr_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } CHECK_CUDART(hipFree(d_data)); } static const char* const assert_program_source = R"( #include <cassert> __global__ void my_assert_kernel() { assert(0 == 1); } )"; static const char* const get_attribute_program_source = R"( __global__ void get_attribute_kernel(int *out, int *in) { __shared__ int buffer[4096]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, GetAttribute) { // Checks that we can get function attributes jitify::JitCache kernel_cache; auto program = kernel_cache.program(get_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = 
program.kernel("get_attribute_kernel").instantiate(); EXPECT_EQ(4096 * (int)sizeof(int), instance.get_func_attribute(hipFuncAttributeSharedSizeBytes)); } static const char* const set_attribute_program_source = R"( __global__ void set_attribute_kernel(int *out, int *in) { extern __shared__ int buffer[]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, SetAttribute) { // Checks that we can set function attributes jitify::JitCache kernel_cache; int* in; CHECK_CUDART(hipMalloc((void**)&in, sizeof(int))); int* out; CHECK_CUDART(hipMalloc((void**)&out, sizeof(int))); // query the maximum supported shared bytes per block hipDevice_t device; CHECK_CUDA(hipDeviceGet(&device, 0)); int shared_bytes; CHECK_CUDA(hipDeviceGetAttribute( &shared_bytes, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); auto program = kernel_cache.program(set_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("set_attribute_kernel").instantiate(); instance.set_func_attribute(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_bytes); dim3 grid(1); dim3 block(1); // this kernel will fail on Volta+ unless the set attribute succeeded CHECK_CUDA(instance.configure(grid, block, shared_bytes).launch(out, in)); CHECK_CUDART(hipFree(out)); CHECK_CUDART(hipFree(in)); } TEST(JitifyTest, EnvVarOptions) { setenv("JITIFY_OPTIONS", "-bad_option", true); EXPECT_THROW(jitify::JitCache kernel_cache; auto program = kernel_cache.program(simple_program_source), std::runtime_error); EXPECT_THROW(jitify::experimental::Program program(simple_program_source), std::runtime_error); setenv("JITIFY_OPTIONS", "", true); } // NOTE: This MUST be the last test in the file, due to sticky CUDA error. 
// Verifies that jitify's built-in <cassert> support compiles and that a
// kernel containing assert(0 == 1) can be instantiated and launched.
// (Per the note above this test, it must run last: the failed device assert
// leaves a sticky CUDA error behind.)
TEST(JitifyTest, AssertHeader) {
  jitify::JitCache cache;
  auto prog = cache.program(assert_program_source, {}, {"-I" CUDA_INC_DIR});
  const dim3 one_block(1);
  const dim3 one_thread(1);
  auto launcher = prog.kernel("my_assert_kernel")
                      .instantiate<>()
                      .configure(one_block, one_thread);
  CHECK_CUDA((launcher.launch()));
}
8e32a037e43722129be9c1a8a072d9cea9d09543.cu
/* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/class_arg_kernel.cuh" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ CUresult status = call; \ if (status != CUDA_SUCCESS) { \ const char* str; \ cuGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, CUDA_SUCCESS); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ cudaError_t status = call; \ if (status != cudaSuccess) { \ std::cout << "(CUDART) returned " << cudaGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, cudaSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " T data0 = data[0];\n" " for( int 
i=0; i<N-1; ++i ) {\n" " data[0] *= data0;\n" " }\n" "}\n"; TEST(JitifyTest, Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); 
CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } TEST(JitifyTest, DefaultConstructable) { // Same as Simple, but uses default + move constructors of Program, Kernel, // KernelInstantiation, and KernelLauncher classes. static jitify::JitCache kernel_cache; jitify::Program program; program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; jitify::Kernel kernel; kernel = program.kernel("my_kernel"); jitify::KernelInstantiation kernel_inst; kernel_inst = kernel.instantiate(3, type_of(*d_data)); jitify::KernelLauncher kernel_launcher; kernel_launcher = kernel_inst.configure(grid, block); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_launcher.launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); kernel_launcher = kernel_inst.configure_1d_max_occupancy(); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_launcher.launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " 
"pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " }\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using 
jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; 
CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program( constmem_program_source, 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); 
CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look 
up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( 
"example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(cudaMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( cudaMemcpy(&h_out[0], d_out, n * sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v2 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v3 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v4("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v5("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v6("bad_program\nNOT CUDA C!"), 
std::runtime_error); } static const char* const pragma_repl_program_source = R"(my_program template <int N, typename T> __global__ void my_kernel(T* data) { if (blockIdx.x != 0 || threadIdx.x != 0) return; T data0 = data[0]; #pragma unroll for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 // Make sure parsing works with comments for (int i = 0; i < N - 1; ++i) data[0] *= data0; // TODO: Add support for block comments. //#pragma unroll 1 /* Make sure parsing works with comments */ //for (int i = 0; i < N - 1; ++i) data[0] *= data0; } )"; TEST(JitifyTest, PragmaReplacement) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(pragma_repl_program_source); typedef float T; T* d_data = nullptr; using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; #if CUDA_VERSION < 11000 const char* cppstd = "-std=c++98"; #else const char* cppstd = "-std=c++11"; #endif auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); auto program_v2 = jitify::experimental::Program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); } static const char* const cub_program_source = "cub_program\n" "#include <cub/block/block_load.cuh>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <cub/block/block_reduce.cuh>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef cub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float 
sum = 0; for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(cudaFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float 
unused_array[3];\n" "__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { cudaFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(cudaFree(d_data)); } static const char* 
const curand_program_source = "curand_program\n" "#include <curand_kernel.h>\n" "__global__ void my_kernel() {}\n" "\n"; TEST(JitifyTest, CuRandKernel) { auto program_v2 = jitify::experimental::Program( curand_program_source, {}, // Note: --remove-unused-globals is added to remove huge precomputed // arrays that come from CURAND. {"-I" CUDA_INC_DIR, "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); // TODO: Expand this test to actually call curand kernels and check outputs. } static const char* const linktest_program1_source = "linktest_program1\n" "__constant__ int c = 5;\n" "__device__ int d = 7;\n" "__device__ int f(int i) { return i + 11; }\n" "\n"; static const char* const linktest_program2_source = "linktest_program2\n" "extern __constant__ int c;\n" "extern __device__ int d;\n" "extern __device__ int f(int);\n" "__global__ void my_kernel(int* data) {\n" " *data = f(*data + c + d);\n" "}\n" "\n"; TEST(JitifyTest, LinkExternalFiles) { cudaFree(0); // Ensure temporary file is deleted at the end. 
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(cudaFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { cudaFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(cudaFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : 
public Base {};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { cudaFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } static const char* const builtin_numeric_limits_program_source = "builtin_numeric_limits_program\n" "#include <limits>\n" "struct MyType {};\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = std::numeric_limits<T>::min();\n" " data[1] = std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericLimitsHeader) { cudaFree(0); using 
namespace jitify::experimental; auto program = Program(builtin_numeric_limits_program_source); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } static const char* const builtin_numeric_cuda_std_limits_program_source = "builtin_numeric_cuda_std_limits_program\n" "#include <climits>\n" "#include <limits>\n" "#include <cuda/std/climits>\n" // test fails without this explicit include "#include <cuda/std/limits>\n" "struct MyType {};\n" "namespace cuda {\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "} // namespace cuda\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = cuda::std::numeric_limits<T>::min();\n" " data[1] = cuda::std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericCudaStdLimitsHeader) { cudaFree(0); using namespace jitify::experimental; auto program = Program(builtin_numeric_cuda_std_limits_program_source, {}, {"-I" CUDA_INC_DIR}); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } TEST(JitifyTest, ClassKernelArg) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; int h_data; int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); dim3 grid(1); dim3 block(1); jitify::Program program = kernel_cache.program("example_headers/class_arg_kernel.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); { // test that we can pass an arg object to a kernel Arg arg(-1); CHECK_CUDA(program.kernel("class_arg_kernel") 
.instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART(cudaDeviceSynchronize()); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg.x, h_data); } { // test that we can pass an arg object rvalue to a kernel int value = -2; CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, Arg(value))); CHECK_CUDART(cudaDeviceSynchronize()); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(value, h_data); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-3); // references are passed as pointers since refernces are just pointers from // an ABI point of view CHECK_CUDA(program.kernel("class_arg_ref_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-4); CHECK_CUDA(program.kernel("class_arg_ptr_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } CHECK_CUDART(cudaFree(d_data)); } static const char* const assert_program_source = R"( #include <cassert> __global__ void my_assert_kernel() { assert(0 == 1); } )"; static const char* const get_attribute_program_source = R"( __global__ void get_attribute_kernel(int *out, int *in) { __shared__ int buffer[4096]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, GetAttribute) { // Checks that we can get function attributes jitify::JitCache kernel_cache; auto program = kernel_cache.program(get_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = 
program.kernel("get_attribute_kernel").instantiate(); EXPECT_EQ(4096 * (int)sizeof(int), instance.get_func_attribute(CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES)); } static const char* const set_attribute_program_source = R"( __global__ void set_attribute_kernel(int *out, int *in) { extern __shared__ int buffer[]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, SetAttribute) { // Checks that we can set function attributes jitify::JitCache kernel_cache; int* in; CHECK_CUDART(cudaMalloc((void**)&in, sizeof(int))); int* out; CHECK_CUDART(cudaMalloc((void**)&out, sizeof(int))); // query the maximum supported shared bytes per block CUdevice device; CHECK_CUDA(cuDeviceGet(&device, 0)); int shared_bytes; CHECK_CUDA(cuDeviceGetAttribute( &shared_bytes, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); auto program = kernel_cache.program(set_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("set_attribute_kernel").instantiate(); instance.set_func_attribute(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_bytes); dim3 grid(1); dim3 block(1); // this kernel will fail on Volta+ unless the set attribute succeeded CHECK_CUDA(instance.configure(grid, block, shared_bytes).launch(out, in)); CHECK_CUDART(cudaFree(out)); CHECK_CUDART(cudaFree(in)); } TEST(JitifyTest, EnvVarOptions) { setenv("JITIFY_OPTIONS", "-bad_option", true); EXPECT_THROW(jitify::JitCache kernel_cache; auto program = kernel_cache.program(simple_program_source), std::runtime_error); EXPECT_THROW(jitify::experimental::Program program(simple_program_source), std::runtime_error); setenv("JITIFY_OPTIONS", "", true); } // NOTE: This MUST be the last test in the file, due to sticky CUDA error. 
TEST(JitifyTest, AssertHeader) { // Checks that cassert works as expected jitify::JitCache kernel_cache; auto program = kernel_cache.program(assert_program_source, {}, {"-I" CUDA_INC_DIR}); dim3 grid(1); dim3 block(1); CHECK_CUDA((program.kernel("my_assert_kernel") .instantiate<>() .configure(grid, block) .launch())); }
6a70ae09879d22eaa60d96d05be721a4b508744f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "highlevel/CascadedCommon.h" #include "DeltaGPU.h" #include "common.h" #include "type_macros.h" #include <cassert> #include <limits> namespace nvcomp { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr int const BLOCK_SIZE = 1024; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ namespace { template <typename VALUE> __global__ void deltaKernel( VALUE** const outputPtr, const VALUE* const input, const size_t* const numDevice, const size_t /* maxNum */) { const size_t num = *numDevice; if (BLOCK_SIZE * blockIdx.x < num) { VALUE* const output = *outputPtr; const int idx = threadIdx.x + BLOCK_SIZE * blockIdx.x; __shared__ VALUE buffer[BLOCK_SIZE + 1]; if (idx < num) { buffer[threadIdx.x + 1] = input[idx]; } if (threadIdx.x == 0) { // first thread must do something special if (idx > 0) { buffer[0] = input[idx - 1]; } else { buffer[0] = 0; } } __syncthreads(); if (idx < num) { output[idx] = buffer[threadIdx.x + 1] - buffer[threadIdx.x]; } } } } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { template <typename VALUE> void deltaLaunch( void** const outPtr, void const* const in, const size_t* const numDevice, const size_t maxNum, hipStream_t stream) { VALUE** const outTypedPtr = reinterpret_cast<VALUE**>(outPtr); const VALUE* const inTyped = static_cast<const VALUE*>(in); const dim3 block(BLOCK_SIZE); const dim3 grid(roundUpDiv(maxNum, 
BLOCK_SIZE)); hipLaunchKernelGGL(( deltaKernel), dim3(grid), dim3(block), 0, stream, outTypedPtr, inTyped, numDevice, maxNum); hipError_t err = hipGetLastError(); if (err != hipSuccess) { throw std::runtime_error( "Failed to launch deltaKernel kernel: " + std::to_string(err)); } } } // namespace /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void DeltaGPU::compress( void* const /* workspace */, const size_t /* workspaceSize*/, const nvcompType_t inType, void** const outPtr, const void* const in, const size_t* const numDevice, const size_t maxNum, hipStream_t stream) { NVCOMP_TYPE_ONE_SWITCH( inType, deltaLaunch, outPtr, in, numDevice, maxNum, stream); } size_t DeltaGPU::requiredWorkspaceSize( const size_t /*num*/, const nvcompType_t /* type */) { return 0; } } // namespace nvcomp
6a70ae09879d22eaa60d96d05be721a4b508744f.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "highlevel/CascadedCommon.h" #include "DeltaGPU.h" #include "common.h" #include "type_macros.h" #include <cassert> #include <limits> namespace nvcomp { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr int const BLOCK_SIZE = 1024; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ namespace { template <typename VALUE> __global__ void deltaKernel( VALUE** const outputPtr, const VALUE* const input, const size_t* const numDevice, const size_t /* maxNum */) { const size_t num = *numDevice; if (BLOCK_SIZE * blockIdx.x < num) { VALUE* const output = *outputPtr; const int idx = threadIdx.x + BLOCK_SIZE * blockIdx.x; __shared__ VALUE buffer[BLOCK_SIZE + 1]; if (idx < num) { buffer[threadIdx.x + 1] = input[idx]; } if (threadIdx.x == 0) { // first thread must do something special if (idx > 0) { buffer[0] = input[idx - 1]; } else { buffer[0] = 0; } } __syncthreads(); if (idx < num) { output[idx] = buffer[threadIdx.x + 1] - buffer[threadIdx.x]; } } } } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { template <typename VALUE> void deltaLaunch( void** const outPtr, void const* const in, const size_t* const numDevice, const size_t maxNum, cudaStream_t stream) { VALUE** const outTypedPtr = reinterpret_cast<VALUE**>(outPtr); const VALUE* const inTyped = static_cast<const VALUE*>(in); const dim3 block(BLOCK_SIZE); const dim3 grid(roundUpDiv(maxNum, 
BLOCK_SIZE)); deltaKernel<<<grid, block, 0, stream>>>( outTypedPtr, inTyped, numDevice, maxNum); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { throw std::runtime_error( "Failed to launch deltaKernel kernel: " + std::to_string(err)); } } } // namespace /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void DeltaGPU::compress( void* const /* workspace */, const size_t /* workspaceSize*/, const nvcompType_t inType, void** const outPtr, const void* const in, const size_t* const numDevice, const size_t maxNum, cudaStream_t stream) { NVCOMP_TYPE_ONE_SWITCH( inType, deltaLaunch, outPtr, in, numDevice, maxNum, stream); } size_t DeltaGPU::requiredWorkspaceSize( const size_t /*num*/, const nvcompType_t /* type */) { return 0; } } // namespace nvcomp
b3f31a5f465e7953cc1c2974fbcb7e83b1e8a06f.hip
// !!! This is a file automatically generated by hipify!!! /*$Id: main.cu 755 2009-11-18 13:22:54Z wenbinor $*/ /** *This is the source code for Mars, a MapReduce framework on graphics *processors. *Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia) *Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com). *If you have any question on the code, please contact us at * wenbin@cse.ust.hk or savenhe@microsoft.com * *The license is a free non-exclusive, non-transferable license to reproduce, *use, modify and display the source code version of the Software, with or *without modifications solely for non-commercial research, educational or *evaluation purposes. The license does not entitle Licensee to technical support, *telephone assistance, enhancements or updates to the Software. All rights, title *to and ownership interest in Mars, including all intellectual property rights *therein shall remain in HKUST. */ /****************************************************************** *WordCount (WC): It counts the number of occurrences for each word in a file. Each Map * task processes a portion of the input file and emits intermediate data pairs, each of which consists * of a word as the key and a value of 1 for the occurrence. Group is required, and no reduce is * needed, because the Mars runtime provides the size of each group, after the Group stage. 
******************************************************************/ #include "MarsInc.h" #include "global.h" #include <ctype.h> #define __OUTPUT__ void validate(char* h_filebuf, Spec_t* spec, int num) { char* key = (char*)spec->outputKeys; char* val = (char*)spec->outputVals; int4* offsetSizes = (int4*)spec->outputOffsetSizes; int2* range = (int2*)spec->outputKeyListRange; printf("# of words:%d\n", spec->outputDiffKeyCount); if (num > spec->outputDiffKeyCount) num = spec->outputDiffKeyCount; for (int i = 0; i < num; i++) { int keyOffset = offsetSizes[range[i].x].x; int valOffset = offsetSizes[range[i].x].z; char* word = key + keyOffset; int wordsize = *(int*)(val + valOffset); printf("%s - size: %d - count: %d\n", word, wordsize, range[i].y - range[i].x); } } //----------------------------------------------------------------------- //usage: WordCount datafile //param: datafile //----------------------------------------------------------------------- int main( int argc, char** argv) { if (argc != 2) { printf("usage: %s datafile\n", argv[0]); exit(-1); } Spec_t *spec = GetDefaultSpec(); spec->workflow = MAP_GROUP; #ifdef __OUTPUT__ spec->outputToHost = 1; #endif TimeVal_t allTimer; startTimer(&allTimer); TimeVal_t preTimer; startTimer(&preTimer); FILE* fp = fopen(argv[1], "r"); fseek(fp, 0, SEEK_END); int fileSize = ftell(fp) + 1; rewind(fp); char* h_filebuf = (char*)malloc(fileSize); char* d_filebuf = NULL; fread(h_filebuf, fileSize, 1, fp); CUDA_SAFE_CALL(hipMalloc((void**)&d_filebuf, fileSize)); fclose(fp); WC_KEY_T key; key.file = d_filebuf; for (int i = 0; i < fileSize; i++) h_filebuf[i] = toupper(h_filebuf[i]); WC_VAL_T val; int offset = 0; char* p = h_filebuf; char* start = h_filebuf; while (1) { int blockSize = 2048; if (offset + blockSize > fileSize) blockSize = fileSize - offset; p += blockSize; for (; *p >= 'A' && *p <= 'Z'; p++); if (*p != '\0') { *p = '\0'; ++p; blockSize = (int)(p - start); val.line_offset = offset; val.line_size = blockSize; 
AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T)); offset += blockSize; start = p; } else { *p = '\0'; blockSize = (int)(fileSize - offset); val.line_offset = offset; val.line_size = blockSize; AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T)); break; } } CUDA_SAFE_CALL(hipMemcpy(d_filebuf, h_filebuf, fileSize, hipMemcpyHostToDevice)); endTimer("preprocess", &preTimer); //---------------------------------------------- //map/reduce //---------------------------------------------- MapReduce(spec); endTimer("all", &allTimer); //---------------------------------------------- //further processing //---------------------------------------------- #ifdef __OUTPUT__ CUDA_SAFE_CALL(hipMemcpy(h_filebuf, d_filebuf, fileSize, hipMemcpyDeviceToHost)); validate(h_filebuf, spec, 10); #endif //---------------------------------------------- //finish //---------------------------------------------- FinishMapReduce(spec); hipFree(d_filebuf); free(h_filebuf); return 0; }
b3f31a5f465e7953cc1c2974fbcb7e83b1e8a06f.cu
/*$Id: main.cu 755 2009-11-18 13:22:54Z wenbinor $*/ /** *This is the source code for Mars, a MapReduce framework on graphics *processors. *Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia) *Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com). *If you have any question on the code, please contact us at * wenbin@cse.ust.hk or savenhe@microsoft.com * *The license is a free non-exclusive, non-transferable license to reproduce, *use, modify and display the source code version of the Software, with or *without modifications solely for non-commercial research, educational or *evaluation purposes. The license does not entitle Licensee to technical support, *telephone assistance, enhancements or updates to the Software. All rights, title *to and ownership interest in Mars, including all intellectual property rights *therein shall remain in HKUST. */ /****************************************************************** *WordCount (WC): It counts the number of occurrences for each word in a file. Each Map * task processes a portion of the input file and emits intermediate data pairs, each of which consists * of a word as the key and a value of 1 for the occurrence. Group is required, and no reduce is * needed, because the Mars runtime provides the size of each group, after the Group stage. 
******************************************************************/ #include "MarsInc.h" #include "global.h" #include <ctype.h> #define __OUTPUT__ void validate(char* h_filebuf, Spec_t* spec, int num) { char* key = (char*)spec->outputKeys; char* val = (char*)spec->outputVals; int4* offsetSizes = (int4*)spec->outputOffsetSizes; int2* range = (int2*)spec->outputKeyListRange; printf("# of words:%d\n", spec->outputDiffKeyCount); if (num > spec->outputDiffKeyCount) num = spec->outputDiffKeyCount; for (int i = 0; i < num; i++) { int keyOffset = offsetSizes[range[i].x].x; int valOffset = offsetSizes[range[i].x].z; char* word = key + keyOffset; int wordsize = *(int*)(val + valOffset); printf("%s - size: %d - count: %d\n", word, wordsize, range[i].y - range[i].x); } } //----------------------------------------------------------------------- //usage: WordCount datafile //param: datafile //----------------------------------------------------------------------- int main( int argc, char** argv) { if (argc != 2) { printf("usage: %s datafile\n", argv[0]); exit(-1); } Spec_t *spec = GetDefaultSpec(); spec->workflow = MAP_GROUP; #ifdef __OUTPUT__ spec->outputToHost = 1; #endif TimeVal_t allTimer; startTimer(&allTimer); TimeVal_t preTimer; startTimer(&preTimer); FILE* fp = fopen(argv[1], "r"); fseek(fp, 0, SEEK_END); int fileSize = ftell(fp) + 1; rewind(fp); char* h_filebuf = (char*)malloc(fileSize); char* d_filebuf = NULL; fread(h_filebuf, fileSize, 1, fp); CUDA_SAFE_CALL(cudaMalloc((void**)&d_filebuf, fileSize)); fclose(fp); WC_KEY_T key; key.file = d_filebuf; for (int i = 0; i < fileSize; i++) h_filebuf[i] = toupper(h_filebuf[i]); WC_VAL_T val; int offset = 0; char* p = h_filebuf; char* start = h_filebuf; while (1) { int blockSize = 2048; if (offset + blockSize > fileSize) blockSize = fileSize - offset; p += blockSize; for (; *p >= 'A' && *p <= 'Z'; p++); if (*p != '\0') { *p = '\0'; ++p; blockSize = (int)(p - start); val.line_offset = offset; val.line_size = blockSize; 
AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T)); offset += blockSize; start = p; } else { *p = '\0'; blockSize = (int)(fileSize - offset); val.line_offset = offset; val.line_size = blockSize; AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T)); break; } } CUDA_SAFE_CALL(cudaMemcpy(d_filebuf, h_filebuf, fileSize, cudaMemcpyHostToDevice)); endTimer("preprocess", &preTimer); //---------------------------------------------- //map/reduce //---------------------------------------------- MapReduce(spec); endTimer("all", &allTimer); //---------------------------------------------- //further processing //---------------------------------------------- #ifdef __OUTPUT__ CUDA_SAFE_CALL(cudaMemcpy(h_filebuf, d_filebuf, fileSize, cudaMemcpyDeviceToHost)); validate(h_filebuf, spec, 10); #endif //---------------------------------------------- //finish //---------------------------------------------- FinishMapReduce(spec); cudaFree(d_filebuf); free(h_filebuf); return 0; }
78e9e034e5499b670f0cbc08db22a36cb4d729ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Binary packed integer values for common 3x3 kernels. */ #define CROSS 186 #define HOLLOW_CROSS 170 #define SLASH 84 #define BACKSLASH 273 #define VERTICAL_LINE 146 #define HORIZONTAL_LINE 56 /* * Kernel macros with code common to multiple kernels. */ #define KERNEL_VAR_INIT_AND_CHECK \ const int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; \ const int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; \ if (y >= height || x >= width) \ return; \ \ const dataType *imgCol = (dataType*)img + y * imgStep + x; template <class dataType, morphOperation MOP> __global__ void _backslashKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType dr_vs_ul; if (MOP == ERODE) { dr_vs_ul = min((dataType)imgCol[1+imgStep], (dataType)imgCol[-1-imgStep]); bestval = min(dr_vs_ul, imgCol[0]); } else { dr_vs_ul = max((dataType)imgCol[1+imgStep], (dataType)imgCol[-1-imgStep]); bestval = max(dr_vs_ul, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _horizontalKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType l_vs_r; if (MOP == ERODE) { l_vs_r = min((dataType)imgCol[1], (dataType)imgCol[-1]); bestval = min(l_vs_r, imgCol[0]); } else { l_vs_r = max((dataType)imgCol[1], (dataType)imgCol[-1]); bestval = max(l_vs_r, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _verticalKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType u_vs_d; if (MOP == ERODE) { u_vs_d = min((dataType)imgCol[imgStep], 
(dataType)imgCol[-imgStep]); bestval = min(u_vs_d, imgCol[0]); } else { u_vs_d = max((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = max(u_vs_d, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _slashKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType dl_vs_ur; if (MOP == ERODE) { dl_vs_ur = min((dataType)imgCol[1-imgStep], (dataType)imgCol[-1+imgStep]); bestval = min(dl_vs_ur, imgCol[0]); } else { dl_vs_ur = max((dataType)imgCol[1-imgStep], (dataType)imgCol[-1+imgStep]); bestval = max(dl_vs_ur, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _crossKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType u_vs_d; dataType l_vs_r; if (MOP == ERODE) { l_vs_r = min((dataType)imgCol[1], (dataType)imgCol[-1]); u_vs_d = min((dataType)imgCol[+imgStep], (dataType)imgCol[-imgStep]); bestval = min(u_vs_d, l_vs_r); bestval = min(bestval, imgCol[0]); } else { l_vs_r = max((dataType)imgCol[1], (dataType)imgCol[-1]); u_vs_d = max((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = max(u_vs_d, l_vs_r); bestval = max(bestval, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _hollowCrossKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType u_vs_d; dataType l_vs_r; if (MOP == ERODE) { l_vs_r = min((dataType)imgCol[1], (dataType)imgCol[-1]); u_vs_d = min((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = min(u_vs_d, l_vs_r); } else { l_vs_r = max((dataType)imgCol[1], (dataType)imgCol[-1]); 
u_vs_d = max((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = max(u_vs_d, l_vs_r); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _generic3x3Kernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height, const unsigned char *pMask, unsigned int maskStep) { const int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (y >= height || x >= width) return; const dataType *imgCol = (dataType*)img + y * imgStep + x; dataType bestval = (pMask[0] > 0) ? imgCol[-imgStep-1] : 255; const dataType up = (pMask[1] > 0) ? imgCol[-imgStep] : 255; const dataType upRight = (pMask[2] > 0) ? imgCol[-imgStep+1] : 255; const dataType left = (pMask[maskStep] > 0) ? imgCol[-1] : 255; const dataType center = (pMask[maskStep+1] > 0) ? imgCol[0] : 255; const dataType right = (pMask[maskStep+2] > 0) ? imgCol[1] : 255; const dataType downLeft = (pMask[2*maskStep] > 0) ? imgCol[imgStep-1] : 255; const dataType down = (pMask[2*maskStep+1] > 0) ? imgCol[imgStep] : 255; const dataType downRight = (pMask[2*maskStep+2] > 0) ? 
imgCol[imgStep+1] : 255; MINMAX(MOP,bestval, up); MINMAX(MOP,bestval, upRight); MINMAX(MOP,bestval, left); MINMAX(MOP,bestval, center); MINMAX(MOP,bestval, right); MINMAX(MOP,bestval, downLeft); MINMAX(MOP,bestval, down); MINMAX(MOP,bestval, downRight); result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> int _global3x3(const dataType *img, int imgStep, dataType *result, int resultStep, rect2d oSizeROI, morphMask mask) { const unsigned int width = oSizeROI.width; const unsigned int height = oSizeROI.height; dim3 gridSize((width+16-1)/16, (height+16-1)/16); dim3 blockSize(16,16); // Anchor at (1,1) int offset = 1*imgStep + 1; PRINTF("BinaryValue :%d\n", mask.binaryValue); switch(mask.binaryValue/*-mask.binaryValue+1*/) { case CROSS: hipLaunchKernelGGL(( _crossKernel<dataType, MOP>), dim3(gridSize),dim3(blockSize), 0, 0, img + offset, imgStep,result, resultStep, width, height); break; case SLASH: hipLaunchKernelGGL(( _slashKernel<dataType, MOP>), dim3(gridSize),dim3(blockSize), 0, 0, img + offset, imgStep,result, resultStep, width, height); break; case BACKSLASH: hipLaunchKernelGGL(( _backslashKernel<dataType, MOP>), dim3(gridSize),dim3(blockSize), 0, 0, img + offset, imgStep,result, resultStep, width, height); break; case HOLLOW_CROSS: hipLaunchKernelGGL(( _hollowCrossKernel<dataType, MOP>), dim3(gridSize),dim3(blockSize), 0, 0, img + offset, imgStep,result, resultStep, width, height); break; case VERTICAL_LINE: PRINTF("VERTICAL\n"); hipLaunchKernelGGL(( _verticalKernel<dataType, MOP>), dim3(gridSize),dim3(blockSize), 0, 0, img + offset, imgStep,result, resultStep, width, height); break; case HORIZONTAL_LINE: PRINTF("hOZ\n"); hipLaunchKernelGGL(( _horizontalKernel<dataType, MOP>), dim3(gridSize),dim3(blockSize), 0, 0, img + offset, imgStep,result, resultStep, width, height); break; default: { hipLaunchKernelGGL(( _generic3x3Kernel<dataType, MOP>), dim3(gridSize),dim3(blockSize), 0, 0, img + offset, imgStep,result, resultStep, width, 
height, mask.data, mask.pitch); } } #if 1 // DEBUG_ON // check for error hipError_t error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message PRINTF("CUDA error: %s\n", hipGetErrorString(error)); } #endif return LCUDA_SUCCESS; }
78e9e034e5499b670f0cbc08db22a36cb4d729ad.cu
/* * Binary packed integer values for common 3x3 kernels. */ #define CROSS 186 #define HOLLOW_CROSS 170 #define SLASH 84 #define BACKSLASH 273 #define VERTICAL_LINE 146 #define HORIZONTAL_LINE 56 /* * Kernel macros with code common to multiple kernels. */ #define KERNEL_VAR_INIT_AND_CHECK \ const int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; \ const int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; \ if (y >= height || x >= width) \ return; \ \ const dataType *imgCol = (dataType*)img + y * imgStep + x; template <class dataType, morphOperation MOP> __global__ void _backslashKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType dr_vs_ul; if (MOP == ERODE) { dr_vs_ul = min((dataType)imgCol[1+imgStep], (dataType)imgCol[-1-imgStep]); bestval = min(dr_vs_ul, imgCol[0]); } else { dr_vs_ul = max((dataType)imgCol[1+imgStep], (dataType)imgCol[-1-imgStep]); bestval = max(dr_vs_ul, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _horizontalKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType l_vs_r; if (MOP == ERODE) { l_vs_r = min((dataType)imgCol[1], (dataType)imgCol[-1]); bestval = min(l_vs_r, imgCol[0]); } else { l_vs_r = max((dataType)imgCol[1], (dataType)imgCol[-1]); bestval = max(l_vs_r, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _verticalKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType u_vs_d; if (MOP == ERODE) { u_vs_d = min((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = min(u_vs_d, imgCol[0]); } else { u_vs_d = max((dataType)imgCol[imgStep], 
(dataType)imgCol[-imgStep]); bestval = max(u_vs_d, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _slashKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType dl_vs_ur; if (MOP == ERODE) { dl_vs_ur = min((dataType)imgCol[1-imgStep], (dataType)imgCol[-1+imgStep]); bestval = min(dl_vs_ur, imgCol[0]); } else { dl_vs_ur = max((dataType)imgCol[1-imgStep], (dataType)imgCol[-1+imgStep]); bestval = max(dl_vs_ur, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _crossKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType u_vs_d; dataType l_vs_r; if (MOP == ERODE) { l_vs_r = min((dataType)imgCol[1], (dataType)imgCol[-1]); u_vs_d = min((dataType)imgCol[+imgStep], (dataType)imgCol[-imgStep]); bestval = min(u_vs_d, l_vs_r); bestval = min(bestval, imgCol[0]); } else { l_vs_r = max((dataType)imgCol[1], (dataType)imgCol[-1]); u_vs_d = max((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = max(u_vs_d, l_vs_r); bestval = max(bestval, imgCol[0]); } result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _hollowCrossKernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height) { KERNEL_VAR_INIT_AND_CHECK; dataType bestval; dataType u_vs_d; dataType l_vs_r; if (MOP == ERODE) { l_vs_r = min((dataType)imgCol[1], (dataType)imgCol[-1]); u_vs_d = min((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = min(u_vs_d, l_vs_r); } else { l_vs_r = max((dataType)imgCol[1], (dataType)imgCol[-1]); u_vs_d = max((dataType)imgCol[imgStep], (dataType)imgCol[-imgStep]); bestval = max(u_vs_d, l_vs_r); } result[y * 
resultStep + x] = bestval; } template <class dataType, morphOperation MOP> __global__ void _generic3x3Kernel(const dataType *img, int imgStep, dataType *result, int resultStep, unsigned int width, unsigned int height, const unsigned char *pMask, unsigned int maskStep) { const int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (y >= height || x >= width) return; const dataType *imgCol = (dataType*)img + y * imgStep + x; dataType bestval = (pMask[0] > 0) ? imgCol[-imgStep-1] : 255; const dataType up = (pMask[1] > 0) ? imgCol[-imgStep] : 255; const dataType upRight = (pMask[2] > 0) ? imgCol[-imgStep+1] : 255; const dataType left = (pMask[maskStep] > 0) ? imgCol[-1] : 255; const dataType center = (pMask[maskStep+1] > 0) ? imgCol[0] : 255; const dataType right = (pMask[maskStep+2] > 0) ? imgCol[1] : 255; const dataType downLeft = (pMask[2*maskStep] > 0) ? imgCol[imgStep-1] : 255; const dataType down = (pMask[2*maskStep+1] > 0) ? imgCol[imgStep] : 255; const dataType downRight = (pMask[2*maskStep+2] > 0) ? 
imgCol[imgStep+1] : 255; MINMAX(MOP,bestval, up); MINMAX(MOP,bestval, upRight); MINMAX(MOP,bestval, left); MINMAX(MOP,bestval, center); MINMAX(MOP,bestval, right); MINMAX(MOP,bestval, downLeft); MINMAX(MOP,bestval, down); MINMAX(MOP,bestval, downRight); result[y * resultStep + x] = bestval; } template <class dataType, morphOperation MOP> int _global3x3(const dataType *img, int imgStep, dataType *result, int resultStep, rect2d oSizeROI, morphMask mask) { const unsigned int width = oSizeROI.width; const unsigned int height = oSizeROI.height; dim3 gridSize((width+16-1)/16, (height+16-1)/16); dim3 blockSize(16,16); // Anchor at (1,1) int offset = 1*imgStep + 1; PRINTF("BinaryValue :%d\n", mask.binaryValue); switch(mask.binaryValue/*-mask.binaryValue+1*/) { case CROSS: _crossKernel<dataType, MOP><<<gridSize,blockSize>>>(img + offset, imgStep,result, resultStep, width, height); break; case SLASH: _slashKernel<dataType, MOP><<<gridSize,blockSize>>>(img + offset, imgStep,result, resultStep, width, height); break; case BACKSLASH: _backslashKernel<dataType, MOP><<<gridSize,blockSize>>>(img + offset, imgStep,result, resultStep, width, height); break; case HOLLOW_CROSS: _hollowCrossKernel<dataType, MOP><<<gridSize,blockSize>>>(img + offset, imgStep,result, resultStep, width, height); break; case VERTICAL_LINE: PRINTF("VERTICAL\n"); _verticalKernel<dataType, MOP><<<gridSize,blockSize>>>(img + offset, imgStep,result, resultStep, width, height); break; case HORIZONTAL_LINE: PRINTF("hOZ\n"); _horizontalKernel<dataType, MOP><<<gridSize,blockSize>>>(img + offset, imgStep,result, resultStep, width, height); break; default: { _generic3x3Kernel<dataType, MOP><<<gridSize,blockSize>>>(img + offset, imgStep,result, resultStep, width, height, mask.data, mask.pitch); } } #if 1 // DEBUG_ON // check for error cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message PRINTF("CUDA error: %s\n", cudaGetErrorString(error)); } #endif return LCUDA_SUCCESS; }
c28f746da32ff795b6f1ed6403cbbc81a4642675.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
c28f746da32ff795b6f1ed6403cbbc81a4642675.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
a540817189369e3b50b4b3c268affe4f6c2d9fd9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dot_based_interact_volta.h" #include "../launchers/dot_based_interact_fp32_launcher.cu" #include "../launchers/dot_based_interact_fp16_launcher.cu" void dotBasedInteractVoltaF16Fwd(const void *input, const void *bottom_mlp_output, void *output, uint batch_size, uint num_rows, uint num_cols, hipStream_t stream) { dotBasedInteractFP16Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream); } void dotBasedInteractVoltaF16Bwd(const void *input, const void *upstream_grad, void *grad, void *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, hipStream_t stream) { dotBasedInteractFP16Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream); } void dotBasedInteractVoltaF32Fwd(const void *input, const void *bottom_mlp_output, void *output, uint batch_size, uint num_rows, uint num_cols, hipStream_t stream) { dotBasedInteractFP32Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream); } void dotBasedInteractVoltaF32Bwd(const void *input, const void *upstream_grad, void *grad, void *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, hipStream_t stream) { dotBasedInteractFP32Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream); }
a540817189369e3b50b4b3c268affe4f6c2d9fd9.cu
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dot_based_interact_volta.h" #include "../launchers/dot_based_interact_fp32_launcher.cu" #include "../launchers/dot_based_interact_fp16_launcher.cu" void dotBasedInteractVoltaF16Fwd(const void *input, const void *bottom_mlp_output, void *output, uint batch_size, uint num_rows, uint num_cols, cudaStream_t stream) { dotBasedInteractFP16Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream); } void dotBasedInteractVoltaF16Bwd(const void *input, const void *upstream_grad, void *grad, void *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, cudaStream_t stream) { dotBasedInteractFP16Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream); } void dotBasedInteractVoltaF32Fwd(const void *input, const void *bottom_mlp_output, void *output, uint batch_size, uint num_rows, uint num_cols, cudaStream_t stream) { dotBasedInteractFP32Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream); } void dotBasedInteractVoltaF32Bwd(const void *input, const void *upstream_grad, void *grad, void *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, cudaStream_t stream) { dotBasedInteractFP32Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream); }
88638ad8899f8c223ab8eb02d507214dee6c51ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 6 //Poisson Blending /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. 
ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] else if the neighbor in on the border then += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. */ #include "utils.h" #include <thrust/host_vector.h> #include <algorithm> enum PixelType { PTOutside = 0, PTBorder = 1, PTInterior = 2 }; __host__ __device__ int div_up(int a, int b) { return (a + b - 1) / b; } template<class T> __host__ __device__ T clamp(T i, T min, T max) { if(i<min) { return min; } if(i>max) { return max; } return i; } __global__ void compute_mask(const uchar4 *d_sourceImg, unsigned char *d_mask, int sourceSize) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<sourceSize) { uchar4 p = d_sourceImg[i]; d_mask[i] = (static_cast<int>(p.x) + p.y + p.z < 3*255) ? 1 : 0; } } // TODO blokkostani __global__ void compute_pixel_type(const unsigned char *d_mask, unsigned char *d_pixelTypes, int numRowsSource, int numColsSource) { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; if(row < numRowsSource && col < numColsSource) { if(d_mask[row*numColsSource + col]) { int sum = d_mask[ clamp(row-1, 0, numRowsSource-1)*numColsSource + col ] + d_mask[ clamp(row+1, 0, numRowsSource-1)*numColsSource + col ] + d_mask[ row*numColsSource + clamp(col - 1, 0, numColsSource - 1) ] + d_mask[ row*numColsSource + clamp(col + 1, 0, numColsSource - 1) ]; d_pixelTypes[row*numColsSource + col] = (sum == 4) ? 
PTInterior: PTBorder; } else { d_pixelTypes[row*numColsSource + col] = PTOutside; } } } template<class OutputType> __global__ void split_channels(const uchar4 *d_img, OutputType* d_r, OutputType* d_g, OutputType* d_b, int size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<size) { uchar4 p = d_img[i]; d_r[i] = static_cast<OutputType>(p.x); d_g[i] = static_cast<OutputType>(p.y); d_b[i] = static_cast<OutputType>(p.z); } } __global__ void compose_img(uchar4 *d_destImg, float *d_r, float *d_g, float *d_b, unsigned char *d_pixelTypes, int size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<size && d_pixelTypes[i] == PTInterior) { uchar4 p; p.x = static_cast<unsigned char>(d_r[i]); p.y = static_cast<unsigned char>(d_g[i]); p.z = static_cast<unsigned char>(d_b[i]); p.w = 255; d_destImg[i] = p; } } __global__ void process(const unsigned char *src, const unsigned char *dst, const unsigned char *type, const float *guess_prev, float *guess_next, int height, int width) { // 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: // Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] // else if the neighbor in on the border then += DestinationImg[neighbor] // Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // 2) Calculate the new pixel value: // float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT // ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int i = row*width + col; if(row < height && col < width && type[i] == PTInterior) { int i_nb[] = { clamp(row-1, 0, height-1)*width + col, clamp(row+1, 0, height-1)*width + col, row*width + clamp(col - 1, 0, width-1), row*width + clamp(col + 1, 0, width-1) }; float sum = 0; for (int j = 0; j < 4; j++) { if(type[i_nb[j]] == PTInterior) { sum += guess_prev[i_nb[j]]; } else if(type[i_nb[j]] == PTBorder) { sum 
+= dst[i_nb[j]]; } sum += static_cast<float>(src[i]) - static_cast<float>(src[i_nb[j]]); } guess_next[i] = clamp(sum/4.f, 0.f, 255.f); } } void your_blend(const uchar4* const h_sourceImg, //IN const size_t numRowsSource, const size_t numColsSource, const uchar4* const h_destImg, //IN uchar4* const h_blendedImg) //OUT { /* To Recap here are the steps you need to implement 1) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. 2) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. 3) Separate out the incoming image into three separate channels 4) Create two float(!) buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. 5) For each color channel perform the Jacobi iteration described above 800 times. 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. Since this is final assignment we provide little boilerplate code to help you. Notice that all the input/output pointers are HOST pointers. You will have to allocate all of your own GPU memory and perform your own memcopies to get data in and out of the GPU memory. Remember to wrap all of your calls with checkCudaErrors() to catch any thing that might go wrong. After each kernel call do: hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); to catch any errors that happened while executing the kernel. 
*/ int sourceSize = numRowsSource * numColsSource; unsigned char* d_mask; unsigned char* d_pixelTypes; uchar4* d_sourceImg; unsigned char* d_src[3]; unsigned char* d_dst[3]; float* d_guess[3][2]; uchar4* d_destImg; checkCudaErrors(hipMalloc(&d_mask, sourceSize * sizeof(unsigned char))); checkCudaErrors(hipMalloc(&d_pixelTypes, sourceSize * sizeof(unsigned char))); checkCudaErrors(hipMalloc(&d_sourceImg, sourceSize * sizeof(uchar4))); for(int color = 0; color < 3; color++) { for(int i = 0; i<2; i++) checkCudaErrors(hipMalloc(&d_guess[color][i], sourceSize * sizeof(float))); checkCudaErrors(hipMalloc(&d_src[color], sourceSize * sizeof(unsigned char))); checkCudaErrors(hipMalloc(&d_dst[color], sourceSize * sizeof(unsigned char))); } checkCudaErrors(hipMalloc(&d_destImg, sourceSize * sizeof(uchar4))); checkCudaErrors(hipMemcpy(d_sourceImg, h_sourceImg, sourceSize * sizeof(uchar4), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_destImg, h_destImg, sourceSize * sizeof(uchar4), hipMemcpyHostToDevice)); { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); hipLaunchKernelGGL(( compute_mask), dim3(grid_size), dim3(block_size), 0, 0, d_sourceImg, d_mask, sourceSize); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } { dim3 block_size(32, 32, 1); dim3 grid_size(div_up(numColsSource, block_size.x), div_up(numRowsSource, block_size.y), 1); hipLaunchKernelGGL(( compute_pixel_type), dim3(grid_size), dim3(block_size), 0, 0, d_mask, d_pixelTypes, numRowsSource, numColsSource); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); hipLaunchKernelGGL(( split_channels), dim3(grid_size), dim3(block_size), 0, 0, d_sourceImg, d_src[0], d_src[1], d_src[2], sourceSize); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); hipLaunchKernelGGL(( split_channels), dim3(grid_size), dim3(block_size), 
0, 0, d_destImg, d_dst[0], d_dst[1], d_dst[2], sourceSize); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); hipLaunchKernelGGL(( split_channels), dim3(grid_size), dim3(block_size), 0, 0, d_sourceImg, d_guess[0][0], d_guess[1][0], d_guess[2][0], sourceSize); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } { dim3 block_size(32, 32, 1); dim3 grid_size(div_up(numColsSource, block_size.x), div_up(numRowsSource, block_size.y), 1); for(int i = 0; i<800; i++) { for(int color = 0; color<3; color++) { hipLaunchKernelGGL(( process), dim3(grid_size), dim3(block_size), 0, 0, d_src[color], d_dst[color], d_pixelTypes, d_guess[color][0], d_guess[color][1], numRowsSource, numColsSource); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); std::swap(d_guess[color][0], d_guess[color][1]); } } } { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); hipLaunchKernelGGL(( compose_img), dim3(grid_size), dim3(block_size), 0, 0, d_destImg, d_guess[0][0], d_guess[1][0], d_guess[2][0], d_pixelTypes, sourceSize); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } checkCudaErrors(hipMemcpy(h_blendedImg, d_destImg, sourceSize * sizeof(uchar4), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_mask)); checkCudaErrors(hipFree(d_pixelTypes)); checkCudaErrors(hipFree(d_sourceImg)); for(int color = 0; color < 3; color++) { for(int i = 0; i<2; i++) checkCudaErrors(hipFree(d_guess[color][i])); checkCudaErrors(hipFree(d_src[color])); checkCudaErrors(hipFree(d_dst[color])); } checkCudaErrors(hipFree(d_destImg)); }
88638ad8899f8c223ab8eb02d507214dee6c51ea.cu
//Udacity HW 6 //Poisson Blending /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. 
ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] else if the neighbor in on the border then += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. */ #include "utils.h" #include <thrust/host_vector.h> #include <algorithm> enum PixelType { PTOutside = 0, PTBorder = 1, PTInterior = 2 }; __host__ __device__ int div_up(int a, int b) { return (a + b - 1) / b; } template<class T> __host__ __device__ T clamp(T i, T min, T max) { if(i<min) { return min; } if(i>max) { return max; } return i; } __global__ void compute_mask(const uchar4 *d_sourceImg, unsigned char *d_mask, int sourceSize) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<sourceSize) { uchar4 p = d_sourceImg[i]; d_mask[i] = (static_cast<int>(p.x) + p.y + p.z < 3*255) ? 1 : 0; } } // TODO blokkosítani __global__ void compute_pixel_type(const unsigned char *d_mask, unsigned char *d_pixelTypes, int numRowsSource, int numColsSource) { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; if(row < numRowsSource && col < numColsSource) { if(d_mask[row*numColsSource + col]) { int sum = d_mask[ clamp(row-1, 0, numRowsSource-1)*numColsSource + col ] + d_mask[ clamp(row+1, 0, numRowsSource-1)*numColsSource + col ] + d_mask[ row*numColsSource + clamp(col - 1, 0, numColsSource - 1) ] + d_mask[ row*numColsSource + clamp(col + 1, 0, numColsSource - 1) ]; d_pixelTypes[row*numColsSource + col] = (sum == 4) ? 
PTInterior: PTBorder; } else { d_pixelTypes[row*numColsSource + col] = PTOutside; } } } template<class OutputType> __global__ void split_channels(const uchar4 *d_img, OutputType* d_r, OutputType* d_g, OutputType* d_b, int size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<size) { uchar4 p = d_img[i]; d_r[i] = static_cast<OutputType>(p.x); d_g[i] = static_cast<OutputType>(p.y); d_b[i] = static_cast<OutputType>(p.z); } } __global__ void compose_img(uchar4 *d_destImg, float *d_r, float *d_g, float *d_b, unsigned char *d_pixelTypes, int size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<size && d_pixelTypes[i] == PTInterior) { uchar4 p; p.x = static_cast<unsigned char>(d_r[i]); p.y = static_cast<unsigned char>(d_g[i]); p.z = static_cast<unsigned char>(d_b[i]); p.w = 255; d_destImg[i] = p; } } __global__ void process(const unsigned char *src, const unsigned char *dst, const unsigned char *type, const float *guess_prev, float *guess_next, int height, int width) { // 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: // Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] // else if the neighbor in on the border then += DestinationImg[neighbor] // Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // 2) Calculate the new pixel value: // float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT // ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int i = row*width + col; if(row < height && col < width && type[i] == PTInterior) { int i_nb[] = { clamp(row-1, 0, height-1)*width + col, clamp(row+1, 0, height-1)*width + col, row*width + clamp(col - 1, 0, width-1), row*width + clamp(col + 1, 0, width-1) }; float sum = 0; for (int j = 0; j < 4; j++) { if(type[i_nb[j]] == PTInterior) { sum += guess_prev[i_nb[j]]; } else if(type[i_nb[j]] == PTBorder) { sum 
+= dst[i_nb[j]]; } sum += static_cast<float>(src[i]) - static_cast<float>(src[i_nb[j]]); } guess_next[i] = clamp(sum/4.f, 0.f, 255.f); } } void your_blend(const uchar4* const h_sourceImg, //IN const size_t numRowsSource, const size_t numColsSource, const uchar4* const h_destImg, //IN uchar4* const h_blendedImg) //OUT { /* To Recap here are the steps you need to implement 1) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. 2) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. 3) Separate out the incoming image into three separate channels 4) Create two float(!) buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. 5) For each color channel perform the Jacobi iteration described above 800 times. 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. Since this is final assignment we provide little boilerplate code to help you. Notice that all the input/output pointers are HOST pointers. You will have to allocate all of your own GPU memory and perform your own memcopies to get data in and out of the GPU memory. Remember to wrap all of your calls with checkCudaErrors() to catch any thing that might go wrong. After each kernel call do: cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); to catch any errors that happened while executing the kernel. 
*/ int sourceSize = numRowsSource * numColsSource; unsigned char* d_mask; unsigned char* d_pixelTypes; uchar4* d_sourceImg; unsigned char* d_src[3]; unsigned char* d_dst[3]; float* d_guess[3][2]; uchar4* d_destImg; checkCudaErrors(cudaMalloc(&d_mask, sourceSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_pixelTypes, sourceSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_sourceImg, sourceSize * sizeof(uchar4))); for(int color = 0; color < 3; color++) { for(int i = 0; i<2; i++) checkCudaErrors(cudaMalloc(&d_guess[color][i], sourceSize * sizeof(float))); checkCudaErrors(cudaMalloc(&d_src[color], sourceSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_dst[color], sourceSize * sizeof(unsigned char))); } checkCudaErrors(cudaMalloc(&d_destImg, sourceSize * sizeof(uchar4))); checkCudaErrors(cudaMemcpy(d_sourceImg, h_sourceImg, sourceSize * sizeof(uchar4), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_destImg, h_destImg, sourceSize * sizeof(uchar4), cudaMemcpyHostToDevice)); { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); compute_mask<<<grid_size, block_size>>>(d_sourceImg, d_mask, sourceSize); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } { dim3 block_size(32, 32, 1); dim3 grid_size(div_up(numColsSource, block_size.x), div_up(numRowsSource, block_size.y), 1); compute_pixel_type<<<grid_size, block_size>>>(d_mask, d_pixelTypes, numRowsSource, numColsSource); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); split_channels<<<grid_size, block_size>>>(d_sourceImg, d_src[0], d_src[1], d_src[2], sourceSize); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); split_channels<<<grid_size, block_size>>>(d_destImg, d_dst[0], d_dst[1], d_dst[2], sourceSize); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } { int 
block_size = 1024; int grid_size = div_up(sourceSize, block_size); split_channels<<<grid_size, block_size>>>(d_sourceImg, d_guess[0][0], d_guess[1][0], d_guess[2][0], sourceSize); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } { dim3 block_size(32, 32, 1); dim3 grid_size(div_up(numColsSource, block_size.x), div_up(numRowsSource, block_size.y), 1); for(int i = 0; i<800; i++) { for(int color = 0; color<3; color++) { process<<<grid_size, block_size>>>(d_src[color], d_dst[color], d_pixelTypes, d_guess[color][0], d_guess[color][1], numRowsSource, numColsSource); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); std::swap(d_guess[color][0], d_guess[color][1]); } } } { int block_size = 1024; int grid_size = div_up(sourceSize, block_size); compose_img<<<grid_size, block_size>>>(d_destImg, d_guess[0][0], d_guess[1][0], d_guess[2][0], d_pixelTypes, sourceSize); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } checkCudaErrors(cudaMemcpy(h_blendedImg, d_destImg, sourceSize * sizeof(uchar4), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_mask)); checkCudaErrors(cudaFree(d_pixelTypes)); checkCudaErrors(cudaFree(d_sourceImg)); for(int color = 0; color < 3; color++) { for(int i = 0; i<2; i++) checkCudaErrors(cudaFree(d_guess[color][i])); checkCudaErrors(cudaFree(d_src[color])); checkCudaErrors(cudaFree(d_dst[color])); } checkCudaErrors(cudaFree(d_destImg)); }
c3b08ed85046972c278cc6ca3b6a426730833b83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void BitonicMergeSort(float * d_output, float * d_input, int subarray_size) { extern __shared__ float shared_data[]; // internal index for sorting of the subarray int index = threadIdx.x; int index_global = index + blockDim.x * blockIdx.x; double portions = log2(double(subarray_size)) - 1; //copying of data portion dedicated to this block into shared memory shared_data[index] = d_input[index_global]; __syncthreads(); for (short portion = 0; portion <= portions; portion++) { short offset = 1 << portion; short threads_in_box = offset << 1; // calculated at the beginning of each portion //int boxI = index % (threads_in_box + (blockDim.x * blockIdx.x)); int boxI = threadIdx.x / threads_in_box; for (short subportion = portion; subportion >= 0; subportion--) { offset = 1 << subportion; threads_in_box = offset << 1; int arrow_bottom = index % threads_in_box; if (((boxI + 1) % 2) == 1) { // top down if (arrow_bottom < offset) { float temp = shared_data[index]; if (shared_data[index + offset] < temp) { shared_data[index] = shared_data[index + offset]; shared_data[index + offset] = temp; } } } else { // bottom up if (arrow_bottom >= offset) { float temp = shared_data[index]; if (shared_data[index - offset] < temp) { shared_data[index] = shared_data[index - offset]; shared_data[index - offset] = temp; } } } __syncthreads(); } } d_output[index_global] = shared_data[index]; }
c3b08ed85046972c278cc6ca3b6a426730833b83.cu
#include "includes.h" __global__ void BitonicMergeSort(float * d_output, float * d_input, int subarray_size) { extern __shared__ float shared_data[]; // internal index for sorting of the subarray int index = threadIdx.x; int index_global = index + blockDim.x * blockIdx.x; double portions = log2(double(subarray_size)) - 1; //copying of data portion dedicated to this block into shared memory shared_data[index] = d_input[index_global]; __syncthreads(); for (short portion = 0; portion <= portions; portion++) { short offset = 1 << portion; short threads_in_box = offset << 1; // calculated at the beginning of each portion //int boxI = index % (threads_in_box + (blockDim.x * blockIdx.x)); int boxI = threadIdx.x / threads_in_box; for (short subportion = portion; subportion >= 0; subportion--) { offset = 1 << subportion; threads_in_box = offset << 1; int arrow_bottom = index % threads_in_box; if (((boxI + 1) % 2) == 1) { // top down if (arrow_bottom < offset) { float temp = shared_data[index]; if (shared_data[index + offset] < temp) { shared_data[index] = shared_data[index + offset]; shared_data[index + offset] = temp; } } } else { // bottom up if (arrow_bottom >= offset) { float temp = shared_data[index]; if (shared_data[index - offset] < temp) { shared_data[index] = shared_data[index - offset]; shared_data[index - offset] = temp; } } } __syncthreads(); } } d_output[index_global] = shared_data[index]; }
c78a300daca7d1ec43d1a42309153aeefed27f25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Tiled version of matrix multiplication * Sequential Matrix multiplication * TODO Step 1. Matrix dimensions are multiples of TILE_WIDTH [DONE] * TODO Step 1.a make each thread do more work * TODO Step 2. MAtrix dimensions are arbitary size */ #include<stdio.h> #include<assert.h> #include<cuda.h> #include<stdlib.h> #include<sys/time.h> #include<errno.h> #define DEBUG 0 #define VAL_LIMIT 10 // TILE_WIDTH and MAT_DIM can be given at compile time check Makefile #ifndef TILE_WIDTH #define TILE_WIDTH 16 #endif #ifndef MAT_DIM #define MAT_DIM 1024 #endif hipError_t cuerr; /* * @DESC : Allocate memory a linear aare of dimension r*c * @PRAM : number of rows and columns * @RETURN : address of the allocated memory * @SEE : * @TODO : * */ float *createMartrix(int r, int c) { float * temp; temp = (float*) malloc(sizeof(float) *r *c); if(temp == NULL) printf("Cannot create matrix :(\n"); return temp; } /* * @DESC : Free the linear array memory * @PRAM : pointer to the array * @RETURN : nothing * @SEE : * @TODO : * */ void destroyMAtrix(float *m) { free(m); } /* * @DESC : Initialize matrix with some random values * @PRAM : pointer to the matrix and its dimensions * @RETURN : nothing * @SEE : * @TODO : * */ void initMatrix(float *m, int r, int c) { for( int i=0; i<r; i++) { for( int j=0; j<c; j++) { m[ i*c + j] = (float) (rand()%VAL_LIMIT); } } } /* * @DESC : Sequential multiplication of matrix A and B result sotred in C * @PRAM : host pointer to matrices A, B, and C dimensions of matrix C and common * : dimension of matrix A, B * @RETURN : nothing * @SEE : * @TODO : * */ void matMul(float *A, float *B, float *C, int Ac, int Ar, int Bc) // Br == Ac { for( int i=0; i<Ar; i++) { for( int j=0; j<Bc; j++) { float sum =0; for( int k=0; k<Ac; k++) { float a = A[ i*Ac + k]; float b = B[ k*Bc + j]; sum += a*b; } C[ i*Bc + j] = sum; } } } /* * @PRAM : Device pointer, number of rows and columns * 
@RETURN : Nothing * @DESC : Creates a matrix of float * rows * columns on device * @SEE : * @TODO : * */ void createMatrixDevice(float **m, int r, int c) { int size = sizeof(float)*r*c; cuerr = hipSuccess; cuerr = hipMalloc(m, size); if (cuerr != hipSuccess) { fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, hipGetErrorString(cuerr)); exit(EXIT_FAILURE); } } /* * @PRAM : Host pointer, Device pointer, Number of rows and columns * @RETURN : Nothing * @DESC : Copies data from host pointer to device pointer * @SEE : * @TODO : * */ void transferToDevice(float *hostptr, float *deviceptr, int r, int c) { int size = sizeof(float) * r*c; cuerr = hipSuccess; cuerr = hipMemcpy(deviceptr, hostptr, size, hipMemcpyHostToDevice); if (cuerr != hipSuccess) { //fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, hipGetErrorString(err)); fprintf(stderr, "%s, %d.\n %s", __FILE__, __LINE__, hipGetErrorString(cuerr)); exit(EXIT_FAILURE); } } /* * @PRAM : Host pointer, Device pointer, Number of rows and columns * @RETURN : Nothing * @DESC : Copies data from device pointer to host pointer * @SEE : * @TODO : * */ void transferFromDevice(float *hostptr, float *deviceptr, int r, int c) { int size = sizeof(float) * r*c; cuerr = hipSuccess; cuerr = hipMemcpy(hostptr, deviceptr, size, hipMemcpyDeviceToHost); if (cuerr != hipSuccess) { fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, hipGetErrorString(cuerr)); exit(EXIT_FAILURE); } } /* * @DESC : Multiplies matrix A with matrix B and stores output in C * @PRAM : device pointers matrix A, matrix B, matrix C, dimensions of matrix C and * comman dimension for matrix A and B * @RETURN : Nothing * @SEE : Tiled matrix multiplication * @TODO : A detailed description * */ __global__ void matMulKernel(float *A, float *B, float *C, int Ac, int Ar, int Bc) { __shared__ float Sa[TILE_WIDTH][TILE_WIDTH]; __shared__ float Sb[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y; int tx = threadIdx.x, ty = threadIdx.y; int Crow = by * 
TILE_WIDTH + ty; int Ccol = bx * TILE_WIDTH + tx; float sum; for( int phase=0 ; phase < MAT_DIM/TILE_WIDTH; phase++) { Sa[ty][tx] = A[Crow*MAT_DIM + phase*TILE_WIDTH + tx]; Sb[ty][tx] = B[ (phase*TILE_WIDTH + ty)*MAT_DIM + Ccol]; __syncthreads(); for( int i=0 ; i<TILE_WIDTH ; i++) { float a = Sa[ty][i]; float b = Sb[i][tx]; sum += a*b; } __syncthreads(); } C[Crow*MAT_DIM + Ccol] = sum; } /* * @DESC : Wrapper function to set kernel configuration and invoke the kernel * @PRAM : device pointers for matrix A, B, C, and dimensions for C * @RETURN : nothing * @SEE : * @TODO : * */ void pMatMul(float *A, float *B, float *C, int Ac, int Ar, int Bc) { dim3 gridprop(ceil(Bc/TILE_WIDTH), ceil(Ar/TILE_WIDTH), 1); dim3 blockprop(TILE_WIDTH, TILE_WIDTH, 1); hipLaunchKernelGGL(( matMulKernel), dim3(gridprop), dim3(blockprop), 0, 0, A, B, C, Ac, Ar, Bc); } /* * @DESC : Print the matrix * @PRAM : host pointer to the matrix and its dimensions * @RETURN : nothing * @SEE : * @TODO : * */ void printMat(float *A, int r, int c) { for( int i=0; i<r; i++) { for( int j=0; j<c; j++) { printf("%3.2f\t", A[ i*c +j]); } printf("\n"); } } /* * @DESC : Check if the two given matrices are equal * @PRAM : host matrix pointer A, B and their dimensions * @RETURN : true if matrices are equal else false * @SEE : * @TODO : * */ bool check(float *A, float *B, int r, int c) { for( int i=0; i<r*c; i++) { if(A[i] != B[i]) return false; } return true; } int main() { float *h_A, *h_B, *h_C, *h_D; float *d_A, *d_B, *d_C; float milli; int Ar = MAT_DIM, Ac = MAT_DIM; int Br = MAT_DIM, Bc = MAT_DIM; int Cr = MAT_DIM, Cc = MAT_DIM; assert(Ac == Br); // Matrix are multipliable hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); h_A = createMartrix(Ar, Ac); assert(h_A != NULL); h_B = createMartrix(Br, Bc); assert(h_B != NULL); h_C = createMartrix(Cr, Cc); assert(h_C != NULL); h_D = createMartrix(Cr, Cc); assert(h_D != NULL); initMatrix(h_A, Ar, Ac); if(DEBUG) { printf("MAtrix A:\n"); 
printMat(h_A, Ar, Ac); } initMatrix(h_B, Br, Bc); if(DEBUG) { printf("Matrix B:\n"); printMat(h_B, Br, Bc); } matMul(h_A, h_B, h_C, Ac, Ar, Bc); if(DEBUG) { printf("Matrix C:\n"); printMat(h_C, Cr, Cc); } createMatrixDevice(&d_A, Ar, Ac); createMatrixDevice(&d_B, Br, Bc); createMatrixDevice(&d_C, Cr, Cc); transferToDevice(h_A, d_A, Ar, Ac); transferToDevice(h_B, d_B, Br, Bc); hipEventRecord(start); pMatMul(d_A, d_B, d_C, Ac, Ar, Bc); hipEventRecord(stop); transferFromDevice(h_D, d_C, Cr, Cc); hipEventSynchronize(stop); hipEventElapsedTime(&milli, start, stop); printf("Time required for (Configuration %d TILE_WIDTH and %d MAT_DIM) parallel \ execution %f\n", TILE_WIDTH, MAT_DIM, milli); #if defined(TILE_WIDTH) && defined(MAT_DIM) char cmd[1024]; char vals[256]; sprintf(vals, "%d\t%d\t%f", TILE_WIDTH, MAT_DIM, milli); strcpy(cmd, "echo \""); strcat(cmd, vals); strcat(cmd, "\" >>res.data"); system(cmd); #endif if(DEBUG) { printf("Matrix C:\n"); printMat(h_C, Cr, Cc); } if(check(h_D, h_C, Cr, Cc)) { printf("Success :) \n"); } else { printf("Failed :( \n"); } hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); return 0; }
c78a300daca7d1ec43d1a42309153aeefed27f25.cu
/*
 * Tiled matrix multiplication on the GPU, with a sequential reference
 * implementation used to verify the device result.
 * TODO Step 1. Matrix dimensions are multiples of TILE_WIDTH [DONE]
 * TODO Step 1.a make each thread do more work
 * TODO Step 2. Matrix dimensions are arbitrary size
 */
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <stdlib.h>
#include <string.h>     /* strcpy/strcat used for result logging below */
#include <sys/time.h>
#include <errno.h>

#define DEBUG 0
#define VAL_LIMIT 10

/* TILE_WIDTH and MAT_DIM can be given at compile time, check Makefile */
#ifndef TILE_WIDTH
#define TILE_WIDTH 16
#endif
#ifndef MAT_DIM
#define MAT_DIM 1024
#endif

cudaError_t cuerr;

/*
 * @DESC   : Allocate a linear host array for an r x c matrix
 * @PRAM   : number of rows and columns
 * @RETURN : address of the allocated memory, or NULL on failure
 */
float *createMartrix(int r, int c)
{
    float *temp = (float *) malloc(sizeof(float) * r * c);
    if (temp == NULL)
        printf("Cannot create matrix :(\n");
    return temp;
}

/*
 * @DESC   : Free a matrix allocated with createMartrix
 * @PRAM   : pointer to the array
 */
void destroyMAtrix(float *m)
{
    free(m);
}

/*
 * @DESC   : Initialize matrix with random values in [0, VAL_LIMIT)
 * @PRAM   : pointer to the matrix and its dimensions
 */
void initMatrix(float *m, int r, int c)
{
    for (int i = 0; i < r; i++)
        for (int j = 0; j < c; j++)
            m[i * c + j] = (float) (rand() % VAL_LIMIT);
}

/*
 * @DESC   : Sequential reference multiplication, C = A * B (Br == Ac)
 * @PRAM   : host pointers A, B, C; Ac = columns of A (= rows of B),
 *           Ar = rows of A/C, Bc = columns of B/C
 */
void matMul(float *A, float *B, float *C, int Ac, int Ar, int Bc)
{
    for (int i = 0; i < Ar; i++) {
        for (int j = 0; j < Bc; j++) {
            float sum = 0.0f;
            for (int k = 0; k < Ac; k++)
                sum += A[i * Ac + k] * B[k * Bc + j];
            C[i * Bc + j] = sum;
        }
    }
}

/*
 * @DESC   : Allocate an r x c float matrix on the device; exits on failure
 * @PRAM   : address of the device pointer, rows, columns
 */
void createMatrixDevice(float **m, int r, int c)
{
    int size = sizeof(float) * r * c;
    cuerr = cudaMalloc(m, size);
    if (cuerr != cudaSuccess) {
        fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
        exit(EXIT_FAILURE);
    }
}

/*
 * @DESC   : Copy an r x c matrix host -> device; exits on failure
 */
void transferToDevice(float *hostptr, float *deviceptr, int r, int c)
{
    int size = sizeof(float) * r * c;
    cuerr = cudaMemcpy(deviceptr, hostptr, size, cudaMemcpyHostToDevice);
    if (cuerr != cudaSuccess) {
        fprintf(stderr, "%s, %d.\n %s", __FILE__, __LINE__, cudaGetErrorString(cuerr));
        exit(EXIT_FAILURE);
    }
}

/*
 * @DESC   : Copy an r x c matrix device -> host; exits on failure
 */
void transferFromDevice(float *hostptr, float *deviceptr, int r, int c)
{
    int size = sizeof(float) * r * c;
    cuerr = cudaMemcpy(hostptr, deviceptr, size, cudaMemcpyDeviceToHost);
    if (cuerr != cudaSuccess) {
        fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
        exit(EXIT_FAILURE);
    }
}

/*
 * @DESC   : Tiled matrix multiplication kernel, C = A * B.
 *           Requires blockDim == (TILE_WIDTH, TILE_WIDTH); indexing uses the
 *           MAT_DIM macro, so all matrices are assumed to be
 *           MAT_DIM x MAT_DIM with MAT_DIM a multiple of TILE_WIDTH.
 */
__global__ void matMulKernel(float *A, float *B, float *C, int Ac, int Ar, int Bc)
{
    __shared__ float Sa[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Sb[TILE_WIDTH][TILE_WIDTH];

    int tx = threadIdx.x, ty = threadIdx.y;
    int Crow = blockIdx.y * TILE_WIDTH + ty;
    int Ccol = blockIdx.x * TILE_WIDTH + tx;

    /* BUG FIX: the accumulator was previously uninitialized, producing
     * garbage results. */
    float sum = 0.0f;
    for (int phase = 0; phase < MAT_DIM / TILE_WIDTH; phase++) {
        /* Stage one tile of A and one tile of B in shared memory. */
        Sa[ty][tx] = A[Crow * MAT_DIM + phase * TILE_WIDTH + tx];
        Sb[ty][tx] = B[(phase * TILE_WIDTH + ty) * MAT_DIM + Ccol];
        __syncthreads();
        for (int i = 0; i < TILE_WIDTH; i++)
            sum += Sa[ty][i] * Sb[i][tx];
        __syncthreads();    /* don't overwrite tiles while still in use */
    }
    C[Crow * MAT_DIM + Ccol] = sum;
}

/*
 * @DESC   : Wrapper: set kernel configuration and invoke the kernel
 * @PRAM   : device pointers for A, B, C and the dimensions of C
 */
void pMatMul(float *A, float *B, float *C, int Ac, int Ar, int Bc)
{
    /* BUG FIX: ceil(Bc/TILE_WIDTH) truncated before ceil() ran (integer
     * division), so it was a no-op; use integer ceil-division instead. */
    dim3 gridprop((Bc + TILE_WIDTH - 1) / TILE_WIDTH,
                  (Ar + TILE_WIDTH - 1) / TILE_WIDTH, 1);
    dim3 blockprop(TILE_WIDTH, TILE_WIDTH, 1);
    matMulKernel<<<gridprop, blockprop>>>(A, B, C, Ac, Ar, Bc);
    cuerr = cudaGetLastError();     /* catch bad launch configurations */
    if (cuerr != cudaSuccess) {
        fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
        exit(EXIT_FAILURE);
    }
}

/*
 * @DESC   : Print an r x c matrix
 */
void printMat(float *A, int r, int c)
{
    for (int i = 0; i < r; i++) {
        for (int j = 0; j < c; j++)
            printf("%3.2f\t", A[i * c + j]);
        printf("\n");
    }
}

/*
 * @DESC   : Element-wise equality check of two r x c matrices
 * @RETURN : true if matrices are equal, else false
 */
bool check(float *A, float *B, int r, int c)
{
    for (int i = 0; i < r * c; i++)
        if (A[i] != B[i])
            return false;
    return true;
}

int main()
{
    float *h_A, *h_B, *h_C, *h_D;
    float *d_A, *d_B, *d_C;
    float milli;
    int Ar = MAT_DIM, Ac = MAT_DIM;
    int Br = MAT_DIM, Bc = MAT_DIM;
    int Cr = MAT_DIM, Cc = MAT_DIM;

    assert(Ac == Br);   /* matrices must be multipliable */

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    h_A = createMartrix(Ar, Ac);
    assert(h_A != NULL);
    h_B = createMartrix(Br, Bc);
    assert(h_B != NULL);
    h_C = createMartrix(Cr, Cc);    /* CPU reference result */
    assert(h_C != NULL);
    h_D = createMartrix(Cr, Cc);    /* GPU result copied back */
    assert(h_D != NULL);

    initMatrix(h_A, Ar, Ac);
    if (DEBUG) {
        printf("MAtrix A:\n");
        printMat(h_A, Ar, Ac);
    }
    initMatrix(h_B, Br, Bc);
    if (DEBUG) {
        printf("Matrix B:\n");
        printMat(h_B, Br, Bc);
    }

    matMul(h_A, h_B, h_C, Ac, Ar, Bc);  /* CPU reference */
    if (DEBUG) {
        printf("Matrix C:\n");
        printMat(h_C, Cr, Cc);
    }

    createMatrixDevice(&d_A, Ar, Ac);
    createMatrixDevice(&d_B, Br, Bc);
    createMatrixDevice(&d_C, Cr, Cc);
    transferToDevice(h_A, d_A, Ar, Ac);
    transferToDevice(h_B, d_B, Br, Bc);

    cudaEventRecord(start);
    pMatMul(d_A, d_B, d_C, Ac, Ar, Bc);
    cudaEventRecord(stop);

    transferFromDevice(h_D, d_C, Cr, Cc);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);
    printf("Time required for (Configuration %d TILE_WIDTH and %d MAT_DIM) parallel execution %f\n",
           TILE_WIDTH, MAT_DIM, milli);

#if defined(TILE_WIDTH) && defined(MAT_DIM)
    /* Append "<tile>\t<dim>\t<ms>" to res.data for later plotting. */
    char cmd[1024];
    char vals[256];
    sprintf(vals, "%d\t%d\t%f", TILE_WIDTH, MAT_DIM, milli);
    strcpy(cmd, "echo \"");
    strcat(cmd, vals);
    strcat(cmd, "\" >>res.data");
    system(cmd);
#endif

    if (DEBUG) {
        printf("Matrix C:\n");
        printMat(h_C, Cr, Cc);
    }

    if (check(h_D, h_C, Cr, Cc)) {
        printf("Success :) \n");
    } else {
        printf("Failed :( \n");
    }

    cudaEventDestroy(start);    /* BUG FIX: events were never destroyed */
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_D);                  /* BUG FIX: h_D was leaked */
    return 0;
}
0252ca4b28f5874aeec898c5bc66ecd343b984e1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../wb.h"
#include <stdio.h>
#include <iostream>     // std::cout in printHisto (previously via wb.h only)

// Host-side stubs so non-HIP tooling can parse the kernel body.
#ifndef __HIPCC__
void atomicAdd(void *address, int rightSide);
void __syncthreads();
#endif

#define NUM_BINS 256
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define BLOCK_SIZE 256

inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
    if (code != hipSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}

/**
 * Histogram kernel: each block accumulates a private histogram in shared
 * memory, then merges it into the global bins with atomics. All loops are
 * grid-stride, so the kernel is correct for any grid/block configuration.
 * Assumes every data[i] is < NUM_BINS.
 */
__global__ void histo(unsigned int *data, unsigned int *bins, int len) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.x;
    int totalNumberOfThreads = blockDim.x * gridDim.x;

    // Init shared memory (faster than global)
    __shared__ unsigned int privateHisto[NUM_BINS];
    while (i < NUM_BINS) {
        privateHisto[i] = 0;
        i += blockDim.x;
    }
    __syncthreads();

    // Compute histo locally (in shared memory)
    i = id;
    while (i < len) {
        atomicAdd(&(privateHisto[data[i]]), 1);
        i += totalNumberOfThreads;
    }
    __syncthreads();

    // Copy histo in global memory
    i = threadIdx.x;
    while (i < NUM_BINS) {
        atomicAdd(&(bins[i]), privateHisto[i]);
        i += blockDim.x;
    }
}

// Print one line per bin: the bin index as a char and its count.
void printHisto(unsigned int *histo, unsigned int len) {
    for (unsigned int i = 0; i < len; i++) {
        std::cout << "char " << (char)i << " : " << histo[i] << std::endl;
    }
}

int main(int argc, char *argv[]) {
    wbArg_t args;
    int inputLength;
    unsigned int *hostInput;
    unsigned int *hostBins;
    unsigned int *deviceInput;
    unsigned int *deviceBins;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer");
    hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength);
    wbLog(TRACE, "The number of bins is ", NUM_BINS);

    int size = inputLength * sizeof(unsigned int);
    int binSize = NUM_BINS * sizeof(unsigned int);

    wbTime_start(GPU, "Allocating GPU memory.");
    hipMalloc((void **)&deviceInput, size);
    hipMalloc((void **)&deviceBins, binSize);
    CUDA_CHECK(hipDeviceSynchronize());
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    hipMemcpy(deviceInput, hostInput, size, hipMemcpyHostToDevice);
    hipMemset(deviceBins, 0, binSize);
    CUDA_CHECK(hipDeviceSynchronize());
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    dim3 block(BLOCK_SIZE);
    // BUG FIX: operands were swapped ((BLOCK_SIZE - 1) / inputLength + 1),
    // which launched a single block for any input larger than one block.
    // Ceil-divide the input length by the block size instead.
    dim3 grid((inputLength + BLOCK_SIZE - 1) / BLOCK_SIZE);

    // Launch kernel
    // ----------------------------------------------------------
    wbLog(TRACE, "Launching kernel");
    wbTime_start(Compute, "Performing CUDA computation");
    hipLaunchKernelGGL(( histo) , dim3(grid), dim3(block) , 0, 0, deviceInput, deviceBins, inputLength);
    CUDA_CHECK(hipGetLastError());  // catch bad launch configurations
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    hipMemcpy(hostBins, deviceBins, binSize, hipMemcpyDeviceToHost);
    CUDA_CHECK(hipDeviceSynchronize());
    wbTime_stop(Copy, "Copying output memory to the CPU");

    printHisto(hostBins, NUM_BINS);

    wbTime_start(GPU, "Freeing GPU Memory");
    hipFree(deviceInput);
    hipFree(deviceBins);
    wbTime_stop(GPU, "Freeing GPU Memory");

    free(hostBins);
    free(hostInput);
    return 0;
}
0252ca4b28f5874aeec898c5bc66ecd343b984e1.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../wb.h"
#include <stdio.h>
#include <iostream>     // std::cout in printHisto (previously via wb.h only)

// Host-side stubs so non-CUDA tooling can parse the kernel body.
#ifndef __CUDACC__
void atomicAdd(void *address, int rightSide);
void __syncthreads();
#endif

#define NUM_BINS 256
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define BLOCK_SIZE 256

inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}

/**
 * Histogram kernel: each block accumulates a private histogram in shared
 * memory, then merges it into the global bins with atomics. All loops are
 * grid-stride, so the kernel is correct for any grid/block configuration.
 * Assumes every data[i] is < NUM_BINS.
 */
__global__ void histo(unsigned int *data, unsigned int *bins, int len) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.x;
    int totalNumberOfThreads = blockDim.x * gridDim.x;

    // Init shared memory (faster than global)
    __shared__ unsigned int privateHisto[NUM_BINS];
    while (i < NUM_BINS) {
        privateHisto[i] = 0;
        i += blockDim.x;
    }
    __syncthreads();

    // Compute histo locally (in shared memory)
    i = id;
    while (i < len) {
        atomicAdd(&(privateHisto[data[i]]), 1);
        i += totalNumberOfThreads;
    }
    __syncthreads();

    // Copy histo in global memory
    i = threadIdx.x;
    while (i < NUM_BINS) {
        atomicAdd(&(bins[i]), privateHisto[i]);
        i += blockDim.x;
    }
}

// Print one line per bin: the bin index as a char and its count.
void printHisto(unsigned int *histo, unsigned int len) {
    for (unsigned int i = 0; i < len; i++) {
        std::cout << "char " << (char)i << " : " << histo[i] << std::endl;
    }
}

int main(int argc, char *argv[]) {
    wbArg_t args;
    int inputLength;
    unsigned int *hostInput;
    unsigned int *hostBins;
    unsigned int *deviceInput;
    unsigned int *deviceBins;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer");
    hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength);
    wbLog(TRACE, "The number of bins is ", NUM_BINS);

    int size = inputLength * sizeof(unsigned int);
    int binSize = NUM_BINS * sizeof(unsigned int);

    wbTime_start(GPU, "Allocating GPU memory.");
    cudaMalloc((void **)&deviceInput, size);
    cudaMalloc((void **)&deviceBins, binSize);
    CUDA_CHECK(cudaDeviceSynchronize());
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    cudaMemcpy(deviceInput, hostInput, size, cudaMemcpyHostToDevice);
    cudaMemset(deviceBins, 0, binSize);
    CUDA_CHECK(cudaDeviceSynchronize());
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    dim3 block(BLOCK_SIZE);
    // BUG FIX: operands were swapped ((BLOCK_SIZE - 1) / inputLength + 1),
    // which launched a single block for any input larger than one block.
    // Ceil-divide the input length by the block size instead.
    dim3 grid((inputLength + BLOCK_SIZE - 1) / BLOCK_SIZE);

    // Launch kernel
    // ----------------------------------------------------------
    wbLog(TRACE, "Launching kernel");
    wbTime_start(Compute, "Performing CUDA computation");
    histo <<< grid, block >>> (deviceInput, deviceBins, inputLength);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configurations
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    cudaMemcpy(hostBins, deviceBins, binSize, cudaMemcpyDeviceToHost);
    CUDA_CHECK(cudaDeviceSynchronize());
    wbTime_stop(Copy, "Copying output memory to the CPU");

    printHisto(hostBins, NUM_BINS);

    wbTime_start(GPU, "Freeing GPU Memory");
    cudaFree(deviceInput);
    cudaFree(deviceBins);
    wbTime_stop(GPU, "Freeing GPU Memory");

    free(hostBins);
    free(hostInput);
    return 0;
}
00d08d9fd3744f3d4a4e0c8e7f030116f13c2f6c.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <algorithm>
#include <math.h>   // fabsf below

// y[i] = a * x[i] + y[i] for every i in [0, n)
__global__ void saxpy(int n, float a, float *x, float *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = a * x[i] + y[i];
}

/**
 * Times one SAXPY over 20M elements with HIP events and returns the
 * effective bandwidth in GB/s (3 float accesses per element: read x,
 * read y, write y).
 */
float bandwidth(void) {
    int N = 20 * (1 << 20);
    float *x, *y, *d_x, *d_y;
    x = (float*)malloc(N * sizeof(float));
    y = (float*)malloc(N * sizeof(float));
    hipMalloc(&d_x, N * sizeof(float));
    hipMalloc(&d_y, N * sizeof(float));

    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice);

    hipEventRecord(start);
    // Perform SAXPY on all N elements
    hipLaunchKernelGGL(( saxpy), dim3((N + 511)/512), dim3(512) , 0, 0, N, 2.0f, d_x, d_y);
    hipEventRecord(stop);

    hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);

    // Sanity check: every result should be 2*1 + 2 = 4.
    // BUG FIX: plain abs() could bind to the integer overload (truncating the
    // float difference); fabsf keeps it in float.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = std::max(maxError, fabsf(y[i] - 4.0f));
    }
    (void)maxError;  // computed for debugging; not reported

    float gbPerSec = N * 4 * 3 / milliseconds / 1e6;

    // BUG FIX: previously nothing was freed, and an unreachable getchar()
    // followed the return statement.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_x);
    hipFree(d_y);
    free(x);
    free(y);
    return gbPerSec;
}
00d08d9fd3744f3d4a4e0c8e7f030116f13c2f6c.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <algorithm>
#include <math.h>   // fabsf below

// y[i] = a * x[i] + y[i] for every i in [0, n)
__global__ void saxpy(int n, float a, float *x, float *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = a * x[i] + y[i];
}

/**
 * Times one SAXPY over 20M elements with CUDA events and returns the
 * effective bandwidth in GB/s (3 float accesses per element: read x,
 * read y, write y).
 */
float bandwidth(void) {
    int N = 20 * (1 << 20);
    float *x, *y, *d_x, *d_y;
    x = (float*)malloc(N * sizeof(float));
    y = (float*)malloc(N * sizeof(float));
    cudaMalloc(&d_x, N * sizeof(float));
    cudaMalloc(&d_y, N * sizeof(float));

    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);

    cudaEventRecord(start);
    // Perform SAXPY on all N elements
    saxpy<<< (N + 511)/512, 512 >>>(N, 2.0f, d_x, d_y);
    cudaEventRecord(stop);

    cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    // Sanity check: every result should be 2*1 + 2 = 4.
    // BUG FIX: plain abs() could bind to the integer overload (truncating the
    // float difference); fabsf keeps it in float.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = std::max(maxError, fabsf(y[i] - 4.0f));
    }
    (void)maxError;  // computed for debugging; not reported

    float gbPerSec = N * 4 * 3 / milliseconds / 1e6;

    // BUG FIX: previously nothing was freed, and an unreachable getchar()
    // followed the return statement.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
    return gbPerSec;
}
cfa4f6fd59f30ee584c4a20469b1460292a1db73.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

/**
 * Convert a real4 to a real3 by removing its last element.
 */
inline __device__ real3 trim(real4 v) {
    return make_real3(v.x, v.y, v.z);
}

/**
 * This does nothing, and just exists to simplify the code generation.
 */
inline __device__ real3 trim(real3 v) {
    return v;
}

/**
 * Compute the difference between two vectors, setting the fourth component to the squared magnitude.
 */
inline __device__ real4 delta(real4 vec1, real4 vec2) {
    real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
    result.w = result.x*result.x + result.y*result.y + result.z*result.z;
    return result;
}

/**
 * Compute the difference between two vectors, taking periodic boundary conditions into account
 * and setting the fourth component to the squared magnitude.
 */
inline __device__ real4 deltaPeriodic(real4 vec1, real4 vec2, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
    real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
#ifdef USE_PERIODIC
    APPLY_PERIODIC_TO_DELTA(result)
#endif
    result.w = result.x*result.x + result.y*result.y + result.z*result.z;
    return result;
}

/**
 * Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
 */
inline __device__ real computeAngle(real4 vec1, real4 vec2) {
    real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
    real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
    real angle;
    if (cosine > 0.99f || cosine < -0.99f) {
        // We're close to the singularity in acos(), so take the cross product and use asin() instead.
        real3 crossProduct = cross(vec1, vec2);
        real scale = vec1.w*vec2.w;
        angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
        if (cosine < 0.0f)
            angle = M_PI-angle;
    }
    else
        angle = ACOS(cosine);
    return angle;
}

/**
 * Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
 */
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
    real3 result = cross(vec1, vec2);
    return make_real4(result.x, result.y, result.z, result.x*result.x + result.y*result.y + result.z*result.z);
}

/**
 * Compute forces on donors.
 * Launch with dynamic shared memory of at least 3*blockDim.x*sizeof(real4):
 * posBuffer holds three positions per acceptor staged by this block.
 */
extern "C" __global__ void computeDonorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
        const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
        real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ PARAMETER_ARGUMENTS) {
    extern __shared__ real4 posBuffer[];
    mixed energy = 0;
    // NOTE(review): f1..f3 are not reset between outer-loop iterations; if the
    // grid can be smaller than NUM_DONORS this double-counts contributions —
    // confirm against the launch configuration before changing.
    real3 f1 = make_real3(0);
    real3 f2 = make_real3(0);
    real3 f3 = make_real3(0);
    for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x*gridDim.x) {
        // Load information about the donor this thread will compute forces on.
        int donorIndex = donorStart+blockIdx.x*blockDim.x+threadIdx.x;
        int4 atoms, exclusionIndices;
        real4 d1, d2, d3;
        if (donorIndex < NUM_DONORS) {
            atoms = donorAtoms[donorIndex];
            d1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
            d2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
            d3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
            exclusionIndices = exclusions[donorIndex];
#endif
        }
        else
            atoms = make_int4(-1, -1, -1, -1);
        for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x) {
            // Load the next block of acceptors into local memory.
            int blockSize = min((int) blockDim.x, NUM_ACCEPTORS-acceptorStart);
            if (threadIdx.x < blockSize) {
                int4 atoms2 = acceptorAtoms[acceptorStart+threadIdx.x];
                posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
                posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
                posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
            }
            __syncthreads();
            if (donorIndex < NUM_DONORS) {
                for (int index = 0; index < blockSize; index++) {
                    int acceptorIndex = acceptorStart+index;
#ifdef USE_EXCLUSIONS
                    if (acceptorIndex == exclusionIndices.x || acceptorIndex == exclusionIndices.y || acceptorIndex == exclusionIndices.z || acceptorIndex == exclusionIndices.w)
                        continue;
#endif
                    // Compute the interaction between a donor and an acceptor.
                    real4 a1 = posBuffer[3*index];
                    real4 a2 = posBuffer[3*index+1];
                    real4 a3 = posBuffer[3*index+2];
                    real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
                    if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
                        COMPUTE_DONOR_FORCE
#ifdef USE_CUTOFF
                    }
#endif
                }
            }
            // BUG FIX: barrier before the next iteration overwrites posBuffer;
            // without it, threads that finish the inner loop early can clobber
            // positions other threads are still reading (shared-memory race).
            __syncthreads();
        }

        // Write results
        if (donorIndex < NUM_DONORS) {
            if (atoms.x > -1) {
                atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
                atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
                atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.y > -1) {
                atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
                atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
                atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.z > -1) {
                atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
                atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
                atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
                __threadfence_block();
            }
        }
    }
    energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}

/**
 * Compute forces on acceptors.
 * Launch with dynamic shared memory of at least 3*blockDim.x*sizeof(real4):
 * posBuffer holds three positions per donor staged by this block.
 * (Energy is accumulated only by computeDonorForces.)
 */
extern "C" __global__ void computeAcceptorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
        const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
        real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ PARAMETER_ARGUMENTS) {
    extern __shared__ real4 posBuffer[];
    real3 f1 = make_real3(0);
    real3 f2 = make_real3(0);
    real3 f3 = make_real3(0);
    for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x*gridDim.x) {
        // Load information about the acceptor this thread will compute forces on.
        int acceptorIndex = acceptorStart+blockIdx.x*blockDim.x+threadIdx.x;
        int4 atoms, exclusionIndices;
        real4 a1, a2, a3;
        if (acceptorIndex < NUM_ACCEPTORS) {
            atoms = acceptorAtoms[acceptorIndex];
            a1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
            a2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
            a3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
            exclusionIndices = exclusions[acceptorIndex];
#endif
        }
        else
            atoms = make_int4(-1, -1, -1, -1);
        for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x) {
            // Load the next block of donors into local memory.
            int blockSize = min((int) blockDim.x, NUM_DONORS-donorStart);
            if (threadIdx.x < blockSize) {
                int4 atoms2 = donorAtoms[donorStart+threadIdx.x];
                posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
                posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
                posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
            }
            __syncthreads();
            if (acceptorIndex < NUM_ACCEPTORS) {
                for (int index = 0; index < blockSize; index++) {
                    int donorIndex = donorStart+index;
#ifdef USE_EXCLUSIONS
                    if (donorIndex == exclusionIndices.x || donorIndex == exclusionIndices.y || donorIndex == exclusionIndices.z || donorIndex == exclusionIndices.w)
                        continue;
#endif
                    // Compute the interaction between a donor and an acceptor.
                    real4 d1 = posBuffer[3*index];
                    real4 d2 = posBuffer[3*index+1];
                    real4 d3 = posBuffer[3*index+2];
                    real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
                    if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
                        COMPUTE_ACCEPTOR_FORCE
#ifdef USE_CUTOFF
                    }
#endif
                }
            }
            // BUG FIX: barrier before the next iteration overwrites posBuffer
            // (same shared-memory race as in computeDonorForces).
            __syncthreads();
        }

        // Write results
        if (acceptorIndex < NUM_ACCEPTORS) {
            if (atoms.x > -1) {
                atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
                atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
                atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.y > -1) {
                atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
                atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
                atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.z > -1) {
                atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
                atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
                atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
                __threadfence_block();
            }
        }
    }
}
cfa4f6fd59f30ee584c4a20469b1460292a1db73.cu
/**
 * Convert a real4 to a real3 by removing its last element.
 */
inline __device__ real3 trim(real4 v) {
    return make_real3(v.x, v.y, v.z);
}

/**
 * This does nothing, and just exists to simplify the code generation.
 */
inline __device__ real3 trim(real3 v) {
    return v;
}

/**
 * Compute the difference between two vectors, setting the fourth component to the squared magnitude.
 */
inline __device__ real4 delta(real4 vec1, real4 vec2) {
    real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
    result.w = result.x*result.x + result.y*result.y + result.z*result.z;
    return result;
}

/**
 * Compute the difference between two vectors, taking periodic boundary conditions into account
 * and setting the fourth component to the squared magnitude.
 */
inline __device__ real4 deltaPeriodic(real4 vec1, real4 vec2, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
    real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
#ifdef USE_PERIODIC
    APPLY_PERIODIC_TO_DELTA(result)
#endif
    result.w = result.x*result.x + result.y*result.y + result.z*result.z;
    return result;
}

/**
 * Compute the angle between two vectors.  The w component of each vector should contain the squared magnitude.
 */
inline __device__ real computeAngle(real4 vec1, real4 vec2) {
    real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
    real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
    real angle;
    if (cosine > 0.99f || cosine < -0.99f) {
        // We're close to the singularity in acos(), so take the cross product and use asin() instead.
        real3 crossProduct = cross(vec1, vec2);
        real scale = vec1.w*vec2.w;
        angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
        if (cosine < 0.0f)
            angle = M_PI-angle;
    }
    else
        angle = ACOS(cosine);
    return angle;
}

/**
 * Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
 */
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
    real3 result = cross(vec1, vec2);
    return make_real4(result.x, result.y, result.z, result.x*result.x + result.y*result.y + result.z*result.z);
}

/**
 * Compute forces on donors.
 * Launch with dynamic shared memory of at least 3*blockDim.x*sizeof(real4):
 * posBuffer holds three positions per acceptor staged by this block.
 */
extern "C" __global__ void computeDonorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
        const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
        real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ PARAMETER_ARGUMENTS) {
    extern __shared__ real4 posBuffer[];
    mixed energy = 0;
    // NOTE(review): f1..f3 are not reset between outer-loop iterations; if the
    // grid can be smaller than NUM_DONORS this double-counts contributions —
    // confirm against the launch configuration before changing.
    real3 f1 = make_real3(0);
    real3 f2 = make_real3(0);
    real3 f3 = make_real3(0);
    for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x*gridDim.x) {
        // Load information about the donor this thread will compute forces on.
        int donorIndex = donorStart+blockIdx.x*blockDim.x+threadIdx.x;
        int4 atoms, exclusionIndices;
        real4 d1, d2, d3;
        if (donorIndex < NUM_DONORS) {
            atoms = donorAtoms[donorIndex];
            d1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
            d2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
            d3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
            exclusionIndices = exclusions[donorIndex];
#endif
        }
        else
            atoms = make_int4(-1, -1, -1, -1);
        for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x) {
            // Load the next block of acceptors into local memory.
            int blockSize = min((int) blockDim.x, NUM_ACCEPTORS-acceptorStart);
            if (threadIdx.x < blockSize) {
                int4 atoms2 = acceptorAtoms[acceptorStart+threadIdx.x];
                posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
                posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
                posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
            }
            __syncthreads();
            if (donorIndex < NUM_DONORS) {
                for (int index = 0; index < blockSize; index++) {
                    int acceptorIndex = acceptorStart+index;
#ifdef USE_EXCLUSIONS
                    if (acceptorIndex == exclusionIndices.x || acceptorIndex == exclusionIndices.y || acceptorIndex == exclusionIndices.z || acceptorIndex == exclusionIndices.w)
                        continue;
#endif
                    // Compute the interaction between a donor and an acceptor.
                    real4 a1 = posBuffer[3*index];
                    real4 a2 = posBuffer[3*index+1];
                    real4 a3 = posBuffer[3*index+2];
                    real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
                    if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
                        COMPUTE_DONOR_FORCE
#ifdef USE_CUTOFF
                    }
#endif
                }
            }
            // BUG FIX: barrier before the next iteration overwrites posBuffer;
            // without it, threads that finish the inner loop early can clobber
            // positions other threads are still reading (shared-memory race).
            __syncthreads();
        }

        // Write results
        if (donorIndex < NUM_DONORS) {
            if (atoms.x > -1) {
                atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
                atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
                atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.y > -1) {
                atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
                atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
                atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.z > -1) {
                atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
                atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
                atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
                __threadfence_block();
            }
        }
    }
    energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}

/**
 * Compute forces on acceptors.
 * Launch with dynamic shared memory of at least 3*blockDim.x*sizeof(real4):
 * posBuffer holds three positions per donor staged by this block.
 * (Energy is accumulated only by computeDonorForces.)
 */
extern "C" __global__ void computeAcceptorForces(unsigned long long* __restrict__ force, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq,
        const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize,
        real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ PARAMETER_ARGUMENTS) {
    extern __shared__ real4 posBuffer[];
    real3 f1 = make_real3(0);
    real3 f2 = make_real3(0);
    real3 f3 = make_real3(0);
    for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x*gridDim.x) {
        // Load information about the acceptor this thread will compute forces on.
        int acceptorIndex = acceptorStart+blockIdx.x*blockDim.x+threadIdx.x;
        int4 atoms, exclusionIndices;
        real4 a1, a2, a3;
        if (acceptorIndex < NUM_ACCEPTORS) {
            atoms = acceptorAtoms[acceptorIndex];
            a1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
            a2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
            a3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
            exclusionIndices = exclusions[acceptorIndex];
#endif
        }
        else
            atoms = make_int4(-1, -1, -1, -1);
        for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x) {
            // Load the next block of donors into local memory.
            int blockSize = min((int) blockDim.x, NUM_DONORS-donorStart);
            if (threadIdx.x < blockSize) {
                int4 atoms2 = donorAtoms[donorStart+threadIdx.x];
                posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
                posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
                posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
            }
            __syncthreads();
            if (acceptorIndex < NUM_ACCEPTORS) {
                for (int index = 0; index < blockSize; index++) {
                    int donorIndex = donorStart+index;
#ifdef USE_EXCLUSIONS
                    if (donorIndex == exclusionIndices.x || donorIndex == exclusionIndices.y || donorIndex == exclusionIndices.z || donorIndex == exclusionIndices.w)
                        continue;
#endif
                    // Compute the interaction between a donor and an acceptor.
                    real4 d1 = posBuffer[3*index];
                    real4 d2 = posBuffer[3*index+1];
                    real4 d3 = posBuffer[3*index+2];
                    real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
#ifdef USE_CUTOFF
                    if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
                        COMPUTE_ACCEPTOR_FORCE
#ifdef USE_CUTOFF
                    }
#endif
                }
            }
            // BUG FIX: barrier before the next iteration overwrites posBuffer
            // (same shared-memory race as in computeDonorForces).
            __syncthreads();
        }

        // Write results
        if (acceptorIndex < NUM_ACCEPTORS) {
            if (atoms.x > -1) {
                atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
                atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
                atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.y > -1) {
                atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
                atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
                atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
                __threadfence_block();
            }
            if (atoms.z > -1) {
                atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
                atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
                atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
                __threadfence_block();
            }
        }
    }
}
c3f1f5c92755b1af599322e73b55bcad7274aa3d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "matrixmul.h"

#define BLOCK_SIZE 32

// Abort with a message on any HIP API failure instead of silently ignoring it.
#define HIP_CHECK(call)                                                       \
    do {                                                                      \
        hipError_t err_ = (call);                                             \
        if (err_ != hipSuccess) {                                             \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,      \
                    hipGetErrorString(err_));                                 \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

__global__ void matmul(float *_A, float *_B, float *_C);
void initMatrix(float *_M, int _W, int _H);
void printMatrix(float *_M, int _W, int _H);

// Host driver: computes C = A x B (A: HA x WA, B: HB x WB, row-major) on the
// GPU MAX_ITER times and reports the average kernel time in seconds.
// A, B, C are the device pointers declared in matrixmul.h.
int main(){
    float *hA, *hB, *hC;

    // Validate dimensions BEFORE allocating anything so nothing leaks on the
    // error path, and report failure (the old code exit(0)'d, claiming success,
    // after already allocating and copying).
    if (WA != HB){
        fprintf(stderr, "Inner matrix dimensions do not match (WA != HB)\n");
        return EXIT_FAILURE;
    }

    // timing
    hipEvent_t start, stop;
    float gpu_time;
    HIP_CHECK(hipEventCreate(&start));
    HIP_CHECK(hipEventCreate(&stop));

    // Ceil-division launch config: covers the boundary tail without always
    // launching a fully idle extra block when the size divides evenly
    // (the old WB/BLOCK_SIZE+1 did).
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((WB + BLOCK_SIZE - 1)/BLOCK_SIZE, (HA + BLOCK_SIZE - 1)/BLOCK_SIZE);

    // allocate mem
    hA = (float*)malloc(WA*HA*sizeof(float));
    hB = (float*)malloc(WB*HB*sizeof(float));
    hC = (float*)malloc(WB*HA*sizeof(float));
    HIP_CHECK(hipMalloc((void **)&A, WA*HA*sizeof(float)));
    HIP_CHECK(hipMalloc((void **)&B, WB*HB*sizeof(float)));
    HIP_CHECK(hipMalloc((void **)&C, WB*HA*sizeof(float)));

    // init values; seed once here.  Seeding inside initMatrix reseeded with the
    // same time() on both back-to-back calls, making A and B identical.
    srand(time(NULL));
    initMatrix(hA, WA, HA);
    initMatrix(hB, WB, HB);
    HIP_CHECK(hipMemcpy(A, hA, WA*HA*sizeof(float), hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(B, hB, WB*HB*sizeof(float), hipMemcpyHostToDevice));
    // C is fully written by the kernel; clear it rather than uploading the
    // uninitialized host buffer as the old code did.
    HIP_CHECK(hipMemset(C, 0, WB*HA*sizeof(float)));

    HIP_CHECK(hipEventRecord(start));
    // compute and record
    for (unsigned int iter = 0; iter < MAX_ITER ; iter++)
        hipLaunchKernelGGL(( matmul), dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C);
    HIP_CHECK(hipGetLastError());   // catch launch-configuration errors
    HIP_CHECK(hipEventRecord(stop));
    HIP_CHECK(hipEventSynchronize(stop));
    HIP_CHECK(hipEventElapsedTime(&gpu_time, start, stop));
    printf("GPU time = %f s\n", gpu_time*0.001/MAX_ITER);

    // Only C changes on the device; A and B do not need to be copied back.
    HIP_CHECK(hipMemcpy(hC, C, WB*HA*sizeof(float), hipMemcpyDeviceToHost));

    // printf("Matrix A =\n");
    // printMatrix(hA, WA, HA);
    // printf("Matrix B =\n");
    // printMatrix(hB, WB, HB);
    // printf("Matrix C =\n");
    // printMatrix(hC, WB, HA);

    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(A); hipFree(B); hipFree(C);
    free(hA); free(hB); free(hC);
    return 0;
}

// Fill a _W x _H row-major matrix with uniform random values in [0, 1].
// Caller is responsible for seeding rand() (see main).
void initMatrix(float *_M, int _W, int _H){
    for (unsigned int h=0; h<_H; h++){
        for (unsigned int w=0; w<_W; w++){
            _M[w+h*_W] = (float)rand()/ (float)RAND_MAX;//(int)rand() % 16;
        }
    }
}

// Print a _W x _H row-major matrix, one row per line, prefixed by row index.
void printMatrix(float *_M, int _W, int _H){
    for (unsigned int h=0; h<_H; h++){
        printf("%d|\t", h);
        for (unsigned int w=0; w<_W; w++){
            printf("%f\t", _M[w+h*_W]);
        }
        printf("|\n");
    }
}

// C[j][i] = sum_k A[j][k] * B[k][i]; one thread per output element, bounds-
// guarded so partial boundary blocks are safe.
__global__ void matmul(float *_A, float *_B, float *_C){
    int i = threadIdx.x + blockIdx.x*blockDim.x;   // column of C (and B)
    int j = threadIdx.y + blockIdx.y*blockDim.y;   // row of C (and A)
    if (i < WB && j < HA){
        float sumover = .0f;
        for (unsigned int dmmy = 0; dmmy < HB; dmmy++){
            sumover += _A[dmmy+j*WA]*_B[i+dmmy*WB];
        }
        _C[i+j*WB] = sumover;
    }
}
c3f1f5c92755b1af599322e73b55bcad7274aa3d.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "matrixmul.h"

#define BLOCK_SIZE 32

// Abort with a message on any CUDA API failure instead of silently ignoring it.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                 \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

__global__ void matmul(float *_A, float *_B, float *_C);
void initMatrix(float *_M, int _W, int _H);
void printMatrix(float *_M, int _W, int _H);

// Host driver: computes C = A x B (A: HA x WA, B: HB x WB, row-major) on the
// GPU MAX_ITER times and reports the average kernel time in seconds.
// A, B, C are the device pointers declared in matrixmul.h.
int main(){
    float *hA, *hB, *hC;

    // Validate dimensions BEFORE allocating anything so nothing leaks on the
    // error path, and report failure (the old code exit(0)'d, claiming success,
    // after already allocating and copying).
    if (WA != HB){
        fprintf(stderr, "Inner matrix dimensions do not match (WA != HB)\n");
        return EXIT_FAILURE;
    }

    // timing
    cudaEvent_t start, stop;
    float gpu_time;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));

    // Ceil-division launch config: covers the boundary tail without always
    // launching a fully idle extra block when the size divides evenly
    // (the old WB/BLOCK_SIZE+1 did).
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((WB + BLOCK_SIZE - 1)/BLOCK_SIZE, (HA + BLOCK_SIZE - 1)/BLOCK_SIZE);

    // allocate mem
    hA = (float*)malloc(WA*HA*sizeof(float));
    hB = (float*)malloc(WB*HB*sizeof(float));
    hC = (float*)malloc(WB*HA*sizeof(float));
    CUDA_CHECK(cudaMalloc((void **)&A, WA*HA*sizeof(float)));
    CUDA_CHECK(cudaMalloc((void **)&B, WB*HB*sizeof(float)));
    CUDA_CHECK(cudaMalloc((void **)&C, WB*HA*sizeof(float)));

    // init values; seed once here.  Seeding inside initMatrix reseeded with the
    // same time() on both back-to-back calls, making A and B identical.
    srand(time(NULL));
    initMatrix(hA, WA, HA);
    initMatrix(hB, WB, HB);
    CUDA_CHECK(cudaMemcpy(A, hA, WA*HA*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(B, hB, WB*HB*sizeof(float), cudaMemcpyHostToDevice));
    // C is fully written by the kernel; clear it rather than uploading the
    // uninitialized host buffer as the old code did.
    CUDA_CHECK(cudaMemset(C, 0, WB*HA*sizeof(float)));

    CUDA_CHECK(cudaEventRecord(start));
    // compute and record
    for (unsigned int iter = 0; iter < MAX_ITER ; iter++)
        matmul<<<dimGrid, dimBlock>>>(A, B, C);
    CUDA_CHECK(cudaGetLastError());   // catch launch-configuration errors
    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));
    printf("GPU time = %f s\n", gpu_time*0.001/MAX_ITER);

    // Only C changes on the device; A and B do not need to be copied back.
    CUDA_CHECK(cudaMemcpy(hC, C, WB*HA*sizeof(float), cudaMemcpyDeviceToHost));

    // printf("Matrix A =\n");
    // printMatrix(hA, WA, HA);
    // printf("Matrix B =\n");
    // printMatrix(hB, WB, HB);
    // printf("Matrix C =\n");
    // printMatrix(hC, WB, HA);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(A); cudaFree(B); cudaFree(C);
    free(hA); free(hB); free(hC);
    return 0;
}

// Fill a _W x _H row-major matrix with uniform random values in [0, 1].
// Caller is responsible for seeding rand() (see main).
void initMatrix(float *_M, int _W, int _H){
    for (unsigned int h=0; h<_H; h++){
        for (unsigned int w=0; w<_W; w++){
            _M[w+h*_W] = (float)rand()/ (float)RAND_MAX;//(int)rand() % 16;
        }
    }
}

// Print a _W x _H row-major matrix, one row per line, prefixed by row index.
void printMatrix(float *_M, int _W, int _H){
    for (unsigned int h=0; h<_H; h++){
        printf("%d|\t", h);
        for (unsigned int w=0; w<_W; w++){
            printf("%f\t", _M[w+h*_W]);
        }
        printf("|\n");
    }
}

// C[j][i] = sum_k A[j][k] * B[k][i]; one thread per output element, bounds-
// guarded so partial boundary blocks are safe.
__global__ void matmul(float *_A, float *_B, float *_C){
    int i = threadIdx.x + blockIdx.x*blockDim.x;   // column of C (and B)
    int j = threadIdx.y + blockIdx.y*blockDim.y;   // row of C (and A)
    if (i < WB && j < HA){
        float sumover = .0f;
        for (unsigned int dmmy = 0; dmmy < HB; dmmy++){
            sumover += _A[dmmy+j*WA]*_B[i+dmmy*WB];
        }
        _C[i+j*WB] = sumover;
    }
}
75da6912cc2233f9c9bb04b4cc746adfb24ac603.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "particle.h"
#include "JETBRAIN_IDE.h"
#include <cstdlib>
#include <cstdio>

// Advance every particle by a step of size dt; one thread per particle,
// bounds-guarded for the partial last block.
__global__ void advanceParticles(float dt, particle *pArray, int nParticles) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < nParticles) {
        pArray[idx].advance(dt);
    }
}

// Moves n particles 100 random-sized steps on the GPU and prints the average
// total displacement.  argv[1] = particle count, argv[2] = RNG seed.
int main(int argc, char **argv) {
    hipError_t error;
    int n = 1000000;
    if (argc > 1) { n = atoi(argv[1]); }     // Number of particles
    if (argc > 2) { srand(atoi(argv[2])); }  // Random seed
    // Guard against non-positive counts (atoi of junk returns 0), which would
    // otherwise divide by zero when averaging below.
    if (n <= 0) {
        fprintf(stderr, "Invalid particle count: %d\n", n);
        return 1;
    }
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("0 %s\n", hipGetErrorString(error));
        exit(1);
    }
    particle *pArray = new particle[n];
    particle *devPArray = NULL;
    hipMalloc(&devPArray, n * sizeof(particle));
    hipDeviceSynchronize();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("1 %s\n", hipGetErrorString(error));
        exit(1);
    }
    hipMemcpy(devPArray, pArray, n * sizeof(particle), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("2 %s\n", hipGetErrorString(error));
        exit(1);
    }
    for (int i = 0; i < 100; i++) {
        float dt = (float) rand() / (float) RAND_MAX; // Random distance each step
        advanceParticles<<<1 + n / 256, 256>>>(dt, devPArray, n);
        error = hipGetLastError();
        if (error != hipSuccess) {
            printf("3 %s\n", hipGetErrorString(error));
            exit(1);
        }
        hipDeviceSynchronize();
    }
    hipMemcpy(pArray, devPArray, n * sizeof(particle), hipMemcpyDeviceToHost);
    // The blocking copy synchronizes; still verify nothing failed before
    // trusting the host-side data.
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("4 %s\n", hipGetErrorString(error));
        exit(1);
    }
    v3 totalDistance(0, 0, 0);
    v3 temp;
    for (int i = 0; i < n; i++) {
        temp = pArray[i].getTotalDistance();
        totalDistance.x += temp.x;
        totalDistance.y += temp.y;
        totalDistance.z += temp.z;
    }
    float avgX = totalDistance.x / (float) n;
    float avgY = totalDistance.y / (float) n;
    float avgZ = totalDistance.z / (float) n;
    float avgNorm = sqrt(avgX * avgX + avgY * avgY + avgZ * avgZ);
    printf("Moved %d particles 100 steps. Average distance traveled is |(%f, %f, %f)| = %f\n",
           n, avgX, avgY, avgZ, avgNorm);
    // Release device and host memory (previously leaked).
    hipFree(devPArray);
    delete[] pArray;
    return 0;
}
75da6912cc2233f9c9bb04b4cc746adfb24ac603.cu
#include "particle.h"
#include "JETBRAIN_IDE.h"
#include <cstdlib>
#include <cstdio>

// Advance every particle by a step of size dt; one thread per particle,
// bounds-guarded for the partial last block.
__global__ void advanceParticles(float dt, particle *pArray, int nParticles) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < nParticles) {
        pArray[idx].advance(dt);
    }
}

// Moves n particles 100 random-sized steps on the GPU and prints the average
// total displacement.  argv[1] = particle count, argv[2] = RNG seed.
int main(int argc, char **argv) {
    cudaError_t error;
    int n = 1000000;
    if (argc > 1) { n = atoi(argv[1]); }     // Number of particles
    if (argc > 2) { srand(atoi(argv[2])); }  // Random seed
    // Guard against non-positive counts (atoi of junk returns 0), which would
    // otherwise divide by zero when averaging below.
    if (n <= 0) {
        fprintf(stderr, "Invalid particle count: %d\n", n);
        return 1;
    }
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("0 %s\n", cudaGetErrorString(error));
        exit(1);
    }
    particle *pArray = new particle[n];
    particle *devPArray = NULL;
    cudaMalloc(&devPArray, n * sizeof(particle));
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("1 %s\n", cudaGetErrorString(error));
        exit(1);
    }
    cudaMemcpy(devPArray, pArray, n * sizeof(particle), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("2 %s\n", cudaGetErrorString(error));
        exit(1);
    }
    for (int i = 0; i < 100; i++) {
        float dt = (float) rand() / (float) RAND_MAX; // Random distance each step
        advanceParticles<<<1 + n / 256, 256>>>(dt, devPArray, n);
        error = cudaGetLastError();
        if (error != cudaSuccess) {
            printf("3 %s\n", cudaGetErrorString(error));
            exit(1);
        }
        cudaDeviceSynchronize();
    }
    cudaMemcpy(pArray, devPArray, n * sizeof(particle), cudaMemcpyDeviceToHost);
    // The blocking copy synchronizes; still verify nothing failed before
    // trusting the host-side data.
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("4 %s\n", cudaGetErrorString(error));
        exit(1);
    }
    v3 totalDistance(0, 0, 0);
    v3 temp;
    for (int i = 0; i < n; i++) {
        temp = pArray[i].getTotalDistance();
        totalDistance.x += temp.x;
        totalDistance.y += temp.y;
        totalDistance.z += temp.z;
    }
    float avgX = totalDistance.x / (float) n;
    float avgY = totalDistance.y / (float) n;
    float avgZ = totalDistance.z / (float) n;
    float avgNorm = sqrt(avgX * avgX + avgY * avgY + avgZ * avgZ);
    printf("Moved %d particles 100 steps. Average distance traveled is |(%f, %f, %f)| = %f\n",
           n, avgX, avgY, avgZ, avgNorm);
    // Release device and host memory (previously leaked).
    cudaFree(devPArray);
    delete[] pArray;
    return 0;
}
09c40c6b9dcd17c3951f3be7179355c2b9ca5f2f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudamap.h"
#include <cuda_gl_interop.h>
#include "helper_math.h"
#include <stdio.h>

#define BLOCK_SIZE 512
#define MAX_FLOAT 1e9

// Return the float2 with the smaller .x (value/index pairs: .x = value,
// .y = packed payload).
__device__ static float2 cmpVI(float2 a, float2 b) {
    return a.x<b.x?a:b;
}
// Bit-reinterpret a float2 as 64 bits so it can go through atomicCAS.
__device__ static unsigned long long int _float2_ll(float2 a) {
    return *((unsigned long long int*) &a);
}
__device__ static float2 _ll_float2(unsigned long long int a) {
    return *((float2*) &a);
}
// From http://stackoverflow.com/questions/17399119/cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
// Atomic "min by .x" on a float2, implemented as a 64-bit compare-and-swap
// loop; returns the previous value at *address.
__device__ static float2 atomicMin2(float2* address, float2 val) {
    unsigned long long int* address_as_i = (unsigned long long int*) address;
    unsigned long long int old = *address_as_i, assumed;
    do {
        assumed = old;
        old = ::atomicCAS(address_as_i, assumed,
                          _float2_ll(cmpVI(val, _ll_float2(assumed)))
                         );
    } while (assumed != old);
    return _ll_float2(old);
}

// Accumulate the contribution of a point light at (x,y,z) with the given
// intensity onto every surfel: one thread per surfel, falloff is
// cos(theta) * intensity / |L|^3 (the extra |L| normalizes ndotL).
__global__ void cuAddlight(
        float* intensities,
        float3* surfel_pos,
        float3* surfel_normal,
        float intensity, float x, float y, float z, int n)
{
    int tid = threadIdx.x;
    int surfaceIdx = tid + blockDim.x*blockIdx.x;
    if (surfaceIdx < n) {
        float3 pos = surfel_pos[surfaceIdx];
        float3 norm = surfel_normal[surfaceIdx];
        float3 p = make_float3(x,y,z);
        float3 L = p - pos;
        float LdotL = dot(L,L);
        float ndotL = dot(norm, L);
        // Guard LdotL > 0 avoids division by zero when the light sits on the surfel.
        float ret = LdotL>0?ndotL*intensity/(LdotL*sqrt(LdotL)):0;
        atomicAdd(intensities+surfaceIdx, ret);
    }
}

// For the voxel at grid coordinate (blockIdx.y, ycoord, zcoord) in a w^3
// field, find the surfel minimizing intensity*|L|^2/cos(theta) (a
// distance-like metric; surfels behind or facing away get MAX_FLOAT) and
// atomically merge the per-block minimum into `field`.
// blockIdx.x partitions the surfels; blockIdx.y is the voxel x coordinate.
// The .y payload packs the surfel's spherical direction (phi, theta relative
// to the domain center (0.5,0.5,0.5)) as two 15-bit integers reinterpreted
// as a float.
template <unsigned int blockSize>
__global__ void cuCompute(
        float* intensities,
        float3* surfel_pos,
        float3* surfel_normal,
        int n,
        float2* field, int w, int ycoord, int zcoord)
{
    __shared__ float2 mini[BLOCK_SIZE];
    int tid = threadIdx.x;
    int surfaceIdx = tid + blockDim.x*blockIdx.x;

    // Out-of-range threads contribute the identity element of the min.
    mini[tid] = make_float2(MAX_FLOAT, 0);
    if (surfaceIdx < n) {
        // Data load
        float intensity = intensities[surfaceIdx];
        float3 pos = surfel_pos[surfaceIdx];
        float3 norm = surfel_normal[surfaceIdx];

        // Direction of the surfel from the domain center, packed into 30 bits.
        float3 r = make_float3(pos.x-0.5, pos.y-0.5, pos.z-0.5);
        float phi = atan2(r.y, r.x);
        if (phi < 0) phi += 2*M_PI;
        float theta = acos(r.z/sqrt(dot(r,r)));
        unsigned int iphi = (1<<15)*phi/(2*M_PI);
        unsigned int itheta = (1<<15)*theta/(M_PI);
        mini[tid].y = __int_as_float((iphi<<15)|itheta);

        // Computation: voxel center in [0,1]^3.
        float3 p = make_float3(
                (blockIdx.y + 0.5)/(float)w,
                (ycoord + 0.5)/(float)w,
                (zcoord + 0.5)/(float)w
                );
        float3 L = p - pos;
        float LdotL = dot(L,L);
        float ndotLn = dot(norm, L)/sqrt(LdotL);
        char occl = 1;  // occlusion placeholder; presumably always visible here
        float v = intensity*occl*ndotLn>0?intensity*LdotL/ndotLn:MAX_FLOAT;
        mini[tid].x = v>0.f?v:MAX_FLOAT;
    }
    __syncthreads();

    // Reduction: standard shared-memory tree min, with a barrier at every
    // step (also below warp width, which keeps it safe under independent
    // thread scheduling).
    if (blockSize >= 512) { if (tid < 256) { mini[tid] = cmpVI(mini[tid+256], mini[tid]); } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { mini[tid] = cmpVI(mini[tid+128], mini[tid]); } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) { mini[tid] = cmpVI(mini[tid+64], mini[tid]); } __syncthreads(); }
    if (blockSize >= 64) { if (tid < 32) { mini[tid] = cmpVI(mini[tid+32], mini[tid]); } __syncthreads(); }
    if (blockSize >= 32) { if (tid < 16) { mini[tid] = cmpVI(mini[tid+16], mini[tid]); } __syncthreads(); }
    if (blockSize >= 16) { if (tid < 8) { mini[tid] = cmpVI(mini[tid+8], mini[tid]); } __syncthreads(); }
    if (blockSize >= 8) { if (tid < 4) { mini[tid] = cmpVI(mini[tid+4], mini[tid]); } __syncthreads(); }
    if (blockSize >= 4) { if (tid < 2) { mini[tid] = cmpVI(mini[tid+2], mini[tid]); } __syncthreads(); }
    if (blockSize >= 2) { if (tid < 1) { mini[tid] = cmpVI(mini[tid+1], mini[tid]); } __syncthreads(); }

    // Final data copy: merge this block's minimum into the voxel's slot.
    if (tid == 0) {
        atomicMin2(field+zcoord*w*w+ycoord*w+blockIdx.y, mini[0]);
    }
}

// Allocate device buffers sized from cudamap->n (surfels) and cudamap->w
// (field resolution) and upload surfel positions/normals.
void Cudamap_init(Cudamap* cudamap, const float* surfel_pos, const float* surfel_normal) {
    hipSetDevice(0);
    hipMalloc((void**) &(cudamap->d_intensities), sizeof(float)*cudamap->n);
    hipMalloc((void**) &(cudamap->d_surfel_pos), sizeof(float3)*cudamap->n);
    hipMalloc((void**) &(cudamap->d_surfel_normal), sizeof(float3)*cudamap->n);
    hipMalloc((void**) &(cudamap->d_field), sizeof(float2)*cudamap->w*cudamap->w*cudamap->w);

    hipMemcpy(cudamap->d_surfel_pos, surfel_pos, sizeof(float3)*cudamap->n, hipMemcpyHostToDevice);
    hipMemcpy(cudamap->d_surfel_normal, surfel_normal, sizeof(float3)*cudamap->n, hipMemcpyHostToDevice);
    hipMemset((void*) cudamap->d_intensities, 0, sizeof(float)*cudamap->n);
}

// Release all device buffers allocated by Cudamap_init.
void Cudamap_free(Cudamap* cudamap) {
    hipFree(cudamap->d_surfel_pos);
    hipFree(cudamap->d_surfel_normal);
    hipFree(cudamap->d_intensities);
    hipFree(cudamap->d_field);
}

// Overwrite the per-surfel intensities, or zero them when NULL is passed.
void Cudamap_setIntensities(Cudamap* cudamap, float* intensities) {
    if (intensities) {
        hipMemcpy(cudamap->d_intensities, intensities, sizeof(float)*cudamap->n, hipMemcpyHostToDevice);
    } else {
        hipMemset((void*) cudamap->d_intensities, 0, sizeof(float)*cudamap->n);
    }
}

// Add a point light's contribution to every surfel's intensity (async launch;
// no synchronization here — callers relying on completion must sync).
void Cudamap_addLight(Cudamap* cudamap, float intensity, float x, float y, float z) {
    hipLaunchKernelGGL(( cuAddlight), dim3((cudamap->n+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, cudamap->d_intensities, cudamap->d_surfel_pos, cudamap->d_surfel_normal, intensity, x, y, z, cudamap->n);
}

// Compute the full w^3 field: initialize every voxel to (MAX_FLOAT, 0), run
// cuCompute for each (y,z) slice pair, then copy the result back into `field`
// (laid out as interleaved value/payload float pairs).
void Cudamap_compute(Cudamap* cudamap, float* field)
{
    int n = cudamap->n;
    int w = cudamap->w;

    for (int i = 0; i < w*w*w; i++) {
        field[2*i] = MAX_FLOAT;
        field[2*i+1] = 0;
    }
    hipMemcpy(cudamap->d_field, field, sizeof(float2)*w*w*w, hipMemcpyHostToDevice);

    dim3 threads(BLOCK_SIZE, 1, 1);
    dim3 blocks((n+BLOCK_SIZE-1)/BLOCK_SIZE, w, 1);

    for (int i = 0; i < w; i++) {
        for (int j = 0; j < w; j++) {
            hipLaunchKernelGGL(( cuCompute<BLOCK_SIZE>), dim3(blocks), dim3(threads) , 0, 0, cudamap->d_intensities, cudamap->d_surfel_pos, cudamap->d_surfel_normal, n, cudamap->d_field, w, i, j);
        }
        printf("Done %d/%d\n", i, w);
        hipDeviceSynchronize();
    }
    hipMemcpy(field, cudamap->d_field, sizeof(float2)*w*w*w, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
}
09c40c6b9dcd17c3951f3be7179355c2b9ca5f2f.cu
#include "cudamap.h"
#include <cuda_gl_interop.h>
#include "helper_math.h"
#include <stdio.h>

#define BLOCK_SIZE 512
#define MAX_FLOAT 1e9

// Return the float2 with the smaller .x (value/index pairs: .x = value,
// .y = packed payload).
__device__ static float2 cmpVI(float2 a, float2 b) {
    return a.x<b.x?a:b;
}
// Bit-reinterpret a float2 as 64 bits so it can go through atomicCAS.
__device__ static unsigned long long int _float2_ll(float2 a) {
    return *((unsigned long long int*) &a);
}
__device__ static float2 _ll_float2(unsigned long long int a) {
    return *((float2*) &a);
}
// From http://stackoverflow.com/questions/17399119/cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
// Atomic "min by .x" on a float2, implemented as a 64-bit compare-and-swap
// loop; returns the previous value at *address.
__device__ static float2 atomicMin2(float2* address, float2 val) {
    unsigned long long int* address_as_i = (unsigned long long int*) address;
    unsigned long long int old = *address_as_i, assumed;
    do {
        assumed = old;
        old = ::atomicCAS(address_as_i, assumed,
                          _float2_ll(cmpVI(val, _ll_float2(assumed)))
                         );
    } while (assumed != old);
    return _ll_float2(old);
}

// Accumulate the contribution of a point light at (x,y,z) with the given
// intensity onto every surfel: one thread per surfel, falloff is
// cos(theta) * intensity / |L|^3 (the extra |L| normalizes ndotL).
__global__ void cuAddlight(
        float* intensities,
        float3* surfel_pos,
        float3* surfel_normal,
        float intensity, float x, float y, float z, int n)
{
    int tid = threadIdx.x;
    int surfaceIdx = tid + blockDim.x*blockIdx.x;
    if (surfaceIdx < n) {
        float3 pos = surfel_pos[surfaceIdx];
        float3 norm = surfel_normal[surfaceIdx];
        float3 p = make_float3(x,y,z);
        float3 L = p - pos;
        float LdotL = dot(L,L);
        float ndotL = dot(norm, L);
        // Guard LdotL > 0 avoids division by zero when the light sits on the surfel.
        float ret = LdotL>0?ndotL*intensity/(LdotL*sqrt(LdotL)):0;
        atomicAdd(intensities+surfaceIdx, ret);
    }
}

// For the voxel at grid coordinate (blockIdx.y, ycoord, zcoord) in a w^3
// field, find the surfel minimizing intensity*|L|^2/cos(theta) (a
// distance-like metric; surfels behind or facing away get MAX_FLOAT) and
// atomically merge the per-block minimum into `field`.
// blockIdx.x partitions the surfels; blockIdx.y is the voxel x coordinate.
// The .y payload packs the surfel's spherical direction (phi, theta relative
// to the domain center (0.5,0.5,0.5)) as two 15-bit integers reinterpreted
// as a float.
template <unsigned int blockSize>
__global__ void cuCompute(
        float* intensities,
        float3* surfel_pos,
        float3* surfel_normal,
        int n,
        float2* field, int w, int ycoord, int zcoord)
{
    __shared__ float2 mini[BLOCK_SIZE];
    int tid = threadIdx.x;
    int surfaceIdx = tid + blockDim.x*blockIdx.x;

    // Out-of-range threads contribute the identity element of the min.
    mini[tid] = make_float2(MAX_FLOAT, 0);
    if (surfaceIdx < n) {
        // Data load
        float intensity = intensities[surfaceIdx];
        float3 pos = surfel_pos[surfaceIdx];
        float3 norm = surfel_normal[surfaceIdx];

        // Direction of the surfel from the domain center, packed into 30 bits.
        float3 r = make_float3(pos.x-0.5, pos.y-0.5, pos.z-0.5);
        float phi = atan2(r.y, r.x);
        if (phi < 0) phi += 2*M_PI;
        float theta = acos(r.z/sqrt(dot(r,r)));
        unsigned int iphi = (1<<15)*phi/(2*M_PI);
        unsigned int itheta = (1<<15)*theta/(M_PI);
        mini[tid].y = __int_as_float((iphi<<15)|itheta);

        // Computation: voxel center in [0,1]^3.
        float3 p = make_float3(
                (blockIdx.y + 0.5)/(float)w,
                (ycoord + 0.5)/(float)w,
                (zcoord + 0.5)/(float)w
                );
        float3 L = p - pos;
        float LdotL = dot(L,L);
        float ndotLn = dot(norm, L)/sqrt(LdotL);
        char occl = 1;  // occlusion placeholder; presumably always visible here
        float v = intensity*occl*ndotLn>0?intensity*LdotL/ndotLn:MAX_FLOAT;
        mini[tid].x = v>0.f?v:MAX_FLOAT;
    }
    __syncthreads();

    // Reduction: standard shared-memory tree min, with a barrier at every
    // step (also below warp width, which keeps it safe under independent
    // thread scheduling).
    if (blockSize >= 512) { if (tid < 256) { mini[tid] = cmpVI(mini[tid+256], mini[tid]); } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { mini[tid] = cmpVI(mini[tid+128], mini[tid]); } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) { mini[tid] = cmpVI(mini[tid+64], mini[tid]); } __syncthreads(); }
    if (blockSize >= 64) { if (tid < 32) { mini[tid] = cmpVI(mini[tid+32], mini[tid]); } __syncthreads(); }
    if (blockSize >= 32) { if (tid < 16) { mini[tid] = cmpVI(mini[tid+16], mini[tid]); } __syncthreads(); }
    if (blockSize >= 16) { if (tid < 8) { mini[tid] = cmpVI(mini[tid+8], mini[tid]); } __syncthreads(); }
    if (blockSize >= 8) { if (tid < 4) { mini[tid] = cmpVI(mini[tid+4], mini[tid]); } __syncthreads(); }
    if (blockSize >= 4) { if (tid < 2) { mini[tid] = cmpVI(mini[tid+2], mini[tid]); } __syncthreads(); }
    if (blockSize >= 2) { if (tid < 1) { mini[tid] = cmpVI(mini[tid+1], mini[tid]); } __syncthreads(); }

    // Final data copy: merge this block's minimum into the voxel's slot.
    if (tid == 0) {
        atomicMin2(field+zcoord*w*w+ycoord*w+blockIdx.y, mini[0]);
    }
}

// Allocate device buffers sized from cudamap->n (surfels) and cudamap->w
// (field resolution) and upload surfel positions/normals.
void Cudamap_init(Cudamap* cudamap, const float* surfel_pos, const float* surfel_normal) {
    cudaSetDevice(0);
    cudaMalloc((void**) &(cudamap->d_intensities), sizeof(float)*cudamap->n);
    cudaMalloc((void**) &(cudamap->d_surfel_pos), sizeof(float3)*cudamap->n);
    cudaMalloc((void**) &(cudamap->d_surfel_normal), sizeof(float3)*cudamap->n);
    cudaMalloc((void**) &(cudamap->d_field), sizeof(float2)*cudamap->w*cudamap->w*cudamap->w);

    cudaMemcpy(cudamap->d_surfel_pos, surfel_pos, sizeof(float3)*cudamap->n, cudaMemcpyHostToDevice);
    cudaMemcpy(cudamap->d_surfel_normal, surfel_normal, sizeof(float3)*cudamap->n, cudaMemcpyHostToDevice);
    cudaMemset((void*) cudamap->d_intensities, 0, sizeof(float)*cudamap->n);
}

// Release all device buffers allocated by Cudamap_init.
void Cudamap_free(Cudamap* cudamap) {
    cudaFree(cudamap->d_surfel_pos);
    cudaFree(cudamap->d_surfel_normal);
    cudaFree(cudamap->d_intensities);
    cudaFree(cudamap->d_field);
}

// Overwrite the per-surfel intensities, or zero them when NULL is passed.
void Cudamap_setIntensities(Cudamap* cudamap, float* intensities) {
    if (intensities) {
        cudaMemcpy(cudamap->d_intensities, intensities, sizeof(float)*cudamap->n, cudaMemcpyHostToDevice);
    } else {
        cudaMemset((void*) cudamap->d_intensities, 0, sizeof(float)*cudamap->n);
    }
}

// Add a point light's contribution to every surfel's intensity (async launch;
// no synchronization here — callers relying on completion must sync).
void Cudamap_addLight(Cudamap* cudamap, float intensity, float x, float y, float z) {
    cuAddlight<<< (cudamap->n+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE >>>( cudamap->d_intensities, cudamap->d_surfel_pos, cudamap->d_surfel_normal, intensity, x, y, z, cudamap->n);
}

// Compute the full w^3 field: initialize every voxel to (MAX_FLOAT, 0), run
// cuCompute for each (y,z) slice pair, then copy the result back into `field`
// (laid out as interleaved value/payload float pairs).
void Cudamap_compute(Cudamap* cudamap, float* field)
{
    int n = cudamap->n;
    int w = cudamap->w;

    for (int i = 0; i < w*w*w; i++) {
        field[2*i] = MAX_FLOAT;
        field[2*i+1] = 0;
    }
    cudaMemcpy(cudamap->d_field, field, sizeof(float2)*w*w*w, cudaMemcpyHostToDevice);

    dim3 threads(BLOCK_SIZE, 1, 1);
    dim3 blocks((n+BLOCK_SIZE-1)/BLOCK_SIZE, w, 1);

    for (int i = 0; i < w; i++) {
        for (int j = 0; j < w; j++) {
            cuCompute<BLOCK_SIZE><<< blocks, threads >>>( cudamap->d_intensities, cudamap->d_surfel_pos, cudamap->d_surfel_normal, n, cudamap->d_field, w, i, j);
        }
        printf("Done %d/%d\n", i, w);
        cudaDeviceSynchronize();
    }
    cudaMemcpy(field, cudamap->d_field, sizeof(float2)*w*w*w, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
}
43578d1e194f696db5c05c61f8a0fa1c2b012d49.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*$Id: MarsScan.cu 720 2009-11-10 10:13:52Z wenbinor $*/
/*
 * Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users. This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 */

#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_

// includes, kernels
#include "MarsInc.h"

// Define this to more rigorously avoid bank conflicts,
// even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance
// is lower with ZERO_BANK_CONFLICTS enabled. It is provided
// as an example.
//#define ZERO_BANK_CONFLICTS

// 16 banks on G80
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4

#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif

///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//

// Load two elements per thread from global into padded shared memory,
// returning (by reference) both the padded shared indices and the global
// indices so the matching store can mirror this layout.
template <bool isNP2>
__device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB)
{
    int thid = threadIdx.x;
    mem_ai = baseIndex + threadIdx.x;
    mem_bi = mem_ai + blockDim.x;
    ai = thid;
    bi = thid + blockDim.x;
    // compute spacing to avoid bank conflicts
    bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    // Cache the computational window in shared memory
    // pad values beyond n with zeros
    s_data[ai + bankOffsetA] = g_idata[mem_ai];
    if (isNP2) // compile-time decision
    {
        s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
    }
    else
    {
        s_data[bi + bankOffsetB] = g_idata[mem_bi];
    }
}

// Mirror of loadSharedChunkFromMem: write the two scanned elements per thread
// back to global memory, skipping the out-of-range element for non-power-of-2
// blocks.
template <bool isNP2>
__device__ void storeSharedChunkToMem(int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB)
{
    __syncthreads();
    // write results to global memory
    g_odata[mem_ai] = s_data[ai + bankOffsetA];
    if (isNP2) // compile-time decision
    {
        if (bi < n)
            g_odata[mem_bi] = s_data[bi + bankOffsetB];
    }
    else
    {
        g_odata[mem_bi] = s_data[bi + bankOffsetB];
    }
}

// Prepare the down-sweep: optionally save the block total into g_blockSums,
// then zero the root so the down-sweep produces an EXCLUSIVE scan.
template <bool storeSum>
__device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex)
{
    if (threadIdx.x == 0)
    {
        int index = (blockDim.x << 1) - 1;
        index += CONFLICT_FREE_OFFSET(index);
        if (storeSum) // compile-time decision
        {
            // write this block's total sum to the corresponding index in the blockSums array
            g_blockSums[blockIndex] = s_data[index];
        }
        // zero the last element in the scan so it will propagate back to the front
        s_data[index] = 0;
    }
}

// Up-sweep (reduce) phase of Blelloch scan over 2*blockDim.x elements in
// shared memory; returns the stride at which the sweep stopped (used to seed
// the down-sweep).
__device__ unsigned int buildSum(int *s_data)
{
    unsigned int thid = threadIdx.x;
    unsigned int stride = 1;
    // build the sum in place up the tree
    for (int d = blockDim.x; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            s_data[bi] += s_data[ai];
        }
        stride *= 2;
    }
    return stride;
}

// Down-sweep phase: starting from the zeroed root, swap-and-add back down the
// tree to turn partial sums into an exclusive prefix scan.
__device__ void scanRootToLeaves(int *s_data, unsigned int stride)
{
    unsigned int thid = threadIdx.x;
    // traverse down the tree building the scan in place
    for (int d = 1; d <= blockDim.x; d *= 2)
    {
        stride >>= 1;
        __syncthreads();
        if (thid < d)
        {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            int t = s_data[ai];
            s_data[ai] = s_data[bi];
            s_data[bi] += t;
        }
    }
}

// Scan one block's window that is already resident in shared memory:
// up-sweep, (optionally) record the block total, then down-sweep.
template <bool storeSum>
__device__ void prescanBlock(int *data, int blockIndex, int *blockSums)
{
    int stride = buildSum(data); // build the sum in place up the tree
    clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
    scanRootToLeaves(data, stride); // traverse down tree to build the scan
}

// Exclusive scan of one 2*blockDim.x window per block.  storeSum: record the
// block totals for the recursive host pass; isNP2: last (non-power-of-2)
// block needs bounds checks.  Requires dynamic shared memory sized by the
// host (window + bank-conflict padding).
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex)
{
    int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
    extern __shared__ int s_data[];
    // load data into shared memory
    loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
    // scan the data in each block
    prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
    // write results to device memory
    storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}

// Add the scanned block total for this block (from `uniforms`) to every
// element of the block's window — the "fix-up" pass of the recursive scan.
__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex)
{
    __shared__ int uni;
    if (threadIdx.x == 0)
        uni = uniforms[blockIdx.x + blockOffset];
    unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
    __syncthreads();
    // note two adds per thread
    g_data[address] += uni;
    // Boolean multiply zeroes the second add when it falls past n.
    g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}

inline bool isPowerOfTwo(int n)
{
    return ((n&(n-1))==0) ;
}

// Largest power of two <= n (extracted from the float/double exponent bits).
inline int floorPow2(int n)
{
#ifdef WIN32
    // method 2
    return 1 << (int)logb((float)n);
#else
    // method 1
    // int nf = (int)n;
    // return 1 << (((*(int*)&nf) >> 23) - 127);
    int exp;
    frexp((double)n, &exp);
    return 1 << (exp - 1);
#endif
}

#define BLOCK_SIZE 256

// Per-level device buffers for the recursive block-sum scan, managed by
// preallocBlockSums/deallocBlockSums.
int** g_scanBlockSums;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;

// Allocate one device block-sums buffer per recursion level for scans of up
// to maxNumElements elements.
void preallocBlockSums(unsigned int maxNumElements)
{
    // assert(g_numEltsAllocated == 0); // shouldn't be called
    g_numEltsAllocated = maxNumElements;
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numElts = maxNumElements;
    int level = 0;
    // First pass: count recursion levels.
    do
    {
        unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            level++;
        }
        numElts = numBlocks;
    } while (numElts > 1);
    g_scanBlockSums = (int**) malloc(level * sizeof(int*));
    g_numLevelsAllocated = level;
    numElts = maxNumElements;
    level = 0;
    // Second pass: allocate one buffer per level.
    do
    {
        unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            checkCudaErrors(hipMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(int)));
        }
        numElts = numBlocks;
    } while (numElts > 1);
}

// Free everything allocated by preallocBlockSums.
void deallocBlockSums()
{
    // NOTE(review): signed i vs unsigned g_numLevelsAllocated comparison —
    // harmless for realistic level counts, but worth normalizing.
    for (int i = 0; i < g_numLevelsAllocated; i++)
    {
        hipFree(g_scanBlockSums[i]);
    }
    free((void**)g_scanBlockSums);
    g_scanBlockSums = 0;
    g_numEltsAllocated = 0;
    g_numLevelsAllocated = 0;
}

// (Re)allocate scan scratch space only when the current allocation is absent
// or too small for maxNumElements.
void saven_initialPrefixSum(unsigned int maxNumElements)
{
    if(g_numEltsAllocated == 0)
        preallocBlockSums(maxNumElements);
    else if(g_numEltsAllocated>maxNumElements)
    {
        deallocBlockSums();
        preallocBlockSums(maxNumElements);
    }
}

// Recursive exclusive scan: scan each block, recursively scan the block
// totals on level+1, then uniformAdd the scanned totals back in.  The last
// (possibly non-power-of-2) block is handled by a separate launch.
void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level)
{
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize)));
    unsigned int numThreads;
    if (numBlocks > 1)
        numThreads = blockSize;
    else if (isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = floorPow2(numElements);
    unsigned int numEltsPerBlock = numThreads * 2;
    // if this is a non-power-of-2 array, the last block will be non-full
    // compute the smallest power of 2 able to compute its scan.
    unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
    unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
    unsigned int np2LastBlock = 0;
    unsigned int sharedMemLastBlock = 0;
    if (numEltsLastBlock != numEltsPerBlock)
    {
        np2LastBlock = 1;
        if(!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);
        unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
    }
    // padding space is used to avoid shared memory bank conflicts
    unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
    unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
    if (numBlocks > 1)
    {
        assert(g_numEltsAllocated >= numElements);
    }
#endif
    // setup execution parameters
    // if NP2, we process the last block separately
    dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);
    // make sure there are no CUDA errors before we start
    // execute the scan
    if (numBlocks > 1)
    {
        hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0);
        if (np2LastBlock)
        {
            hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0, outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
        }
        // After scanning all the sub-blocks, we are mostly done. But now we
        // need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be sdded to each block to
        // get the final results.
        // recursive (CPU) call
        prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
        hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads) , 0, 0, outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
        if (np2LastBlock)
        {
            hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
        }
    }
    else if (isPowerOfTwo(numElements))
    {
        hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numThreads * 2, 0, 0);
    }
    else
    {
        hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numElements, 0, 0);
    }
}

// Public entry point: exclusive prefix sum of numElements ints on the device.
// Requires preallocBlockSums(numElements) to have been called.
void prescanArray(int *outArray, int *inArray, int numElements)
{
    prescanArrayRecursive(outArray, inArray, numElements, 0);
}

// Exclusive scan of d_inArr into d_outArr; returns the total sum (last
// exclusive-scan value + last input value).
// NOTE(review): the total is computed as unsigned but returned as int —
// values above INT_MAX would wrap; confirm callers expect int.
int prefexSum( int* d_inArr, int* d_outArr, int numRecords )
{
    preallocBlockSums(numRecords);
    prescanArray( d_outArr, d_inArr, numRecords );
    deallocBlockSums();
    int* h_outLast = ( int* )malloc( sizeof( int ) );
    checkCudaErrors( hipMemcpy( h_outLast, d_outArr+numRecords-1, sizeof(int), hipMemcpyDeviceToHost) );
    int* h_inLast = ( int* )malloc( sizeof( int ) );
    checkCudaErrors( hipMemcpy( h_inLast, d_inArr+numRecords-1, sizeof(int), hipMemcpyDeviceToHost) );
    unsigned int sum = *h_outLast + *h_inLast;
    free( h_outLast );
    free( h_inLast );
    return sum;
}
#endif // _PRESCAN_CU_
43578d1e194f696db5c05c61f8a0fa1c2b012d49.cu
/*$Id: MarsScan.cu 720 2009-11-10 10:13:52Z wenbinor $*/ /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include "MarsInc.h" // Define this to more rigorously avoid bank conflicts, // even at the lower (root) levels of the tree // Note that due to the higher addressing overhead, performance // is lower with ZERO_BANK_CONFLICTS enabled. It is provided // as an example. 
//#define ZERO_BANK_CONFLICTS // 16 banks on G80 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS) #endif /////////////////////////////////////////////////////////////////////////////// // Work-efficient compute implementation of scan, one thread per 2 elements // Work-efficient: O(log(n)) steps, and O(n) adds. // Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging // Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements. // // In addition, If ZERO_BANK_CONFLICTS is defined, uses // n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) // shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using // single-element offsets every NUM_BANKS elements, plus additional single-element offsets // after every NUM_BANKS^2 elements. // // Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums // and Their Applications", or Prins and Chatterjee PRAM course notes: // http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf // // This work-efficient version is based on the algorithm presented in Guy Blelloch's // excellent paper "Prefix sums and their applications". // http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html // // Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined) // Con: More instructions to compute bank-conflict-free shared memory addressing, // and slightly more shared memory storage used. 
// template <bool isNP2> __device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; // compute spacing to avoid bank conflicts bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); // Cache the computational window in shared memory // pad values beyond n with zeros s_data[ai + bankOffsetA] = g_idata[mem_ai]; if (isNP2) // compile-time decision { s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = g_idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); // write results to global memory g_odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) // compile-time decision { if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB]; } else { g_odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) // compile-time decision { // write this block's total sum to the corresponding index in the blockSums array g_blockSums[blockIndex] = s_data[index]; } // zero the last element in the scan so it will propagate back to the front s_data[index] = 0; } } __device__ unsigned int buildSum(int *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; // build the sum in place up the tree for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); 
s_data[bi] += s_data[ai]; } stride *= 2; } return stride; } __device__ void scanRootToLeaves(int *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; // traverse down the tree building the scan in place for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(int *data, int blockIndex, int *blockSums) { int stride = buildSum(data); // build the sum in place up the tree clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex); scanRootToLeaves(data, stride); // traverse down tree to build the scan } template <bool storeSum, bool isNP2> __global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ int s_data[]; // load data into shared memory loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? 
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); // scan the data in each block prescanBlock<storeSum>(s_data, blockIndex, g_blockSums); // write results to device memory storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); } __global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) { __shared__ int uni; if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset]; unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x; __syncthreads(); // note two adds per thread g_data[address] += uni; g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni; } inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; } inline int floorPow2(int n) { #ifdef WIN32 // method 2 return 1 << (int)logb((float)n); #else // method 1 // int nf = (int)n; // return 1 << (((*(int*)&nf) >> 23) - 127); int exp; frexp((double)n, &exp); return 1 << (exp - 1); #endif } #define BLOCK_SIZE 256 int** g_scanBlockSums; unsigned int g_numEltsAllocated = 0; unsigned int g_numLevelsAllocated = 0; void preallocBlockSums(unsigned int maxNumElements) { // assert(g_numEltsAllocated == 0); // shouldn't be called g_numEltsAllocated = maxNumElements; unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numElts = maxNumElements; int level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { level++; } numElts = numBlocks; } while (numElts > 1); g_scanBlockSums = (int**) malloc(level * sizeof(int*)); g_numLevelsAllocated = level; numElts = maxNumElements; level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { checkCudaErrors(cudaMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(int))); } numElts = numBlocks; } while (numElts > 1); } void deallocBlockSums() { for (int i = 0; i < 
g_numLevelsAllocated; i++) { cudaFree(g_scanBlockSums[i]); } free((void**)g_scanBlockSums); g_scanBlockSums = 0; g_numEltsAllocated = 0; g_numLevelsAllocated = 0; } void saven_initialPrefixSum(unsigned int maxNumElements) { if(g_numEltsAllocated == 0) preallocBlockSums(maxNumElements); else if(g_numEltsAllocated>maxNumElements) { deallocBlockSums(); preallocBlockSums(maxNumElements); } } void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level) { unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize))); unsigned int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = floorPow2(numElements); unsigned int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2); unsigned int np2LastBlock = 0; unsigned int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts unsigned int extraSpace = numEltsPerBlock / NUM_BANKS; unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); #ifdef DEBUG if (numBlocks > 1) { assert(g_numEltsAllocated >= numElements); } #endif // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); // make sure there are no CUDA errors before we start // execute the 
scan if (numBlocks > 1) { prescan<true, false><<< grid, threads, sharedMemSize >>>(outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); uniformAdd<<< grid, threads >>>(outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { uniformAdd<<< 1, numThreadsLastBlock >>>(outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { prescan<false, false><<< grid, threads, sharedMemSize >>>(outArray, inArray, 0, numThreads * 2, 0, 0); } else { prescan<false, true><<< grid, threads, sharedMemSize >>>(outArray, inArray, 0, numElements, 0, 0); } } void prescanArray(int *outArray, int *inArray, int numElements) { prescanArrayRecursive(outArray, inArray, numElements, 0); } int prefexSum( int* d_inArr, int* d_outArr, int numRecords ) { preallocBlockSums(numRecords); prescanArray( d_outArr, d_inArr, numRecords ); deallocBlockSums(); int* h_outLast = ( int* )malloc( sizeof( int ) ); checkCudaErrors( cudaMemcpy( h_outLast, d_outArr+numRecords-1, sizeof(int), cudaMemcpyDeviceToHost) ); int* h_inLast = ( int* )malloc( sizeof( int ) ); checkCudaErrors( cudaMemcpy( h_inLast, d_inArr+numRecords-1, sizeof(int), cudaMemcpyDeviceToHost) ); unsigned int sum = *h_outLast + *h_inLast; free( h_outLast ); free( h_inLast ); return sum; } #endif // _PRESCAN_CU_
a23c105ea5e1193eb3600fe6ab31a087d5147d3d.hip
// !!! This is a file automatically generated by hipify!!! #include "../THCTensorMathCompareT.cuh" #include "THHTensor.hpp" #include "THHStream.h" #include "../generic/THCTensorMathCompareT.cu" #include "../THCGenerateLongType.h"
a23c105ea5e1193eb3600fe6ab31a087d5147d3d.cu
#include "../THCTensorMathCompareT.cuh" #include "THCTensor.hpp" #include "THCStream.h" #include "../generic/THCTensorMathCompareT.cu" #include "../THCGenerateLongType.h"
671b58528c1cae8fbfde4b8663f95534a8e8d515.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define BLOCK 10 using namespace std; __global__ void matrizEscalar(float* A, float B, float* C, int fil,int col,int d3){ int i = blockIdx.y*blockDim.y + threadIdx.y; int j = blockIdx.x*blockDim.x + threadIdx.x; int k = blockIdx.z*blockDim.z + threadIdx.z; if ((i < fil) && (j < col) && (k < d3)) { C[k*col*fil + i*col + j] = B * A[k*col*fil + i*col + j]; } } void matrizescalar(float* A, float B, float* C, int fil,int col,int d3) { int size = fil * col * d3 * sizeof(float); float *d_A, *d_C; hipMalloc((void **)&d_A, size); hipMemcpy(d_A, A, size, hipMemcpyHostToDevice); hipMalloc((void **)&d_C, size); dim3 DimGrid((col-1)/BLOCK+1, (fil-1)/BLOCK+1,(d3-1)/BLOCK+1); dim3 DimBlock(BLOCK, BLOCK, BLOCK); hipLaunchKernelGGL(( matrizEscalar) , dim3(dime1), dim3(DimBlock) , 0, 0, d_A, B, d_C, fil, col,d3); hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_C); } int main() { int fila,columna,D3; float B; float *A;float *C; fila = 10; columna = 10; D3 = 10; B=2; A = (float*)malloc(fila*columna*D3*sizeof(float)); C = (float*)malloc(fila*columna*D3*sizeof(float)); for (int i = 0; i < fila*columna*D3; i++) A[i] = 2; matrizescalar(A, B, C, fila,columna,D3); }
671b58528c1cae8fbfde4b8663f95534a8e8d515.cu
#include <iostream> #include <stdio.h> #include <cuda_runtime.h> #include <cuda.h> #include <device_launch_parameters.h> #define BLOCK 10 using namespace std; __global__ void matrizEscalar(float* A, float B, float* C, int fil,int col,int d3){ int i = blockIdx.y*blockDim.y + threadIdx.y; int j = blockIdx.x*blockDim.x + threadIdx.x; int k = blockIdx.z*blockDim.z + threadIdx.z; if ((i < fil) && (j < col) && (k < d3)) { C[k*col*fil + i*col + j] = B * A[k*col*fil + i*col + j]; } } void matrizescalar(float* A, float B, float* C, int fil,int col,int d3) { int size = fil * col * d3 * sizeof(float); float *d_A, *d_C; cudaMalloc((void **)&d_A, size); cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_C, size); dim3 DimGrid((col-1)/BLOCK+1, (fil-1)/BLOCK+1,(d3-1)/BLOCK+1); dim3 DimBlock(BLOCK, BLOCK, BLOCK); matrizEscalar <<< dime1, DimBlock >>> (d_A, B, d_C, fil, col,d3); cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_C); } int main() { int fila,columna,D3; float B; float *A;float *C; fila = 10; columna = 10; D3 = 10; B=2; A = (float*)malloc(fila*columna*D3*sizeof(float)); C = (float*)malloc(fila*columna*D3*sizeof(float)); for (int i = 0; i < fila*columna*D3; i++) A[i] = 2; matrizescalar(A, B, C, fila,columna,D3); }
eaa259b113616906c823d1c971fafcf133cf12d3.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <bits/stdc++.h> #define BLOCK_SIZE 32 #define EPSILON 0.1 using namespace std; bool cmp_float(float a, float b){ if (fabs(a - b) > EPSILON) return false; else return true; } void fill_matrix_random(float *mat, int rows, int cols){ for (int i = 0; i < rows; i++){ for (int j = 0; j < cols; j++){ mat[i * cols + j] = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX/10.0)); } } } bool check_matrix(float *A, float *B, int rows, int cols){ for (int i = 0; i < rows; i++){ for (int j = 0; j < cols; j++){ //cout << A[i * cols + j] << " the fuck?? " << B[i * cols +j] << endl; if (!cmp_float(A[i * cols + j], B[i * cols +j])) return false; } } return true; } void print_matrix(float *mat, int rows, int cols){ cout << "------------" << endl; for (int i = 0; i < rows; i++){ for (int j = 0; j < cols; j++){ cout << mat[i * cols + j] << " "; } cout << endl; } cout << "------------" << endl; } void mat_mul_seq(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ float sum; for(int i = 0; i < A_rows; i++){ for (int j = 0; j < B_cols; j++){ sum = 0.0; for (int k = 0; k < A_cols; k++){ sum += m_A[i * A_cols + k] * m_B[k * B_cols + j]; } m_C[i * B_cols + j] = sum; } } } __global__ void mat_mul_kernel(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ float sum = 0; int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; if(row < A_rows && col < B_cols){ for(int i = 0; i < A_cols; i ++){ sum += m_A[row * A_cols + i] * m_B[i * B_cols + col]; } m_C[row * B_cols + col] = sum; } } __global__ void mat_mul_kernel_tiled(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ __shared__ float s_A[BLOCK_SIZE][BLOCK_SIZE], s_B[BLOCK_SIZE][BLOCK_SIZE]; int blockRow = blockIdx.y; int blockCol = blockIdx.x; int threadRow = threadIdx.y; int threadCol = 
threadIdx.x; int row = blockRow * BLOCK_SIZE + threadRow; int col = blockCol * BLOCK_SIZE + threadCol; float sum = 0; for (int sm = 0; sm < ceil (A_cols / float (BLOCK_SIZE)); sm++){ if (row < A_rows && (sm * BLOCK_SIZE + threadCol) < A_cols){ s_A[threadRow][threadCol] = m_A[(row) * A_cols + (sm * BLOCK_SIZE + threadCol)]; } else{ s_A[threadRow][threadCol] = 0.0; } if (col < B_cols && (threadRow + sm * BLOCK_SIZE) < B_rows){ s_B[threadRow][threadCol] = m_B[(threadRow + sm * BLOCK_SIZE) * B_cols + (col)]; } else{ s_B[threadRow][threadCol] = 0.0; } __syncthreads(); for (int i = 0; i < BLOCK_SIZE; i++){ sum += s_A[threadRow][i] * s_B[i][threadCol]; } __syncthreads(); if (row < A_rows && col < B_cols) m_C[row * B_cols + col] = sum; } } void mat_mul_con(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ int A_size = A_rows * A_cols * sizeof(float); int B_size = B_rows * B_cols * sizeof(float); int C_size = A_rows * B_cols * sizeof(float); float *d_A, *d_B, *d_C; //1. Allocate memory for d_A, etc. on the device (hipMalloc) hipMalloc(&d_A, A_size); hipMalloc(&d_B, B_size); hipMalloc(&d_C, C_size); //2. Copy Data from host to d_A, etc. (hipMemcpy) hipMemcpy(d_A, m_A, A_size, hipMemcpyHostToDevice); hipMemcpy(d_B, m_B, B_size, hipMemcpyHostToDevice); //3. Kernel Launch Code dim3 dimGrid(ceil(max(A_cols, B_cols) / float (BLOCK_SIZE)), ceil(max(A_rows, B_rows) / float (BLOCK_SIZE)), 1); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); hipLaunchKernelGGL(( mat_mul_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols); hipDeviceSynchronize(); //4. 
Copy d_C to C from device, free device memory (cusdaFree), sync if neccessary hipMemcpy (m_C, d_C, C_size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); } void mat_mul_con_tiled(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ int A_size = A_rows * A_cols * sizeof(float); int B_size = B_rows * B_cols * sizeof(float); int C_size = A_rows * B_cols * sizeof(float); float *d_A, *d_B, *d_C; //1. Allocate memory for d_A, etc. on the device (hipMalloc) hipMalloc(&d_A, A_size); hipMalloc(&d_B, B_size); hipMalloc(&d_C, C_size); //2. Copy Data from host to d_A, etc. (hipMemcpy) hipMemcpy(d_A, m_A, A_size, hipMemcpyHostToDevice); hipMemcpy(d_B, m_B, B_size, hipMemcpyHostToDevice); //3. Kernel Launch Code dim3 dimGrid(ceil(max(A_cols, B_cols) / float (BLOCK_SIZE)), ceil(max(A_rows, B_rows) / float (BLOCK_SIZE)), 1); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); hipLaunchKernelGGL(( mat_mul_kernel_tiled), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols); hipDeviceSynchronize(); //4. 
Copy d_C to C from device, free device memory (cusdaFree), sync if neccessary hipMemcpy (m_C, d_C, C_size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); } int main(int argc, char **argv){ if (argc < 5){ cout << "Usage: ./mul max_number step offset_A, offset_B" << endl; return 0; } const int max_number = atoi(argv[1]), step = atoi(argv[2]), offset_A = atoi(argv[3]), offset_B = atoi(argv[4]); srand (time(NULL)); ofstream x("x.mio"), y_seq("y_seq.mio"), y_con("y_con.mio"), y_con_tiled("y_con_tiled.mio"); clock_t begin, end; double elapsed_secs; for (int i = step; i <= max_number; i += step){ float *A, *B, *C, *D; A = (float*) malloc((i + offset_A) * i * sizeof(float)); B = (float*) malloc((i + offset_B) * i * sizeof(float)); C = (float*) malloc((i + offset_A) * (i + offset_B) * sizeof(float)); D = (float*) malloc((i + offset_A) * (i + offset_B) * sizeof(float)); x << i << endl; fill_matrix_random(A, i + offset_A, i); fill_matrix_random(B, i, i + offset_B); begin = clock(); mat_mul_seq(A, B, C, i + offset_A, i, i, i + offset_B); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; y_seq << elapsed_secs << endl; begin = clock(); mat_mul_con(A, B, D, i + offset_A, i, i, i + offset_B); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; y_con << elapsed_secs << endl; cout << "-----------" << endl << "Not Tiled: "; if (check_matrix(C, D, i + offset_A, i + offset_B)) cout << "All good" << endl; else cout << "Something Went Wrong" << endl; cout << "-----------" << endl; begin = clock(); mat_mul_con_tiled(A, B, D, i + offset_A, i, i, i + offset_B); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; y_con_tiled << elapsed_secs << endl; cout << "Tiled: "; if (check_matrix(C, D, i + offset_A, i + offset_B)) cout << "All good" << endl; else cout << "Something Went Wrong" << endl; cout << "-----------" << endl; free(A); free(B); free(C); free(D); } return 0; }
eaa259b113616906c823d1c971fafcf133cf12d3.cu
#include <cuda.h> #include <bits/stdc++.h> #define BLOCK_SIZE 32 #define EPSILON 0.1 using namespace std; bool cmp_float(float a, float b){ if (fabs(a - b) > EPSILON) return false; else return true; } void fill_matrix_random(float *mat, int rows, int cols){ for (int i = 0; i < rows; i++){ for (int j = 0; j < cols; j++){ mat[i * cols + j] = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX/10.0)); } } } bool check_matrix(float *A, float *B, int rows, int cols){ for (int i = 0; i < rows; i++){ for (int j = 0; j < cols; j++){ //cout << A[i * cols + j] << " the fuck?? " << B[i * cols +j] << endl; if (!cmp_float(A[i * cols + j], B[i * cols +j])) return false; } } return true; } void print_matrix(float *mat, int rows, int cols){ cout << "------------" << endl; for (int i = 0; i < rows; i++){ for (int j = 0; j < cols; j++){ cout << mat[i * cols + j] << " "; } cout << endl; } cout << "------------" << endl; } void mat_mul_seq(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ float sum; for(int i = 0; i < A_rows; i++){ for (int j = 0; j < B_cols; j++){ sum = 0.0; for (int k = 0; k < A_cols; k++){ sum += m_A[i * A_cols + k] * m_B[k * B_cols + j]; } m_C[i * B_cols + j] = sum; } } } __global__ void mat_mul_kernel(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ float sum = 0; int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; if(row < A_rows && col < B_cols){ for(int i = 0; i < A_cols; i ++){ sum += m_A[row * A_cols + i] * m_B[i * B_cols + col]; } m_C[row * B_cols + col] = sum; } } __global__ void mat_mul_kernel_tiled(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ __shared__ float s_A[BLOCK_SIZE][BLOCK_SIZE], s_B[BLOCK_SIZE][BLOCK_SIZE]; int blockRow = blockIdx.y; int blockCol = blockIdx.x; int threadRow = threadIdx.y; int threadCol = threadIdx.x; int row = blockRow * BLOCK_SIZE + threadRow; int col = blockCol * 
BLOCK_SIZE + threadCol; float sum = 0; for (int sm = 0; sm < ceil (A_cols / float (BLOCK_SIZE)); sm++){ if (row < A_rows && (sm * BLOCK_SIZE + threadCol) < A_cols){ s_A[threadRow][threadCol] = m_A[(row) * A_cols + (sm * BLOCK_SIZE + threadCol)]; } else{ s_A[threadRow][threadCol] = 0.0; } if (col < B_cols && (threadRow + sm * BLOCK_SIZE) < B_rows){ s_B[threadRow][threadCol] = m_B[(threadRow + sm * BLOCK_SIZE) * B_cols + (col)]; } else{ s_B[threadRow][threadCol] = 0.0; } __syncthreads(); for (int i = 0; i < BLOCK_SIZE; i++){ sum += s_A[threadRow][i] * s_B[i][threadCol]; } __syncthreads(); if (row < A_rows && col < B_cols) m_C[row * B_cols + col] = sum; } } void mat_mul_con(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ int A_size = A_rows * A_cols * sizeof(float); int B_size = B_rows * B_cols * sizeof(float); int C_size = A_rows * B_cols * sizeof(float); float *d_A, *d_B, *d_C; //1. Allocate memory for d_A, etc. on the device (cudaMalloc) cudaMalloc(&d_A, A_size); cudaMalloc(&d_B, B_size); cudaMalloc(&d_C, C_size); //2. Copy Data from host to d_A, etc. (cudaMemcpy) cudaMemcpy(d_A, m_A, A_size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, m_B, B_size, cudaMemcpyHostToDevice); //3. Kernel Launch Code dim3 dimGrid(ceil(max(A_cols, B_cols) / float (BLOCK_SIZE)), ceil(max(A_rows, B_rows) / float (BLOCK_SIZE)), 1); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); mat_mul_kernel<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols); cudaDeviceSynchronize(); //4. Copy d_C to C from device, free device memory (cusdaFree), sync if neccessary cudaMemcpy (m_C, d_C, C_size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } void mat_mul_con_tiled(float *m_A, float *m_B, float *m_C, int A_rows, int A_cols, int B_rows, int B_cols){ int A_size = A_rows * A_cols * sizeof(float); int B_size = B_rows * B_cols * sizeof(float); int C_size = A_rows * B_cols * sizeof(float); float *d_A, *d_B, *d_C; //1. 
Allocate memory for d_A, etc. on the device (cudaMalloc) cudaMalloc(&d_A, A_size); cudaMalloc(&d_B, B_size); cudaMalloc(&d_C, C_size); //2. Copy Data from host to d_A, etc. (cudaMemcpy) cudaMemcpy(d_A, m_A, A_size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, m_B, B_size, cudaMemcpyHostToDevice); //3. Kernel Launch Code dim3 dimGrid(ceil(max(A_cols, B_cols) / float (BLOCK_SIZE)), ceil(max(A_rows, B_rows) / float (BLOCK_SIZE)), 1); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); mat_mul_kernel_tiled<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols); cudaDeviceSynchronize(); //4. Copy d_C to C from device, free device memory (cusdaFree), sync if neccessary cudaMemcpy (m_C, d_C, C_size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } int main(int argc, char **argv){ if (argc < 5){ cout << "Usage: ./mul max_number step offset_A, offset_B" << endl; return 0; } const int max_number = atoi(argv[1]), step = atoi(argv[2]), offset_A = atoi(argv[3]), offset_B = atoi(argv[4]); srand (time(NULL)); ofstream x("x.mio"), y_seq("y_seq.mio"), y_con("y_con.mio"), y_con_tiled("y_con_tiled.mio"); clock_t begin, end; double elapsed_secs; for (int i = step; i <= max_number; i += step){ float *A, *B, *C, *D; A = (float*) malloc((i + offset_A) * i * sizeof(float)); B = (float*) malloc((i + offset_B) * i * sizeof(float)); C = (float*) malloc((i + offset_A) * (i + offset_B) * sizeof(float)); D = (float*) malloc((i + offset_A) * (i + offset_B) * sizeof(float)); x << i << endl; fill_matrix_random(A, i + offset_A, i); fill_matrix_random(B, i, i + offset_B); begin = clock(); mat_mul_seq(A, B, C, i + offset_A, i, i, i + offset_B); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; y_seq << elapsed_secs << endl; begin = clock(); mat_mul_con(A, B, D, i + offset_A, i, i, i + offset_B); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; y_con << elapsed_secs << endl; cout << "-----------" << endl << "Not Tiled: "; if 
(check_matrix(C, D, i + offset_A, i + offset_B)) cout << "All good" << endl; else cout << "Something Went Wrong" << endl; cout << "-----------" << endl; begin = clock(); mat_mul_con_tiled(A, B, D, i + offset_A, i, i, i + offset_B); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; y_con_tiled << elapsed_secs << endl; cout << "Tiled: "; if (check_matrix(C, D, i + offset_A, i + offset_B)) cout << "All good" << endl; else cout << "Something Went Wrong" << endl; cout << "-----------" << endl; free(A); free(B); free(C); free(D); } return 0; }
e15ac436b736672904806c0520e239b9357a7fd5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sequence.cuh" __global__ void make_tensors_kernel(Sequence** seq){ const unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; //Ensures that we don't over-index the G_tensor if more threads used than time-steps required. if (tid < (*seq)->getSteps()){ real element[9] = {0}; real RF_element[3] = {0}; (*seq)->get_G_tensor(tid, element, RF_element); for (int i = 0; i < 9; i++){ (*seq)->G_tensor_t[9*tid+i] = element[i]; } for (int i = 0; i < 3; i++){ (*seq)->RF_tensor_t[3*tid+i] = RF_element[i]; } } } __global__ void assign_tensors_ptr(Sequence** seq, real* G_tensor_t_ptr, real* RF_tensor_t_ptr){ (*seq)->G_tensor_t = G_tensor_t_ptr; (*seq)->RF_tensor_t = RF_tensor_t_ptr; }
e15ac436b736672904806c0520e239b9357a7fd5.cu
#include "sequence.cuh" __global__ void make_tensors_kernel(Sequence** seq){ const unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; //Ensures that we don't over-index the G_tensor if more threads used than time-steps required. if (tid < (*seq)->getSteps()){ real element[9] = {0}; real RF_element[3] = {0}; (*seq)->get_G_tensor(tid, element, RF_element); for (int i = 0; i < 9; i++){ (*seq)->G_tensor_t[9*tid+i] = element[i]; } for (int i = 0; i < 3; i++){ (*seq)->RF_tensor_t[3*tid+i] = RF_element[i]; } } } __global__ void assign_tensors_ptr(Sequence** seq, real* G_tensor_t_ptr, real* RF_tensor_t_ptr){ (*seq)->G_tensor_t = G_tensor_t_ptr; (*seq)->RF_tensor_t = RF_tensor_t_ptr; }
3a62c23a3240568be8363967220682bafd1c090a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "symbols/NaN.cuh" __device__ float backwardSigmoid (float forward, float chain) { return forward * (1.0f - forward) * chain; } __global__ void backwardSigmoidKernel ( int batchSize, int numberRows, int numberEntriesPerInstance, int numberIterations, float *forward, float *chain, float *destination) { int indexInstance = blockIdx.x; int indexColumn = blockIdx.y; int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance; int startColumnWithinInstance = indexColumn * numberRows; int startRowWithinColumn = threadIdx.x * numberIterations; int firstEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + startRowWithinColumn; int startNextColumn = startInstanceWithinBatch + startColumnWithinInstance + numberRows; if(firstEntryWithinBatch < startNextColumn) { int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumn); if(indexInstance < batchSize) { for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) { destination[indexEntry] = backwardSigmoid(forward[indexEntry], chain[indexEntry]); } } else { setToNan(destination, firstEntryWithinBatch, lastEntryWithinBatch); } } }
3a62c23a3240568be8363967220682bafd1c090a.cu
#include "symbols/NaN.cuh" __device__ float backwardSigmoid (float forward, float chain) { return forward * (1.0f - forward) * chain; } __global__ void backwardSigmoidKernel ( int batchSize, int numberRows, int numberEntriesPerInstance, int numberIterations, float *forward, float *chain, float *destination) { int indexInstance = blockIdx.x; int indexColumn = blockIdx.y; int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance; int startColumnWithinInstance = indexColumn * numberRows; int startRowWithinColumn = threadIdx.x * numberIterations; int firstEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + startRowWithinColumn; int startNextColumn = startInstanceWithinBatch + startColumnWithinInstance + numberRows; if(firstEntryWithinBatch < startNextColumn) { int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumn); if(indexInstance < batchSize) { for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) { destination[indexEntry] = backwardSigmoid(forward[indexEntry], chain[indexEntry]); } } else { setToNan(destination, firstEntryWithinBatch, lastEntryWithinBatch); } } }
9daecdaee118d87b37a1118e829d4078381348bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void UpdateVelocitiesKernel (double *VthetaInt, double *VradInt, double *invRmed, double *Rmed, double *Rsup, double *Rinf, double *invdiffRmed, double *invdiffRsup, double *Dens, double *invRinf, double *TAURR, double *TAURP, double *TAUPP, double DeltaT, int nrad, int nsec) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = threadIdx.y + blockDim.y*blockIdx.y; double dphi, invdphi; /* Now we can update velocities with the viscous source term of Navier-Stokes equation */ /* vtheta first */ if (i > 0 && i<nrad-1 && j<nsec){ dphi = 2.0*M_PI/(double)nsec; invdphi = 1.0/dphi; VthetaInt[i*nsec +j] += DeltaT*invRmed[i]*((Rsup[i]*TAURP[(i+1)*nsec+ j] - Rinf[i]*TAURP[i*nsec +j])*invdiffRsup[i] + \ (TAUPP[i*nsec +j] - TAUPP[i*nsec + ((j-1)+nsec)%nsec])*invdphi + 0.5*(TAURP[i*nsec + j] + TAURP[(i+1)*nsec +j]))/ \ (0.5*(Dens[i*nsec +j]+Dens[i*nsec + ((j-1)+nsec)%nsec])); } /* now vrad */ if (i > 0 && i<nrad && j<nsec){ dphi = 2.0*M_PI/(double)nsec; invdphi = 1.0/dphi; VradInt[i*nsec +j] += DeltaT*invRinf[i]*((Rmed[i]*TAURR[i*nsec +j] - Rmed[i-1]*TAURR[(i-1)*nsec + j])*invdiffRmed[i] + \ (TAURP[i*nsec + (j+1)%nsec] - TAURP[i*nsec + j])*invdphi - 0.5*(TAUPP[i*nsec +j] + TAUPP[(i-1)*nsec + j]))/ \ (0.5*(Dens[i*nsec +j] + Dens[(i-1)*nsec + j])); } }
9daecdaee118d87b37a1118e829d4078381348bf.cu
#include "includes.h" __global__ void UpdateVelocitiesKernel (double *VthetaInt, double *VradInt, double *invRmed, double *Rmed, double *Rsup, double *Rinf, double *invdiffRmed, double *invdiffRsup, double *Dens, double *invRinf, double *TAURR, double *TAURP, double *TAUPP, double DeltaT, int nrad, int nsec) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = threadIdx.y + blockDim.y*blockIdx.y; double dphi, invdphi; /* Now we can update velocities with the viscous source term of Navier-Stokes equation */ /* vtheta first */ if (i > 0 && i<nrad-1 && j<nsec){ dphi = 2.0*M_PI/(double)nsec; invdphi = 1.0/dphi; VthetaInt[i*nsec +j] += DeltaT*invRmed[i]*((Rsup[i]*TAURP[(i+1)*nsec+ j] - Rinf[i]*TAURP[i*nsec +j])*invdiffRsup[i] + \ (TAUPP[i*nsec +j] - TAUPP[i*nsec + ((j-1)+nsec)%nsec])*invdphi + 0.5*(TAURP[i*nsec + j] + TAURP[(i+1)*nsec +j]))/ \ (0.5*(Dens[i*nsec +j]+Dens[i*nsec + ((j-1)+nsec)%nsec])); } /* now vrad */ if (i > 0 && i<nrad && j<nsec){ dphi = 2.0*M_PI/(double)nsec; invdphi = 1.0/dphi; VradInt[i*nsec +j] += DeltaT*invRinf[i]*((Rmed[i]*TAURR[i*nsec +j] - Rmed[i-1]*TAURR[(i-1)*nsec + j])*invdiffRmed[i] + \ (TAURP[i*nsec + (j+1)%nsec] - TAURP[i*nsec + j])*invdphi - 0.5*(TAUPP[i*nsec +j] + TAUPP[(i-1)*nsec + j]))/ \ (0.5*(Dens[i*nsec +j] + Dens[(i-1)*nsec + j])); } }
b9fe72dc75bbb9ef156340d93a2ecdb32dc9e579.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
************************************************************************/ #include <sys/types.h> #include <unistd.h> #include "nccl.h" #include "mpi.h" #define CUDACHECK(cmd) do { \ hipError_t e = cmd; \ if( e != hipSuccess ) { \ printf("Cuda failure %s:%d '%s'\n", \ __FILE__,__LINE__,hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } while(false) int main(int argc, char *argv[]) { ncclUniqueId commId; int size, rank; int ret; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); int gpu = atoi(argv[rank+1]); printf("MPI Rank %d running on GPU %d\n", rank, gpu); // We have to set our device before NCCL init CUDACHECK(hipSetDevice(gpu)); MPI_Barrier(MPI_COMM_WORLD); ncclComm_t comm; // Let's use rank 0 PID as job ID ncclGetUniqueId(&commId); MPI_Bcast(&commId, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD); ret = ncclCommInitRank(&comm, size, commId, rank); if (ret != ncclSuccess) { printf("NCCL Init failed : %d\n", ret); exit(1); } int *dptr; CUDACHECK(hipMalloc(&dptr, 1024*2*sizeof(int))); int val = rank; CUDACHECK(hipMemcpy(dptr, &val, sizeof(int), hipMemcpyHostToDevice)); ncclAllReduce((const void*)dptr, (void*)(dptr+1024), 1024, ncclInt, ncclSum, comm, hipStreamDefault); CUDACHECK(hipMemcpy(&val, (dptr+1024), sizeof(int), hipMemcpyDeviceToHost)); printf("Sum is %d\n", val); CUDACHECK(hipFree(dptr)); MPI_Finalize(); ncclCommDestroy(comm); return 0; }
b9fe72dc75bbb9ef156340d93a2ecdb32dc9e579.cu
/************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
************************************************************************/ #include <sys/types.h> #include <unistd.h> #include "nccl.h" #include "mpi.h" #define CUDACHECK(cmd) do { \ cudaError_t e = cmd; \ if( e != cudaSuccess ) { \ printf("Cuda failure %s:%d '%s'\n", \ __FILE__,__LINE__,cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } while(false) int main(int argc, char *argv[]) { ncclUniqueId commId; int size, rank; int ret; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); int gpu = atoi(argv[rank+1]); printf("MPI Rank %d running on GPU %d\n", rank, gpu); // We have to set our device before NCCL init CUDACHECK(cudaSetDevice(gpu)); MPI_Barrier(MPI_COMM_WORLD); ncclComm_t comm; // Let's use rank 0 PID as job ID ncclGetUniqueId(&commId); MPI_Bcast(&commId, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD); ret = ncclCommInitRank(&comm, size, commId, rank); if (ret != ncclSuccess) { printf("NCCL Init failed : %d\n", ret); exit(1); } int *dptr; CUDACHECK(cudaMalloc(&dptr, 1024*2*sizeof(int))); int val = rank; CUDACHECK(cudaMemcpy(dptr, &val, sizeof(int), cudaMemcpyHostToDevice)); ncclAllReduce((const void*)dptr, (void*)(dptr+1024), 1024, ncclInt, ncclSum, comm, cudaStreamDefault); CUDACHECK(cudaMemcpy(&val, (dptr+1024), sizeof(int), cudaMemcpyDeviceToHost)); printf("Sum is %d\n", val); CUDACHECK(cudaFree(dptr)); MPI_Finalize(); ncclCommDestroy(comm); return 0; }
bf5516c4741dc6456c80e5354edc4a4c8ea52366.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "final_map_index_to_prefix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; uint8_t *hashes = NULL; hipMalloc(&hashes, XSIZE*YSIZE); uint32_t *sort_indices = NULL; hipMalloc(&sort_indices, XSIZE*YSIZE); uint32_t *off_map = NULL; hipMalloc(&off_map, XSIZE*YSIZE); uint32_t *comb_count = NULL; hipMalloc(&comb_count, XSIZE*YSIZE); uint32_t *comb_sum = NULL; hipMalloc(&comb_sum, XSIZE*YSIZE); uint32_t *comb_prefix = NULL; hipMalloc(&comb_prefix, XSIZE*YSIZE); uint32_t r = 1; uint32_t size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( final_map_index_to_prefix), dim3(gridBlock),dim3(threadBlock), 0, 0, hashes,sort_indices,off_map,comb_count,comb_sum,comb_prefix,r,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( final_map_index_to_prefix), dim3(gridBlock),dim3(threadBlock), 0, 0, 
hashes,sort_indices,off_map,comb_count,comb_sum,comb_prefix,r,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( final_map_index_to_prefix), dim3(gridBlock),dim3(threadBlock), 0, 0, hashes,sort_indices,off_map,comb_count,comb_sum,comb_prefix,r,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bf5516c4741dc6456c80e5354edc4a4c8ea52366.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "final_map_index_to_prefix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; uint8_t *hashes = NULL; cudaMalloc(&hashes, XSIZE*YSIZE); uint32_t *sort_indices = NULL; cudaMalloc(&sort_indices, XSIZE*YSIZE); uint32_t *off_map = NULL; cudaMalloc(&off_map, XSIZE*YSIZE); uint32_t *comb_count = NULL; cudaMalloc(&comb_count, XSIZE*YSIZE); uint32_t *comb_sum = NULL; cudaMalloc(&comb_sum, XSIZE*YSIZE); uint32_t *comb_prefix = NULL; cudaMalloc(&comb_prefix, XSIZE*YSIZE); uint32_t r = 1; uint32_t size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); final_map_index_to_prefix<<<gridBlock,threadBlock>>>(hashes,sort_indices,off_map,comb_count,comb_sum,comb_prefix,r,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { final_map_index_to_prefix<<<gridBlock,threadBlock>>>(hashes,sort_indices,off_map,comb_count,comb_sum,comb_prefix,r,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; 
loop_counter++) { final_map_index_to_prefix<<<gridBlock,threadBlock>>>(hashes,sort_indices,off_map,comb_count,comb_sum,comb_prefix,r,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2e9f6fe9e5da37b4b87f50edff085b3eedba44dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor<scalar_t, 5> idata, PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][t1][h1][w1]; odata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 
1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = t0lambda * (h0lambda * (w0lambda * idata[n][c][t1][h1][w1] + w1lambda * idata[n][c][t1][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1][h1 + h1p][w1] + w1lambda * idata[n][c][t1][h1 + h1p][w1 + w1p])) + t1lambda * (h0lambda * (w0lambda * idata[n][c][t1 + t1p][h1][w1] + w1lambda * idata[n][c][t1 + t1p][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1 + t1p][h1 + h1p][w1] + w1lambda * idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p])); odata[n][c][t2][h2][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_backward_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor<scalar_t, 5> idata, const PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); 
const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][t1][h1][w1]; idata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][t2][h2][w2]; atomicAdd( &idata[n][c][t1][h1][w1], static_cast<scalar_t>(t0lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1][w1 + w1p], static_cast<scalar_t>(t0lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1], static_cast<scalar_t>(t0lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t0lambda * h1lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1], static_cast<scalar_t>(t1lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1 + w1p], static_cast<scalar_t>(t1lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1], static_cast<scalar_t>(t1lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t1lambda * h1lambda * w1lambda * d2val)); } } } } static void upsample_trilinear3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_trilinear3d_out_cuda", {input_arg, output_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input.size(0); int channels = input.size(1); int input_depth = input.size(2); int input_height = input.size(3); int input_width = input.size(4); upsample_3d_shape_check( input, Tensor(), nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); output.resize_({input.size(0), input.size(1), output_depth, 
output_height, output_width}); output.zero_(); AT_ASSERT( input_depth > 0 && input_height > 0 && input_width > 0 && output_depth > 0 && output_height > 0 && output_width > 0); const int num_kernels = output_depth * output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_trilinear3d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor<scalar_t, 5>(); auto odata = output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); hipLaunchKernelGGL(( upsample_trilinear3d_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } static void upsample_trilinear3d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_trilinear3d_backward_out_cuda", {grad_output_arg, grad_input_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); AT_CHECK( input_size.size() == 5, "It is expected input_size equals to 5, but got size ", input_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input_size[0]; int channels = 
input_size[1]; int input_depth = input_size[2]; int input_height = input_size[3]; int input_width = input_size[4]; upsample_3d_shape_check( Tensor(), grad_output_, nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_( {nbatch, channels, input_depth, input_height, input_width}); grad_input.zero_(); const int num_kernels = output_depth * output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_trilinear3d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor<scalar_t, 5>(); auto odata = grad_output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); hipLaunchKernelGGL(( upsample_trilinear3d_backward_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& upsample_trilinear3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; } Tensor upsample_trilinear3d_cuda( const Tensor& input, IntArrayRef output_size, bool align_corners) { Tensor output = at::empty_like(input); upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; 
} Tensor& upsample_trilinear3d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } Tensor upsample_trilinear3d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { Tensor grad_input = at::empty_like(grad_output); upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } } // namespace native } // namespace at
2e9f6fe9e5da37b4b87f50edff085b3eedba44dd.cu
// Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor<scalar_t, 5> idata, PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][t1][h1][w1]; odata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 
1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = t0lambda * (h0lambda * (w0lambda * idata[n][c][t1][h1][w1] + w1lambda * idata[n][c][t1][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1][h1 + h1p][w1] + w1lambda * idata[n][c][t1][h1 + h1p][w1 + w1p])) + t1lambda * (h0lambda * (w0lambda * idata[n][c][t1 + t1p][h1][w1] + w1lambda * idata[n][c][t1 + t1p][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1 + t1p][h1 + h1p][w1] + w1lambda * idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p])); odata[n][c][t2][h2][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_backward_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor<scalar_t, 5> idata, const PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); 
const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][t1][h1][w1]; idata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][t2][h2][w2]; atomicAdd( &idata[n][c][t1][h1][w1], static_cast<scalar_t>(t0lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1][w1 + w1p], static_cast<scalar_t>(t0lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1], static_cast<scalar_t>(t0lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t0lambda * h1lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1], static_cast<scalar_t>(t1lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1 + w1p], static_cast<scalar_t>(t1lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1], static_cast<scalar_t>(t1lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t1lambda * h1lambda * w1lambda * d2val)); } } } } static void upsample_trilinear3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_trilinear3d_out_cuda", {input_arg, output_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input.size(0); int channels = input.size(1); int input_depth = input.size(2); int input_height = input.size(3); int input_width = input.size(4); upsample_3d_shape_check( input, Tensor(), nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); output.resize_({input.size(0), input.size(1), output_depth, 
output_height, output_width}); output.zero_(); AT_ASSERT( input_depth > 0 && input_height > 0 && input_width > 0 && output_depth > 0 && output_height > 0 && output_width > 0); const int num_kernels = output_depth * output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_trilinear3d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor<scalar_t, 5>(); auto odata = output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); upsample_trilinear3d_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>( num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } static void upsample_trilinear3d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_trilinear3d_backward_out_cuda", {grad_output_arg, grad_input_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); AT_CHECK( input_size.size() == 5, "It is expected input_size equals to 5, but got size ", input_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input_size[0]; int channels = input_size[1]; int input_depth = 
input_size[2]; int input_height = input_size[3]; int input_width = input_size[4]; upsample_3d_shape_check( Tensor(), grad_output_, nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_( {nbatch, channels, input_depth, input_height, input_width}); grad_input.zero_(); const int num_kernels = output_depth * output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_trilinear3d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor<scalar_t, 5>(); auto odata = grad_output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); upsample_trilinear3d_backward_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>( num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& upsample_trilinear3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; } Tensor upsample_trilinear3d_cuda( const Tensor& input, IntArrayRef output_size, bool align_corners) { Tensor output = at::empty_like(input); upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; } Tensor& upsample_trilinear3d_backward_out_cuda( Tensor& grad_input, const 
Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } Tensor upsample_trilinear3d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { Tensor grad_input = at::empty_like(grad_output); upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } } // namespace native } // namespace at
20548f228f9314e7df04c8401650634e53c0c73c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <sys/time.h> #include <hip/hip_runtime.h> const float eps = 0.0001f; const float dt = 0.01f; const int N = 128 * 1024; #define coord float3 __global__ void integrate(coord *new_p, coord *new_v, coord *p, coord *v, int n, float dt) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) return; coord body_pos = p[index]; coord body_vel = v[index]; coord f; f.x = 0; f.y = 0; f.z = 0; for (int i = 0; i < n; i++) { coord pi = p[i]; coord r; // Vector from p[i] to body r.x = pi.x - body_pos.x; r.y = pi.y - body_pos.y; r.z = pi.z - body_pos.z; float invDist = 1.0 / sqrtf(r.x * r.x + r.y * r.y + r.z * r.z + eps * eps); float s = invDist * invDist * invDist; // Add force of body i f.x += r.x * s; f.y += r.y * s; f.z += r.z * s; } // Correct velocity body_vel.x += f.x * dt; body_vel.y += f.y * dt; body_vel.z += f.z * dt; body_pos.x += body_vel.x * dt; body_pos.y += body_vel.y * dt; body_pos.z += body_vel.z * dt; new_p[index] = body_pos; new_v[index] = body_vel; } double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } void init_rand(coord *v, int n) { for (int i = 0; i < n; i++) { v[i].x = rand() / (float)RAND_MAX - 0.5f; v[i].y = rand() / (float)RAND_MAX - 0.5f; v[i].z = rand() / (float)RAND_MAX - 0.5f; } } int main() { double tgpu = 0, tmem = 0; size_t size = sizeof(coord) * N; coord *p = (coord *)malloc(size); coord *v = (coord *)malloc(size); coord *d_p[2] = {NULL, NULL}; coord *d_v[2] = {NULL, NULL}; init_rand(p, N); init_rand(v, N); tmem = -wtime(); hipMalloc((void **)&d_p[0], size); hipMalloc((void **)&d_p[1], size); hipMalloc((void **)&d_v[0], size); hipMalloc((void **)&d_v[1], size); hipMemcpy(d_p[0], p, size, hipMemcpyHostToDevice); hipMemcpy(d_v[0], v, size, hipMemcpyHostToDevice); tmem += wtime(); tgpu = -wtime(); int threadsPerBlock = 1024; dim3 block(threadsPerBlock); dim3 grid((N + threadsPerBlock - 1) / 
threadsPerBlock); int index = 0; for (int i = 0; i < 2; i++, index ^= 1) { hipLaunchKernelGGL(( integrate), dim3(grid), dim3(block), 0, 0, d_p[index ^ 1], d_v[index ^ 1], d_p[index], d_v[index], N, dt); } hipDeviceSynchronize(); tgpu += wtime(); tmem -= wtime(); hipMemcpy(p, d_p[index], size, hipMemcpyDeviceToHost); hipMemcpy(v, d_v[index], size, hipMemcpyDeviceToHost); tmem += wtime(); /* for (int i = 0; i < N; i++) { printf("%4d: %f %f %f %f %f %f\n", i, p[i].x, p[i].y, p[i].z, v[i].x, v[i].y, v[i].z); } */ printf("sizeof(coord) = %d\n", sizeof(coord)); printf("GPU version (sec.): %.6f\n", tgpu); printf("Memory ops. (sec.): %.6f\n", tmem); printf(" Total time (sec.): %.6f\n", tgpu + tmem); hipFree(d_p[0]); hipFree(d_p[1]); hipFree(d_v[0]); hipFree(d_v[1]); free(p); free(v); hipDeviceReset(); return 0; }
20548f228f9314e7df04c8401650634e53c0c73c.cu
#include <stdio.h> #include <sys/time.h> #include <cuda_runtime.h> const float eps = 0.0001f; const float dt = 0.01f; const int N = 128 * 1024; #define coord float3 __global__ void integrate(coord *new_p, coord *new_v, coord *p, coord *v, int n, float dt) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) return; coord body_pos = p[index]; coord body_vel = v[index]; coord f; f.x = 0; f.y = 0; f.z = 0; for (int i = 0; i < n; i++) { coord pi = p[i]; coord r; // Vector from p[i] to body r.x = pi.x - body_pos.x; r.y = pi.y - body_pos.y; r.z = pi.z - body_pos.z; float invDist = 1.0 / sqrtf(r.x * r.x + r.y * r.y + r.z * r.z + eps * eps); float s = invDist * invDist * invDist; // Add force of body i f.x += r.x * s; f.y += r.y * s; f.z += r.z * s; } // Correct velocity body_vel.x += f.x * dt; body_vel.y += f.y * dt; body_vel.z += f.z * dt; body_pos.x += body_vel.x * dt; body_pos.y += body_vel.y * dt; body_pos.z += body_vel.z * dt; new_p[index] = body_pos; new_v[index] = body_vel; } double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } void init_rand(coord *v, int n) { for (int i = 0; i < n; i++) { v[i].x = rand() / (float)RAND_MAX - 0.5f; v[i].y = rand() / (float)RAND_MAX - 0.5f; v[i].z = rand() / (float)RAND_MAX - 0.5f; } } int main() { double tgpu = 0, tmem = 0; size_t size = sizeof(coord) * N; coord *p = (coord *)malloc(size); coord *v = (coord *)malloc(size); coord *d_p[2] = {NULL, NULL}; coord *d_v[2] = {NULL, NULL}; init_rand(p, N); init_rand(v, N); tmem = -wtime(); cudaMalloc((void **)&d_p[0], size); cudaMalloc((void **)&d_p[1], size); cudaMalloc((void **)&d_v[0], size); cudaMalloc((void **)&d_v[1], size); cudaMemcpy(d_p[0], p, size, cudaMemcpyHostToDevice); cudaMemcpy(d_v[0], v, size, cudaMemcpyHostToDevice); tmem += wtime(); tgpu = -wtime(); int threadsPerBlock = 1024; dim3 block(threadsPerBlock); dim3 grid((N + threadsPerBlock - 1) / threadsPerBlock); int index = 0; for (int i = 0; i < 
2; i++, index ^= 1) { integrate<<<grid, block>>>(d_p[index ^ 1], d_v[index ^ 1], d_p[index], d_v[index], N, dt); } cudaDeviceSynchronize(); tgpu += wtime(); tmem -= wtime(); cudaMemcpy(p, d_p[index], size, cudaMemcpyDeviceToHost); cudaMemcpy(v, d_v[index], size, cudaMemcpyDeviceToHost); tmem += wtime(); /* for (int i = 0; i < N; i++) { printf("%4d: %f %f %f %f %f %f\n", i, p[i].x, p[i].y, p[i].z, v[i].x, v[i].y, v[i].z); } */ printf("sizeof(coord) = %d\n", sizeof(coord)); printf("GPU version (sec.): %.6f\n", tgpu); printf("Memory ops. (sec.): %.6f\n", tmem); printf(" Total time (sec.): %.6f\n", tgpu + tmem); cudaFree(d_p[0]); cudaFree(d_p[1]); cudaFree(d_v[0]); cudaFree(d_v[1]); free(p); free(v); cudaDeviceReset(); return 0; }
6be9e0c4da8df279e983af6cc22145c4327eb6b6.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BFS-Reverse/BFS-Reverse.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <hip/hip_runtime_api.h> //--profile-from-start off #include <BufferPool.cuh> int exec(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; using vid_t = int; using dst_t = int; BufferPool pool; using namespace graph::structure_prop; using namespace graph::parsing_prop; // graph::GraphStd<vid_t, eoff_t> graph; graph::GraphStd<vid_t, eoff_t> graph(DIRECTED | ENABLE_INGOING); CommandLineParam cmd(graph, argc, argv,false); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetInit hornet_init_inverse(graph.nV(), graph.nE(), graph.csr_in_offsets(), graph.csr_in_edges()); std::vector<wgt0_t> edge_meta_0(graph.nE(), 0); hornet_init.insertEdgeData(edge_meta_0.data()); hornet_init_inverse.insertEdgeData(edge_meta_0.data()); HornetGraph hornet_graph_inv(hornet_init_inverse); // std::cout << "hornet_graph_inv : " << hornet_graph_inv.nV() << " " << hornet_graph_inv.nE() << std::endl; // std::cout << "hornet_graph : " << hornet_graph.nV() << " " << hornet_graph.nE() << std::endl; // for(int i=0; i<10; i++){ // std::cout << graph.csr_in_offsets()[i] << " : " << graph.csr_out_offsets()[i] << std::endl; // } vid_t root = graph.max_out_degree_id(); // if (argc==3) // root = atoi(argv[2]); int numberRoots = 10; if (argc>=3) numberRoots = atoi(argv[2]); int alg = 0; if (argc>=4) alg = atoi(argv[3]); int deletion = 0; if (argc>=5) deletion = atoi(argv[4]); int timeSection = 0; if (argc>=6) timeSection = atoi(argv[5]); std::cout << "My root is " << root << std::endl; // rev_del_bfs.set_parameters(root); float totalTime = 0.0; Timer<DEVICE> TM; HornetGraph hornet_graph(hornet_init); ReverseDeleteBFS1 rev_del_bfs(hornet_graph, hornet_graph_inv, 
graph.csr_out_offsets(),graph.csr_in_offsets()); rev_del_bfs.sortHornets(hornet_graph_inv); rev_del_bfs.sortHornets(hornet_graph); rev_del_bfs.SetInverseIndices(hornet_graph_inv); for (int i=0; i<numberRoots;i++){ // HornetGraph hornet_graph(hornet_init); // ReverseDeleteBFS rev_del_bfs(hornet_graph, hornet_graph_inv); rev_del_bfs.reset(); if(deletion!=0){ // rev_del_bfs.sortHornets(hornet_graph_inv); // if(i==0){ // rev_del_bfs.sortHornets(hornet_graph_inv); // rev_del_bfs.sortHornets(hornet_graph); // } // rev_del_bfs.SetInverseIndices(hornet_graph_inv); } // hipProfilerStart(); TM.start(); rev_del_bfs.set_parameters((root+i)%graph.nV()); if(deletion==0){ rev_del_bfs.run(hornet_graph_inv,alg,timeSection); } else{ rev_del_bfs.runNoDelete(hornet_graph_inv,alg,timeSection); } TM.stop(); // printf("duration %f\n",TM.duration()); totalTime += TM.duration(); if (alg==1){ timeSection+=0; totalTime -= 0.0000001; } // hipProfilerStop(); } printf("\nReverse BFS time: %f ms\n",totalTime); int N=graph.nE(); vid_t* d_temp; pool.allocate(&d_temp,N); TM.start(); for (int i=0; i<5000; i++){ hipMemset(d_temp,0,sizeof(vid_t)*N); } TM.stop(); // gpu::free(d_temp); // TM.print("Reverse BFS"); return 0; } int main(int argc, char* argv[]) { int ret = 0; // hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory. {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. ret = exec(argc, argv); }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. // hornets_nest::gpu::finalizeRMMPoolAllocation(); return ret; }
6be9e0c4da8df279e983af6cc22145c4327eb6b6.cu
/** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BFS-Reverse/BFS-Reverse.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <cuda_profiler_api.h> //--profile-from-start off #include <BufferPool.cuh> int exec(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; using vid_t = int; using dst_t = int; BufferPool pool; using namespace graph::structure_prop; using namespace graph::parsing_prop; // graph::GraphStd<vid_t, eoff_t> graph; graph::GraphStd<vid_t, eoff_t> graph(DIRECTED | ENABLE_INGOING); CommandLineParam cmd(graph, argc, argv,false); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetInit hornet_init_inverse(graph.nV(), graph.nE(), graph.csr_in_offsets(), graph.csr_in_edges()); std::vector<wgt0_t> edge_meta_0(graph.nE(), 0); hornet_init.insertEdgeData(edge_meta_0.data()); hornet_init_inverse.insertEdgeData(edge_meta_0.data()); HornetGraph hornet_graph_inv(hornet_init_inverse); // std::cout << "hornet_graph_inv : " << hornet_graph_inv.nV() << " " << hornet_graph_inv.nE() << std::endl; // std::cout << "hornet_graph : " << hornet_graph.nV() << " " << hornet_graph.nE() << std::endl; // for(int i=0; i<10; i++){ // std::cout << graph.csr_in_offsets()[i] << " : " << graph.csr_out_offsets()[i] << std::endl; // } vid_t root = graph.max_out_degree_id(); // if (argc==3) // root = atoi(argv[2]); int numberRoots = 10; if (argc>=3) numberRoots = atoi(argv[2]); int alg = 0; if (argc>=4) alg = atoi(argv[3]); int deletion = 0; if (argc>=5) deletion = atoi(argv[4]); int timeSection = 0; if (argc>=6) timeSection = atoi(argv[5]); std::cout << "My root is " << root << std::endl; // rev_del_bfs.set_parameters(root); float totalTime = 0.0; Timer<DEVICE> TM; HornetGraph hornet_graph(hornet_init); ReverseDeleteBFS1 rev_del_bfs(hornet_graph, hornet_graph_inv, graph.csr_out_offsets(),graph.csr_in_offsets()); 
rev_del_bfs.sortHornets(hornet_graph_inv); rev_del_bfs.sortHornets(hornet_graph); rev_del_bfs.SetInverseIndices(hornet_graph_inv); for (int i=0; i<numberRoots;i++){ // HornetGraph hornet_graph(hornet_init); // ReverseDeleteBFS rev_del_bfs(hornet_graph, hornet_graph_inv); rev_del_bfs.reset(); if(deletion!=0){ // rev_del_bfs.sortHornets(hornet_graph_inv); // if(i==0){ // rev_del_bfs.sortHornets(hornet_graph_inv); // rev_del_bfs.sortHornets(hornet_graph); // } // rev_del_bfs.SetInverseIndices(hornet_graph_inv); } // cudaProfilerStart(); TM.start(); rev_del_bfs.set_parameters((root+i)%graph.nV()); if(deletion==0){ rev_del_bfs.run(hornet_graph_inv,alg,timeSection); } else{ rev_del_bfs.runNoDelete(hornet_graph_inv,alg,timeSection); } TM.stop(); // printf("duration %f\n",TM.duration()); totalTime += TM.duration(); if (alg==1){ timeSection+=0; totalTime -= 0.0000001; } // cudaProfilerStop(); } printf("\nReverse BFS time: %f ms\n",totalTime); int N=graph.nE(); vid_t* d_temp; pool.allocate(&d_temp,N); TM.start(); for (int i=0; i<5000; i++){ cudaMemset(d_temp,0,sizeof(vid_t)*N); } TM.stop(); // gpu::free(d_temp); // TM.print("Reverse BFS"); return 0; } int main(int argc, char* argv[]) { int ret = 0; // hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory. {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. ret = exec(argc, argv); }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. // hornets_nest::gpu::finalizeRMMPoolAllocation(); return ret; }
b24fed938609d8853f0357bdeeb2b8dd6fd76110.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ long long int mod(int base, int exponent, int den) { long long int ret; ret = 1; for (int i = 0; i < exponent; i++) { ret *= base; ret = ret % den; } return ret; } __device__ unsigned int mod_optimized(unsigned int base,unsigned long int exp,unsigned long int modulus){ unsigned long p=1; unsigned long tmp=base; while(exp){ tmp%=modulus; if(exp%2){ p*=tmp; p%=modulus; } tmp=tmp*tmp; exp/=2; } return p; } __global__ void rsa(unsigned int * num,unsigned long int * key,unsigned long int * den,const int len) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i>=len)return; unsigned int temp; temp = mod_optimized(num[i], *key, *den); //temp = mod_optimized(num[i], *key, *den); atomicExch(&num[i], temp); }
b24fed938609d8853f0357bdeeb2b8dd6fd76110.cu
__device__ long long int mod(int base, int exponent, int den) { long long int ret; ret = 1; for (int i = 0; i < exponent; i++) { ret *= base; ret = ret % den; } return ret; } __device__ unsigned int mod_optimized(unsigned int base,unsigned long int exp,unsigned long int modulus){ unsigned long p=1; unsigned long tmp=base; while(exp){ tmp%=modulus; if(exp%2){ p*=tmp; p%=modulus; } tmp=tmp*tmp; exp/=2; } return p; } __global__ void rsa(unsigned int * num,unsigned long int * key,unsigned long int * den,const int len) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i>=len)return; unsigned int temp; temp = mod_optimized(num[i], *key, *den); //temp = mod_optimized(num[i], *key, *den); atomicExch(&num[i], temp); }
f338df279a756e14435c384cbac0241e325c84a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=128 --blockDim=256 __global__ void modulateKernel(float *d_A, float *d_B, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; float rcpN = 1.0f / (float)N; for (int pos = tid; pos < N; pos += numThreads) { d_A[pos] *= d_B[pos] * rcpN; } }
f338df279a756e14435c384cbac0241e325c84a4.cu
//pass //--gridDim=128 --blockDim=256 __global__ void modulateKernel(float *d_A, float *d_B, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; float rcpN = 1.0f / (float)N; for (int pos = tid; pos < N; pos += numThreads) { d_A[pos] *= d_B[pos] * rcpN; } }
bb901890f7c17a7ec886c1cc237767b614b78aa1.hip
// !!! This is a file automatically generated by hipify!!! /* * */ #include "DeviceProperties.h" namespace culgt { int DeviceProperties::getComputeCapability() { if( !isAvailable ) { update(); } return computeCapability; } int DeviceProperties::getMaxGridSize() { if( !isAvailable ) { update(); } return maxGridSize; } int DeviceProperties::getMaxBlockSize() { if( !isAvailable ) { update(); } return maxBlockSize; } std::string DeviceProperties::getName() { if( !isAvailable ) { update(); } return name; } int DeviceProperties::getDeviceNumber() { if( !isAvailable ) { update(); } return deviceNumber; } void DeviceProperties::update() { hipDeviceProp_t deviceProp; hipGetDevice( &deviceNumber ); hipGetDeviceProperties(&deviceProp, deviceNumber ); computeCapability = deviceProp.major*100+deviceProp.minor*10; maxGridSize = deviceProp.maxGridSize[0]; maxBlockSize = deviceProp.maxThreadsPerBlock; name = deviceProp.name; isAvailable = true; } bool DeviceProperties::isAvailable = false; int DeviceProperties::computeCapability; int DeviceProperties::maxGridSize; int DeviceProperties::maxBlockSize; std::string DeviceProperties::name; int DeviceProperties::deviceNumber; }
bb901890f7c17a7ec886c1cc237767b614b78aa1.cu
/* * */ #include "DeviceProperties.h" namespace culgt { int DeviceProperties::getComputeCapability() { if( !isAvailable ) { update(); } return computeCapability; } int DeviceProperties::getMaxGridSize() { if( !isAvailable ) { update(); } return maxGridSize; } int DeviceProperties::getMaxBlockSize() { if( !isAvailable ) { update(); } return maxBlockSize; } std::string DeviceProperties::getName() { if( !isAvailable ) { update(); } return name; } int DeviceProperties::getDeviceNumber() { if( !isAvailable ) { update(); } return deviceNumber; } void DeviceProperties::update() { cudaDeviceProp deviceProp; cudaGetDevice( &deviceNumber ); cudaGetDeviceProperties(&deviceProp, deviceNumber ); computeCapability = deviceProp.major*100+deviceProp.minor*10; maxGridSize = deviceProp.maxGridSize[0]; maxBlockSize = deviceProp.maxThreadsPerBlock; name = deviceProp.name; isAvailable = true; } bool DeviceProperties::isAvailable = false; int DeviceProperties::computeCapability; int DeviceProperties::maxGridSize; int DeviceProperties::maxBlockSize; std::string DeviceProperties::name; int DeviceProperties::deviceNumber; }
e38d8ae746d22db53ca9ad748610a63049536c0a.hip
// !!! This is a file automatically generated by hipify!!! #include "./common/helpers.h" #define SIZE (10 * 1024 * 1024) float cuda_malloc_test(int size, bool up) { hipEvent_t start, stop; int *a, *dev_a; float elapsedTime; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); //a = (int*)malloc(size * sizeof(*a)); HANDLE_ERROR(hipHostMalloc((void**)&a, size * sizeof(*a), 0)); HANDLE_NULL(a); HANDLE_ERROR(hipMalloc((void**)&dev_a, size * sizeof(*dev_a))); HANDLE_ERROR(hipEventRecord(start, 0)); for (int i = 0; i < 100; i++) { if (up) { HANDLE_ERROR(hipMemcpy(dev_a, a, size * sizeof(*dev_a), hipMemcpyHostToDevice)); } else { HANDLE_ERROR(hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost)); } } HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); HANDLE_ERROR(hipHostFree(a)); HANDLE_ERROR(hipFree(dev_a)); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); return elapsedTime; } int main(void) { float elapsedTime; float MB = (float)100 * SIZE * sizeof(int) / 1024 / 1024; elapsedTime = cuda_malloc_test(SIZE, true); printf("Total time for copy up: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy up: %3.1f\n", MB / (elapsedTime / 1000)); elapsedTime = cuda_malloc_test(SIZE, false); printf("Total time for copy down: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000)); return 0; }
e38d8ae746d22db53ca9ad748610a63049536c0a.cu
#include "./common/helpers.h" #define SIZE (10 * 1024 * 1024) float cuda_malloc_test(int size, bool up) { cudaEvent_t start, stop; int *a, *dev_a; float elapsedTime; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); //a = (int*)malloc(size * sizeof(*a)); HANDLE_ERROR(cudaHostAlloc((void**)&a, size * sizeof(*a), 0)); HANDLE_NULL(a); HANDLE_ERROR(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a))); HANDLE_ERROR(cudaEventRecord(start, 0)); for (int i = 0; i < 100; i++) { if (up) { HANDLE_ERROR(cudaMemcpy(dev_a, a, size * sizeof(*dev_a), cudaMemcpyHostToDevice)); } else { HANDLE_ERROR(cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost)); } } HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); HANDLE_ERROR(cudaFreeHost(a)); HANDLE_ERROR(cudaFree(dev_a)); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); return elapsedTime; } int main(void) { float elapsedTime; float MB = (float)100 * SIZE * sizeof(int) / 1024 / 1024; elapsedTime = cuda_malloc_test(SIZE, true); printf("Total time for copy up: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy up: %3.1f\n", MB / (elapsedTime / 1000)); elapsedTime = cuda_malloc_test(SIZE, false); printf("Total time for copy down: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000)); return 0; }
5826e26d35b2294cd87097b984cc64ff4476d862.hip
// !!! This is a file automatically generated by hipify!!! #include<iostream> #include <hip/hip_runtime.h> __global__ void hello_world() { std::printf("Hello World! I am thread %d.\n", threadIdx.x); } int main() { hipLaunchKernelGGL(( hello_world), dim3(1),dim3(4), 0, 0, ); hipDeviceSynchronize(); return 0; }
5826e26d35b2294cd87097b984cc64ff4476d862.cu
#include<iostream> #include <cuda.h> __global__ void hello_world() { std::printf("Hello World! I am thread %d.\n", threadIdx.x); } int main() { hello_world<<<1,4>>>(); cudaDeviceSynchronize(); return 0; }
056bd2bc7d7c9d49eb1e11823e1923e09848b15a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <fstream> #define BILLION 1E9; const int n=300; __global__ void grayscaleKernel(int *ms, int *aux, int n){ int i = threadIdx.x+blockDim.x*blockIdx.x; int k=0; int grayscale=0; if(i<n){ for(k=0; k<n-3; k+=3){ grayscale = 0.299*ms[i*n+k] + 0.5876*ms[i*n+k+1] + 0.114*ms[i*n+k+2]; aux[i*n+k] = aux[i*n+k+1] = aux[i*n+k+2] = grayscale; } } } int main(){ int m[n][n], a[n][n]; int *d_m, *d_a; struct timespec requestStart, requestEnd, reqSt, reqEd; static unsigned int color[3]; FILE *fp = fopen("picture_col.ppm", "w"); (void) fprintf(fp,"P6\n%d %d\n255\n", n, n); FILE *sp = fopen("picture_gray.ppm", "w"); (void) fprintf(sp,"P6\n%d %d\n255\n", n, n); // Inicializacin y construccin de imagen a color for(int i=0; i<n; i++) for(int j=0; j<n-3; j+=3){ m[i][j] = j%256; m[i][j+1] = i%256; m[i][j+2] = i*j%256; color[0] = m[i][j]; color[1] = m[i][j+1]; color[2] = m[i][j+2]; (void) fwrite(color, sizeof(int), 3, fp); } int size = sizeof(int)*n*n; clock_gettime(CLOCK_REALTIME, &reqSt); hipMalloc((void **) &d_m, size); hipMalloc((void **) &d_a, size); clock_gettime(CLOCK_REALTIME, &reqEd); hipMemcpy(d_m, m, size, hipMemcpyHostToDevice); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); dim3 dimBlock(n/4, n/4); dim3 dimGrid(n/dimBlock.x, n/dimBlock.y); clock_gettime(CLOCK_REALTIME, &requestStart); hipLaunchKernelGGL(( grayscaleKernel), dim3(dimBlock), dim3(dimGrid), 0, 0, d_m, d_a, n); clock_gettime(CLOCK_REALTIME, &requestEnd); hipMemcpy(a, d_a, size, hipMemcpyDeviceToHost); printf("%d %d\n", a[0][0], a[n-1][n-1]); hipFree(d_a); hipFree(d_m); for(int i=0; i<n; i++) for(int j=0; j<n-3; j+=3){ color[0] = a[i][j]; color[1] = a[i][j+1]; color[2] = a[i][j+2]; (void) fwrite(color, sizeof(int), 3, sp); } double accum = (double) (requestEnd.tv_sec - requestStart.tv_sec) + (requestEnd.tv_nsec - requestStart.tv_nsec)/BILLION; printf( "Tiempo de 
ejecucin: %.15lf\n", accum ); double accum2 = (double) (reqEd.tv_sec - reqSt.tv_sec) + (reqEd.tv_nsec - reqSt.tv_nsec)/BILLION; printf( "Tiempo empleado en la reserva de memoria: %.15lf\n", accum2 ); return hipDeviceReset(); }
056bd2bc7d7c9d49eb1e11823e1923e09848b15a.cu
#include <cuda_runtime.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <fstream> #define BILLION 1E9; const int n=300; __global__ void grayscaleKernel(int *ms, int *aux, int n){ int i = threadIdx.x+blockDim.x*blockIdx.x; int k=0; int grayscale=0; if(i<n){ for(k=0; k<n-3; k+=3){ grayscale = 0.299*ms[i*n+k] + 0.5876*ms[i*n+k+1] + 0.114*ms[i*n+k+2]; aux[i*n+k] = aux[i*n+k+1] = aux[i*n+k+2] = grayscale; } } } int main(){ int m[n][n], a[n][n]; int *d_m, *d_a; struct timespec requestStart, requestEnd, reqSt, reqEd; static unsigned int color[3]; FILE *fp = fopen("picture_col.ppm", "w"); (void) fprintf(fp,"P6\n%d %d\n255\n", n, n); FILE *sp = fopen("picture_gray.ppm", "w"); (void) fprintf(sp,"P6\n%d %d\n255\n", n, n); // Inicialización y construcción de imagen a color for(int i=0; i<n; i++) for(int j=0; j<n-3; j+=3){ m[i][j] = j%256; m[i][j+1] = i%256; m[i][j+2] = i*j%256; color[0] = m[i][j]; color[1] = m[i][j+1]; color[2] = m[i][j+2]; (void) fwrite(color, sizeof(int), 3, fp); } int size = sizeof(int)*n*n; clock_gettime(CLOCK_REALTIME, &reqSt); cudaMalloc((void **) &d_m, size); cudaMalloc((void **) &d_a, size); clock_gettime(CLOCK_REALTIME, &reqEd); cudaMemcpy(d_m, m, size, cudaMemcpyHostToDevice); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); dim3 dimBlock(n/4, n/4); dim3 dimGrid(n/dimBlock.x, n/dimBlock.y); clock_gettime(CLOCK_REALTIME, &requestStart); grayscaleKernel<<<dimBlock, dimGrid>>>(d_m, d_a, n); clock_gettime(CLOCK_REALTIME, &requestEnd); cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost); printf("%d %d\n", a[0][0], a[n-1][n-1]); cudaFree(d_a); cudaFree(d_m); for(int i=0; i<n; i++) for(int j=0; j<n-3; j+=3){ color[0] = a[i][j]; color[1] = a[i][j+1]; color[2] = a[i][j+2]; (void) fwrite(color, sizeof(int), 3, sp); } double accum = (double) (requestEnd.tv_sec - requestStart.tv_sec) + (requestEnd.tv_nsec - requestStart.tv_nsec)/BILLION; printf( "Tiempo de ejecución: %.15lf\n", accum ); double accum2 = (double) (reqEd.tv_sec - reqSt.tv_sec) + 
(reqEd.tv_nsec - reqSt.tv_nsec)/BILLION; printf( "Tiempo empleado en la reserva de memoria: %.15lf\n", accum2 ); return cudaThreadExit(); }
e46647284b3ff2568cc397f07023a93009ac9b99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************************* Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish **********************************************************************************/ #ifndef _KERNEL_H_ #define _KERNEL_H_ __global__ void Kernel(int *trace, Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes) { int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; unsigned long j = tid * 40; if( tid<no_of_nodes && g_graph_mask[tid]) { g_graph_mask[tid]=false; for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++) { trace[j] = i; j++; int id = g_graph_edges[i]; trace[j] = id; j++; if(!g_graph_visited[id]) { g_cost[id]=g_cost[tid]+1; g_updating_graph_mask[id]=true; } } } } #endif
e46647284b3ff2568cc397f07023a93009ac9b99.cu
/********************************************************************************* Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish **********************************************************************************/ #ifndef _KERNEL_H_ #define _KERNEL_H_ __global__ void Kernel(int *trace, Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes) { int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; unsigned long j = tid * 40; if( tid<no_of_nodes && g_graph_mask[tid]) { g_graph_mask[tid]=false; for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++) { trace[j] = i; j++; int id = g_graph_edges[i]; trace[j] = id; j++; if(!g_graph_visited[id]) { g_cost[id]=g_cost[tid]+1; g_updating_graph_mask[id]=true; } } } } #endif
9a8fbaceb78f835d90f55ab66fcb7a4430cdae79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* Example of integrating CUDA functions into an existing * application / framework. * Host part of the device code. * Compiled with Cuda compiler. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil_inline.h> //#include <shrUtils.h> // includes, kernels #include <cppIntegration_kernel.cu> // //////////////////////////////////////////////////////////////////////////////// // declaration, forward extern "C" void computeGold(char* reference, char* idata, const unsigned int len); extern "C" void computeGold2(int2* reference, int2* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// //! Entry point for Cuda functionality on host side //! @param argc command line argument count //! @param argv command line arguments //! @param data data to process on the device //! 
@param len len of \a data //////////////////////////////////////////////////////////////////////////////// extern "C" void runTest(const int argc, const char** argv, char* data, int2* data_int2, unsigned int len, float* h_C) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, (char**)argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); const unsigned int num_threads = len / 4; cutilCondition(0 == (len % 4)); const unsigned int mem_size = sizeof(char) * len; const unsigned int mem_size_int2 = sizeof(int2) * len; // allocate device memory char* d_data; cutilSafeCall(hipMalloc((void**) &d_data, mem_size)); // copy host memory to device cutilSafeCall(hipMemcpy(d_data, data, mem_size, hipMemcpyHostToDevice) ); // allocate device memory for int2 version int2* d_data_int2; cutilSafeCall(hipMalloc((void**) &d_data_int2, mem_size_int2)); // copy host memory to device cutilSafeCall(hipMemcpy(d_data_int2, data_int2, mem_size_int2, hipMemcpyHostToDevice) ); // setup execution parameters dim3 grid(1, 1, 1); dim3 threads(num_threads, 1, 1); dim3 threads2(len, 1, 1); // more threads needed fir separate int2 version // execute the kernel hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads) , 0, 0, (int*) d_data); hipLaunchKernelGGL(( kernel2), dim3(grid), dim3(threads2) , 0, 0, d_data_int2); // check if kernel execution generated and error cutilCheckMsg("Kernel execution failed"); // compute reference solutions char* reference = (char*) malloc(mem_size); computeGold(reference, data, len); int2* reference2 = (int2*) malloc(mem_size_int2); computeGold2(reference2, data_int2, len); // copy results from device to host cutilSafeCall(hipMemcpy(data, d_data, mem_size, hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(data_int2, d_data_int2, mem_size_int2, hipMemcpyDeviceToHost)); // check result bool success = true; for(unsigned int i = 0; i < len; i++ ) { if( 
reference[i] != data[i] || reference2[i].x != data_int2[i].x || reference2[i].y != data_int2[i].y) success = false; } printf("Test %s\n", success ? "PASSED" : "FAILED"); // cleanup memory cutilSafeCall(hipFree(d_data)); cutilSafeCall(hipFree(d_data_int2)); free(reference); free(reference2); int N=10; size_t size = N * sizeof(float); float* d_A; hipMalloc((void**)&d_A, size); float* d_B; hipMalloc((void**)&d_B, size); float* d_C; hipMalloc((void**)&d_C, size); float h_A_v =3.0; float h_B_v =4.0; //float h_C_v =0; float* h_A; float* h_B; // float* h_C; h_A = &h_A_v; h_B = &h_B_v; //h_C = &h_C_v; hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); // Invoke kernel int threadsPerBlock = 256; // int blocksPerGrid = (N + threadsPerBlock 1) / threadsPerBlock; hipLaunchKernelGGL(( simpleAdd), dim3(1), dim3(N), 0, 0, d_A, d_B, d_C); // Copy result from device memory to host memory // h_C contains the result in host memory hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); // Free device memory hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); }
9a8fbaceb78f835d90f55ab66fcb7a4430cdae79.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* Example of integrating CUDA functions into an existing * application / framework. * Host part of the device code. * Compiled with Cuda compiler. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil_inline.h> //#include <shrUtils.h> // includes, kernels #include <cppIntegration_kernel.cu> // //////////////////////////////////////////////////////////////////////////////// // declaration, forward extern "C" void computeGold(char* reference, char* idata, const unsigned int len); extern "C" void computeGold2(int2* reference, int2* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// //! Entry point for Cuda functionality on host side //! @param argc command line argument count //! @param argv command line arguments //! @param data data to process on the device //! 
@param len len of \a data //////////////////////////////////////////////////////////////////////////////// extern "C" void runTest(const int argc, const char** argv, char* data, int2* data_int2, unsigned int len, float* h_C) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, (char**)argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); const unsigned int num_threads = len / 4; cutilCondition(0 == (len % 4)); const unsigned int mem_size = sizeof(char) * len; const unsigned int mem_size_int2 = sizeof(int2) * len; // allocate device memory char* d_data; cutilSafeCall(cudaMalloc((void**) &d_data, mem_size)); // copy host memory to device cutilSafeCall(cudaMemcpy(d_data, data, mem_size, cudaMemcpyHostToDevice) ); // allocate device memory for int2 version int2* d_data_int2; cutilSafeCall(cudaMalloc((void**) &d_data_int2, mem_size_int2)); // copy host memory to device cutilSafeCall(cudaMemcpy(d_data_int2, data_int2, mem_size_int2, cudaMemcpyHostToDevice) ); // setup execution parameters dim3 grid(1, 1, 1); dim3 threads(num_threads, 1, 1); dim3 threads2(len, 1, 1); // more threads needed fir separate int2 version // execute the kernel kernel<<< grid, threads >>>((int*) d_data); kernel2<<< grid, threads2 >>>(d_data_int2); // check if kernel execution generated and error cutilCheckMsg("Kernel execution failed"); // compute reference solutions char* reference = (char*) malloc(mem_size); computeGold(reference, data, len); int2* reference2 = (int2*) malloc(mem_size_int2); computeGold2(reference2, data_int2, len); // copy results from device to host cutilSafeCall(cudaMemcpy(data, d_data, mem_size, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(data_int2, d_data_int2, mem_size_int2, cudaMemcpyDeviceToHost)); // check result bool success = true; for(unsigned int i = 0; i < len; i++ ) { if( reference[i] != data[i] || reference2[i].x != data_int2[i].x || 
reference2[i].y != data_int2[i].y) success = false; } printf("Test %s\n", success ? "PASSED" : "FAILED"); // cleanup memory cutilSafeCall(cudaFree(d_data)); cutilSafeCall(cudaFree(d_data_int2)); free(reference); free(reference2); int N=10; size_t size = N * sizeof(float); float* d_A; cudaMalloc((void**)&d_A, size); float* d_B; cudaMalloc((void**)&d_B, size); float* d_C; cudaMalloc((void**)&d_C, size); float h_A_v =3.0; float h_B_v =4.0; //float h_C_v =0; float* h_A; float* h_B; // float* h_C; h_A = &h_A_v; h_B = &h_B_v; //h_C = &h_C_v; cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); // Invoke kernel int threadsPerBlock = 256; // int blocksPerGrid = (N + threadsPerBlock – 1) / threadsPerBlock; simpleAdd<<<1, N>>>(d_A, d_B, d_C); // Copy result from device memory to host memory // h_C contains the result in host memory cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); // Free device memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaThreadExit(); }
9501dd20988008f47381c3f0056e6e3353babe38.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "updateBiasKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *dZ = NULL; hipMalloc(&dZ, XSIZE*YSIZE); float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int cols = YSIZE; int row = 1; float learning_rate = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( updateBiasKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dZ,b,cols,row,learning_rate); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( updateBiasKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dZ,b,cols,row,learning_rate); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( updateBiasKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dZ,b,cols,row,learning_rate); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); 
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9501dd20988008f47381c3f0056e6e3353babe38.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "updateBiasKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *dZ = NULL; cudaMalloc(&dZ, XSIZE*YSIZE); float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int cols = YSIZE; int row = 1; float learning_rate = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); updateBiasKernel<<<gridBlock,threadBlock>>>(dZ,b,cols,row,learning_rate); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { updateBiasKernel<<<gridBlock,threadBlock>>>(dZ,b,cols,row,learning_rate); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { updateBiasKernel<<<gridBlock,threadBlock>>>(dZ,b,cols,row,learning_rate); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
84e6e8f50baca159c16fdfdc4ef25253f24862ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <cutil.h> #include "cuda002_kernel.cu" int main(int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); unsigned int timer = 0; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUT_SAFE_CALL(cutStartTimer(timer)); float* h_idata = (float*) malloc(sizeof(float) * 100); for(int i = 0; i < 100; i++) { h_idata[i] = i; } float* d_idata; CUDA_SAFE_CALL(hipMalloc((void**) &d_idata, sizeof(float) * 100)); CUDA_SAFE_CALL(hipMemcpy(d_idata, h_idata, sizeof(float) * 100, hipMemcpyHostToDevice)); float* d_odata; CUDA_SAFE_CALL(hipMalloc((void**) &d_odata, sizeof(float) * 100)); dim3 grid(1, 1, 1); dim3 threads(100, 1, 1); hipLaunchKernelGGL(( cuda002Kernel), dim3(grid), dim3(threads), sizeof(float) * 100 , 0, d_idata, d_odata); float* h_odata = (float*) malloc(sizeof(float) * 100); CUDA_SAFE_CALL(hipMemcpy(h_odata, d_odata, sizeof(float) * 100, hipMemcpyDeviceToHost)); printf("input data, output data\n"); for(int i = 0; i < 100; i++) { printf("%f, %f\n", h_idata[i], h_odata[i]); } CUT_SAFE_CALL(cutStopTimer(timer)); printf("Processing time: %f(ms)\n", cutGetTimerValue(timer)); CUT_SAFE_CALL(cutDeleteTimer(timer)); free(h_idata); free(h_odata); CUDA_SAFE_CALL(hipFree(d_idata)); CUDA_SAFE_CALL(hipFree(d_odata)); CUT_EXIT(argc, argv); }
84e6e8f50baca159c16fdfdc4ef25253f24862ed.cu
#include <stdio.h> #include <cutil.h> #include "cuda002_kernel.cu" int main(int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); unsigned int timer = 0; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUT_SAFE_CALL(cutStartTimer(timer)); float* h_idata = (float*) malloc(sizeof(float) * 100); for(int i = 0; i < 100; i++) { h_idata[i] = i; } float* d_idata; CUDA_SAFE_CALL(cudaMalloc((void**) &d_idata, sizeof(float) * 100)); CUDA_SAFE_CALL(cudaMemcpy(d_idata, h_idata, sizeof(float) * 100, cudaMemcpyHostToDevice)); float* d_odata; CUDA_SAFE_CALL(cudaMalloc((void**) &d_odata, sizeof(float) * 100)); dim3 grid(1, 1, 1); dim3 threads(100, 1, 1); cuda002Kernel<<< grid, threads, sizeof(float) * 100 >>>(d_idata, d_odata); float* h_odata = (float*) malloc(sizeof(float) * 100); CUDA_SAFE_CALL(cudaMemcpy(h_odata, d_odata, sizeof(float) * 100, cudaMemcpyDeviceToHost)); printf("input data, output data\n"); for(int i = 0; i < 100; i++) { printf("%f, %f\n", h_idata[i], h_odata[i]); } CUT_SAFE_CALL(cutStopTimer(timer)); printf("Processing time: %f(ms)\n", cutGetTimerValue(timer)); CUT_SAFE_CALL(cutDeleteTimer(timer)); free(h_idata); free(h_odata); CUDA_SAFE_CALL(cudaFree(d_idata)); CUDA_SAFE_CALL(cudaFree(d_odata)); CUT_EXIT(argc, argv); }
6245bdedcf1710817c5b7c30b95d424f5b8fdea5.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/visualization/shader/normal_shader.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; const auto &vertex = vertices_[vi]; if (shade_option_ == RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertex, triangle_normals_[i]); } else { return thrust::make_tuple(vertex, vertex_normals_[vi]); } } }; } bool NormalShader::Compile() { if (CompileShaders(normal_vertex_shader, NULL, normal_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); return true; } void NormalShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool NormalShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const 
ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; 
} bool NormalShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); return true; } void NormalShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); bound_ = false; } } bool NormalShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPointSize(GLfloat(option.point_size_)); return true; } bool NormalShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if 
(geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } if (pointcloud.HasNormals() == false) { PrintShaderWarning("Binding failed with pointcloud with no normals."); return false; } thrust::copy(pointcloud.points_.begin(), pointcloud.points_.end(), points); thrust::copy(pointcloud.normals_.begin(), pointcloud.normals_.end(), normals); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t NormalShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool NormalShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool NormalShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const 
geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t NormalShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
6245bdedcf1710817c5b7c30b95d424f5b8fdea5.cu
#include "cupoch/visualization/shader/normal_shader.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include <cuda_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; const auto &vertex = vertices_[vi]; if (shade_option_ == RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertex, triangle_normals_[i]); } else { return thrust::make_tuple(vertex, vertex_normals_[vi]); } } }; } bool NormalShader::Compile() { if (CompileShaders(normal_vertex_shader, NULL, normal_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); return true; } void NormalShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool NormalShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first 
unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool NormalShader::RenderGeometry(const 
geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); return true; } void NormalShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); bound_ = false; } } bool NormalShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPointSize(GLfloat(option.point_size_)); return true; } bool NormalShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != 
geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } if (pointcloud.HasNormals() == false) { PrintShaderWarning("Binding failed with pointcloud with no normals."); return false; } thrust::copy(pointcloud.points_.begin(), pointcloud.points_.end(), points); thrust::copy(pointcloud.normals_.begin(), pointcloud.normals_.end(), normals); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t NormalShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool NormalShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool NormalShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const 
geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t NormalShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
28d5738e2365b600029ed21c78f3e0398aca3f69.hip
// !!! This is a file automatically generated by hipify!!! #include "funset.hpp" #include <iostream> #include <algorithm> #include <memory> #include <vector> #include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <device_launch_parameters.h> #include "common.hpp" /* __global__: ;;,3.2 ;void;, ;, gridblock,(<<< >>>); a kernel,(GPUCUDAkernel( ),__global__); */ __global__ static void calculate_histogram(const unsigned char* data, int length, unsigned int* hist) { /* __shared__: __shared____device__ blockblock block__shared____constant__ __shared__extern __shared__CUDA C __shared__CUDA C */ // clear out the accumulation buffer called temp since we are launched with // 256 threads, it is easy to clear that memory with one write per thread __shared__ unsigned int temp[256]; // temp[threadIdx.x] = 0; /* __syncthreads: CUDA __syncthreads() __syncthreads();block(shared memory)(kernel __syncthreads())clock() clock() __syncthreads()block threadblock thread */ __syncthreads(); /* gridDim: ,,, ,,. dim3 blockDim: ,block.dim3, block;,, ; blockIdx: ,; threadblockgrid,blockIdx.x [0,gridDim.x-1],blockIdx.y[0, gridDim.y-1].uint3, blockgrid; threadIdx: ,; threadblock;threadIdx.x, threadIdx.y,threadIdx.z;uint3 ,threadblock */ // calculate the starting index and the offset to the next block that each thread will be processing int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while (i < length) { /* atomicAdd: , addr(atomic function)3264 read-modify-write atomicAdd(addr,y) addryaddr */ atomicAdd(&temp[data[i]], 1); i += stride; } // sync the data from the above writes to shared memory then add the shared memory values to the values from // the other thread blocks using global memory atomic adds same as before, since we have 256 threads, // updating the global histogram is just one write per thread! 
__syncthreads(); // atomicAdd(&(hist[threadIdx.x]), temp[threadIdx.x]); } int calculate_histogram_gpu(const unsigned char* data, int length, unsigned int* hist, unsigned int& value, float* elapsed_time) { /* hipEvent_t: CUDA event types,, CUDA,GPU ,CUDAGPU,CUDA GPU, */ hipEvent_t start, stop; // hipEventCreate: , hipEventCreate(&start); hipEventCreate(&stop); // hipEventRecord: ,,start hipEventRecord(start, 0); unsigned char* dev_buffer{ nullptr }; unsigned int* dev_hist{ nullptr }; // hipMalloc: hipMalloc(&dev_buffer, length); hipMalloc(&dev_hist, 256 * sizeof(unsigned int)); /* hipMemcpy: ,: (1). hipMemcpyHostToHost: (2). hipMemcpyHostToDevice: (3). hipMemcpyDeviceToHost: (4). hipMemcpyDeviceToDevice: (5). hipMemcpyDefault: , (CUDA6.0) cudaMemcpy */ hipMemcpy(dev_buffer, data, length, hipMemcpyHostToDevice); /* hipMemset: ,GPU */ hipMemset(dev_hist, 0, 256 * sizeof(unsigned int)); // hipDeviceProp_t: cuda // kernel launch - 2x the number of mps gave best timing hipDeviceProp_t prop; // hipGetDeviceProperties: GPU hipGetDeviceProperties(&prop, 0); // hipDeviceProp_t::multiProcessorCount: int blocks = prop.multiProcessorCount; fprintf(stderr, "multiProcessorCount: %d\n", blocks); /* <<< >>>: CUDA,, CUDA,, ;, ,, ;; kernel,kernel, GPU,; API,<<<Dg,Db,Ns,S>>> ,Dgdim3,grid .Dg,gridDg.x*Dg.y*Dg.zblock;Db dim3,block.Db, blockDb.x*Db.y*Db.zthread;Nsunsigned int, , (extern __shared__);Ns,0;S cudaStream_t,.S,0. */ // GPU2 calculate_histogram << <blocks * 2, 256 >> >(dev_buffer, length, dev_hist); hipMemcpy(hist, dev_hist, 256 * sizeof(unsigned int), hipMemcpyDeviceToHost); value = 0; for (int i = 0; i < 256; ++i) { value += hist[i]; } // hipFree: cudaMalloc hipFree(dev_buffer); hipFree(dev_hist); // hipEventRecord: ,,stop hipEventRecord(stop, 0); // hipEventSynchronize: ,, hipEventSynchronize(stop); // cudaEventElapseTime: ,, hipEventElapsedTime(elapsed_time, start, stop); // hipEventDestroy: , hipEventDestroy(start); hipEventDestroy(stop); return 0; }
28d5738e2365b600029ed21c78f3e0398aca3f69.cu
#include "funset.hpp" #include <iostream> #include <algorithm> #include <memory> #include <vector> #include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <device_launch_parameters.h> #include "common.hpp" /* __global__: 函数类型限定符;在设备上运行;在主机端调用,计算能力3.2及以上可以在 设备端调用;声明的函数的返回值必须是void类型;对此类型函数的调用是异步的,即在 设备完全完成它的运行之前就返回了;对此类型函数的调用必须指定执行配置,即用于在 设备上执行函数时的grid和block的维度,以及相关的流(即插入<<< >>>运算符); a kernel,表示此函数为内核函数(运行在GPU上的CUDA并行计算函数称为kernel(内核函 数),内核函数必须通过__global__函数类型限定符定义); */ __global__ static void calculate_histogram(const unsigned char* data, int length, unsigned int* hist) { /* __shared__: 变量类型限定符;使用__shared__限定符,或者与__device__限 定符连用,此时声明的变量位于block中的共享存储器空间中,与block具有相同 的生命周期,仅可通过block内的所有线程访问;__shared__和__constant__变量 默认为是静态存储;在__shared__前可以加extern关键字,但表示的是变量大小 由执行参数确定;__shared__变量在声明时不能初始化;可以将CUDA C的关键字 __shared__添加到变量声明中,这将使这个变量驻留在共享内存中;CUDA C编译 器对共享内存中的变量与普通变量将分别采取不同的处理方式 */ // clear out the accumulation buffer called temp since we are launched with // 256 threads, it is easy to clear that memory with one write per thread __shared__ unsigned int temp[256]; // 共享内存缓冲区 temp[threadIdx.x] = 0; /* __syncthreads: 对线程块中的线程进行同步;CUDA架构将确保,除非线程块 中的每个线程都执行了__syncthreads(),否则没有任何线程能执行 __syncthreads()之后的指令;在同一个block中的线程通过共享存储器(shared memory)交换数据,并通过栅栏同步(可以在kernel函数中需要同步的位置调用 __syncthreads()函数)保证线程间能够正确地共享数据;使用clock()函数计时, 在内核函数中要测量的一段代码的开始和结束的位置分别调用一次clock()函数, 并将结果记录下来。由于调用__syncthreads()函数后,一个block中的所有 thread需要的时间是相同的,因此只需要记录每个block执行需要的时间就行了, 而不需要记录每个thread的时间 */ __syncthreads(); /* gridDim: 内置变量,用于描述线程网格的维度,对于所有线程块来说,这个 变量是一个常数,用来保存线程格每一维的大小,即每个线程格中线程块的数量. 
为dim3类型; blockDim: 内置变量,用于说明每个block的维度与尺寸.为dim3类型,包含 了block在三个维度上的尺寸信息;对于所有线程块来说,这个变量是一个常数, 保存的是线程块中每一维的线程数量; blockIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程块的索引;用 于说明当前thread所在的block在整个grid中的位置,blockIdx.x取值范围是 [0,gridDim.x-1],blockIdx.y取值范围是[0, gridDim.y-1].为uint3类型, 包含了一个block在grid中各个维度上的索引信息; threadIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程索引;用于 说明当前thread在block中的位置;如果线程是一维的可获取threadIdx.x,如果 是二维的还可获取threadIdx.y,如果是三维的还可获取threadIdx.z;为uint3类 型,包含了一个thread在block中各个维度的索引信息 */ // calculate the starting index and the offset to the next block that each thread will be processing int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while (i < length) { /* atomicAdd: 原子操作,底层硬件将确保当执行这些原子操作时,其 它任何线程都不会读取或写入地址addr上的值。原子函数(atomic function)对位于全局或共享存储器的一个32位或64位字执行 read-modify-write的原子操作。也就是说,当多个线程同时访问全局或 共享存储器的同一位置时,保证每个线程能够实现对共享可写数据的互 斥操作:在一个操作完成之前,其它任何线程都无法访问此地址。之所 以将这一过程称为原子操作,是因为每个线程的操作都不会影响到其它 线程。换句话说,原子操作能够保证对一个地址的当前操作完成之前, 其它线程都不能访问这个地址。 atomicAdd(addr,y):将生成一个原子的操作序列,这个操作序列包括读 取地址addr处的值,将y增加到这个值,以及将结果保存回地址addr。 */ atomicAdd(&temp[data[i]], 1); i += stride; } // sync the data from the above writes to shared memory then add the shared memory values to the values from // the other thread blocks using global memory atomic adds same as before, since we have 256 threads, // updating the global histogram is just one write per thread! 
__syncthreads(); // 将每个线程块的直方图合并为单个最终的直方图 atomicAdd(&(hist[threadIdx.x]), temp[threadIdx.x]); } int calculate_histogram_gpu(const unsigned char* data, int length, unsigned int* hist, unsigned int& value, float* elapsed_time) { /* cudaEvent_t: CUDA event types,结构体类型, CUDA事件,用于测量GPU在某 个任务上花费的时间,CUDA中的事件本质上是一个GPU时间戳,由于CUDA事件是在 GPU上实现的,因此它们不适于对同时包含设备代码和主机代码的混合代码计时 */ cudaEvent_t start, stop; // cudaEventCreate: 创建一个事件对象,异步启动 cudaEventCreate(&start); cudaEventCreate(&stop); // cudaEventRecord: 记录一个事件,异步启动,start记录起始时间 cudaEventRecord(start, 0); unsigned char* dev_buffer{ nullptr }; unsigned int* dev_hist{ nullptr }; // cudaMalloc: 在设备端分配内存 cudaMalloc(&dev_buffer, length); cudaMalloc(&dev_hist, 256 * sizeof(unsigned int)); /* cudaMemcpy: 在主机端和设备端拷贝数据,此函数第四个参数仅能是下面之一: (1). cudaMemcpyHostToHost: 拷贝数据从主机端到主机端 (2). cudaMemcpyHostToDevice: 拷贝数据从主机端到设备端 (3). cudaMemcpyDeviceToHost: 拷贝数据从设备端到主机端 (4). cudaMemcpyDeviceToDevice: 拷贝数据从设备端到设备端 (5). cudaMemcpyDefault: 从指针值自动推断拷贝数据方向,需要支持 统一虚拟寻址(CUDA6.0及以上版本) cudaMemcpy函数对于主机是同步的 */ cudaMemcpy(dev_buffer, data, length, cudaMemcpyHostToDevice); /* cudaMemset: 存储器初始化函数,在GPU内存上执行。用指定的值初始化或设置 设备内存 */ cudaMemset(dev_hist, 0, 256 * sizeof(unsigned int)); // cudaDeviceProp: cuda设备属性结构体 // kernel launch - 2x the number of mps gave best timing cudaDeviceProp prop; // cudaGetDeviceProperties: 获取GPU设备相关信息 cudaGetDeviceProperties(&prop, 0); // cudaDeviceProp::multiProcessorCount: 设备上多处理器的数量 int blocks = prop.multiProcessorCount; fprintf(stderr, "multiProcessorCount: %d\n", blocks); /* <<< >>>: 为CUDA引入的运算符,指定线程网格和线程块维度等,传递执行参 数给CUDA编译器和运行时系统,用于说明内核函数中的线程数量,以及线程是如何 组织的;尖括号中这些参数并不是传递给设备代码的参数,而是告诉运行时如何 启动设备代码,传递给设备代码本身的参数是放在圆括号中传递的,就像标准的函 数调用一样;不同计算能力的设备对线程的总数和组织方式有不同的约束;必须 先为kernel中用到的数组或变量分配好足够的空间,再调用kernel函数,否则在 GPU计算时会发生错误,例如越界等; 使用运行时API时,需要在调用的内核函数名与参数列表直接以<<<Dg,Db,Ns,S>>> 的形式设置执行配置,其中:Dg是一个dim3型变量,用于设置grid的维度和各个 维度上的尺寸.设置好Dg后,grid中将有Dg.x*Dg.y*Dg.z个block;Db是 一个dim3型变量,用于设置block的维度和各个维度上的尺寸.设置好Db后,每个 block中将有Db.x*Db.y*Db.z个thread;Ns是一个unsigned 
int型变量,指定各块为此调 用动态分配的共享存储器大小,这些动态分配的存储器可供声明为外部数组 (extern __shared__)的其他任何变量使用;Ns是一个可选参数,默认值为0;S为 cudaStream_t类型,用于设置与内核函数关联的流.S是一个可选参数,默认值0. */ // 当线程块的数量为GPU中处理器数量的2倍时,将达到最优性能 calculate_histogram << <blocks * 2, 256 >> >(dev_buffer, length, dev_hist); cudaMemcpy(hist, dev_hist, 256 * sizeof(unsigned int), cudaMemcpyDeviceToHost); value = 0; for (int i = 0; i < 256; ++i) { value += hist[i]; } // cudaFree: 释放设备上由cudaMalloc函数分配的内存 cudaFree(dev_buffer); cudaFree(dev_hist); // cudaEventRecord: 记录一个事件,异步启动,stop记录结束时间 cudaEventRecord(stop, 0); // cudaEventSynchronize: 事件同步,等待一个事件完成,异步启动 cudaEventSynchronize(stop); // cudaEventElapseTime: 计算两个事件之间经历的时间,单位为毫秒,异步启动 cudaEventElapsedTime(elapsed_time, start, stop); // cudaEventDestroy: 销毁事件对象,异步启动 cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
64cd8464f9a38163068cdcc622306006d8257a71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _VOLUMEFILTER_KERNEL_CU_ #define _VOLUMEFILTER_KERNEL_CU_ #include <helper_cuda.h> #include <helper_math.h> #include "volumeFilter.h" typedef unsigned int uint; typedef unsigned char uchar; typedef unsigned short ushort; texture<VolumeType, 3, VolumeTypeInfo<VolumeType>::readMode> volumeTexIn; surface<void, 3> volumeTexOut; __constant__ float4 c_filterData[VOLUMEFILTER_MAXWEIGHTS]; __global__ void d_filter_surface3d(int filterSize, float filter_offset, hipExtent volumeSize) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int z = blockIdx.z*blockDim.z + threadIdx.z; if (x >= volumeSize.width || y >= volumeSize.height || z >= volumeSize.depth) { return; } float filtered = 0; float4 basecoord = make_float4(x,y,z,0); for (int i = 0; i < filterSize; i++) { float4 coord = basecoord + c_filterData[i]; filtered += tex3D(volumeTexIn,coord.x,coord.y,coord.z) * c_filterData[i].w; } filtered += filter_offset; VolumeType output = VolumeTypeInfo<VolumeType>::convert(filtered); // surface writes need byte offsets for x! surf3Dwrite(output,volumeTexOut,x * sizeof(VolumeType),y,z); } static int iDivUp(int a, int b) { return (a % b != 0) ? 
(a / b + 1) : (a / b); } extern "C" Volume *VolumeFilter_runFilter(Volume *input, Volume *output0, Volume *output1, int iterations, int numWeights, float4 *weights, float postWeightOffset) { Volume *swap = 0; hipExtent size = input->size; unsigned int dim = 32/sizeof(VolumeType); dim3 blockSize(dim,dim,1); dim3 gridSize(iDivUp(size.width,blockSize.x),iDivUp(size.height,blockSize.y),iDivUp(size.depth,blockSize.z)); // set weights checkCudaErrors(hipMemcpyToSymbol(c_filterData, weights, sizeof(float4)*numWeights)); for (int i = 0; i < iterations; i++) { // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(volumeTexIn, input->content, input->channelDesc)); checkCudaErrors(hipBindSurfaceToArray(volumeTexOut,output0->content)); hipLaunchKernelGGL(( d_filter_surface3d), dim3(gridSize), dim3(blockSize), 0, 0, numWeights,postWeightOffset, size); getLastCudaError("filter kernel failed"); swap = input; input = output0; output0 = swap; if (i == 0) { output0 = output1; } } return input; } #endif
64cd8464f9a38163068cdcc622306006d8257a71.cu
/** * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _VOLUMEFILTER_KERNEL_CU_ #define _VOLUMEFILTER_KERNEL_CU_ #include <helper_cuda.h> #include <helper_math.h> #include "volumeFilter.h" typedef unsigned int uint; typedef unsigned char uchar; typedef unsigned short ushort; texture<VolumeType, 3, VolumeTypeInfo<VolumeType>::readMode> volumeTexIn; surface<void, 3> volumeTexOut; __constant__ float4 c_filterData[VOLUMEFILTER_MAXWEIGHTS]; __global__ void d_filter_surface3d(int filterSize, float filter_offset, cudaExtent volumeSize) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int z = blockIdx.z*blockDim.z + threadIdx.z; if (x >= volumeSize.width || y >= volumeSize.height || z >= volumeSize.depth) { return; } float filtered = 0; float4 basecoord = make_float4(x,y,z,0); for (int i = 0; i < filterSize; i++) { float4 coord = basecoord + c_filterData[i]; filtered += tex3D(volumeTexIn,coord.x,coord.y,coord.z) * c_filterData[i].w; } filtered += filter_offset; VolumeType output = VolumeTypeInfo<VolumeType>::convert(filtered); // surface writes need byte offsets for x! surf3Dwrite(output,volumeTexOut,x * sizeof(VolumeType),y,z); } static int iDivUp(int a, int b) { return (a % b != 0) ? 
(a / b + 1) : (a / b); } extern "C" Volume *VolumeFilter_runFilter(Volume *input, Volume *output0, Volume *output1, int iterations, int numWeights, float4 *weights, float postWeightOffset) { Volume *swap = 0; cudaExtent size = input->size; unsigned int dim = 32/sizeof(VolumeType); dim3 blockSize(dim,dim,1); dim3 gridSize(iDivUp(size.width,blockSize.x),iDivUp(size.height,blockSize.y),iDivUp(size.depth,blockSize.z)); // set weights checkCudaErrors(cudaMemcpyToSymbol(c_filterData, weights, sizeof(float4)*numWeights)); for (int i = 0; i < iterations; i++) { // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(volumeTexIn, input->content, input->channelDesc)); checkCudaErrors(cudaBindSurfaceToArray(volumeTexOut,output0->content)); d_filter_surface3d<<<gridSize, blockSize>>>(numWeights,postWeightOffset, size); getLastCudaError("filter kernel failed"); swap = input; input = output0; output0 = swap; if (i == 0) { output0 = output1; } } return input; } #endif
baadce456bd3091cbd0ce77ada9b6fc4166ace85.hip
// !!! This is a file automatically generated by hipify!!! //raytracer.mustafaisik.net// #include "mesh.cuh" #include "memory_handler.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __host__ Mesh::Mesh() : m_instance() , m_bbox() , m_triangles(nullptr, 0) , m_nodes(nullptr, 0) , m_material_id(0) {} __host__ Mesh::Mesh(const Instance& instance, const BBox& bbox, unsigned int material_id) : m_instance(instance) , m_bbox(bbox) , m_triangles(nullptr, 0) , m_nodes(nullptr, 0) , m_material_id(material_id) {} //This function should be called only when the mesh is wanted be allocated on device memory. //If an instance of a mesh is being created and this function is called then //same set of triangles are allocated again on the device memory, which is unnecessary. __host__ void Mesh::createBaseMesh(const std::vector<Triangle>& mesh_triangles) { std::vector<Triangle> mesh_triangles_copy; if (!m_nodes.first) { std::vector<BVH::BVHNode> nodes; mesh_triangles_copy = mesh_triangles; BVH::buildMeshBvh(mesh_triangles_copy, nodes); auto size = nodes.size(); if (size) { m_nodes.second = size; size_t data_size = m_nodes.second * sizeof(BVH::BVHNode); auto memory = MemoryHandler::Handler().allocateOnDevice(data_size, Memory(Memory::HOST, nodes.begin()._Ptr)); m_nodes.first = static_cast<BVH::BVHNode*>(memory.pointer); } } auto size = mesh_triangles_copy.size(); if (!m_triangles.first && size) { m_triangles.second = size; size_t data_size = m_triangles.second * sizeof(Triangle); auto memory = MemoryHandler::Handler().allocateOnDevice(data_size, Memory(Memory::HOST, mesh_triangles_copy.begin()._Ptr)); m_triangles.first = static_cast<Triangle*>(memory.pointer); } } __host__ void Mesh::createInstanceMesh(const Mesh& base_mesh) { m_triangles = base_mesh.m_triangles; m_nodes = base_mesh.m_nodes; }
baadce456bd3091cbd0ce77ada9b6fc4166ace85.cu
//raytracer.mustafaisik.net// #include "mesh.cuh" #include "memory_handler.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" __host__ Mesh::Mesh() : m_instance() , m_bbox() , m_triangles(nullptr, 0) , m_nodes(nullptr, 0) , m_material_id(0) {} __host__ Mesh::Mesh(const Instance& instance, const BBox& bbox, unsigned int material_id) : m_instance(instance) , m_bbox(bbox) , m_triangles(nullptr, 0) , m_nodes(nullptr, 0) , m_material_id(material_id) {} //This function should be called only when the mesh is wanted be allocated on device memory. //If an instance of a mesh is being created and this function is called then //same set of triangles are allocated again on the device memory, which is unnecessary. __host__ void Mesh::createBaseMesh(const std::vector<Triangle>& mesh_triangles) { std::vector<Triangle> mesh_triangles_copy; if (!m_nodes.first) { std::vector<BVH::BVHNode> nodes; mesh_triangles_copy = mesh_triangles; BVH::buildMeshBvh(mesh_triangles_copy, nodes); auto size = nodes.size(); if (size) { m_nodes.second = size; size_t data_size = m_nodes.second * sizeof(BVH::BVHNode); auto memory = MemoryHandler::Handler().allocateOnDevice(data_size, Memory(Memory::HOST, nodes.begin()._Ptr)); m_nodes.first = static_cast<BVH::BVHNode*>(memory.pointer); } } auto size = mesh_triangles_copy.size(); if (!m_triangles.first && size) { m_triangles.second = size; size_t data_size = m_triangles.second * sizeof(Triangle); auto memory = MemoryHandler::Handler().allocateOnDevice(data_size, Memory(Memory::HOST, mesh_triangles_copy.begin()._Ptr)); m_triangles.first = static_cast<Triangle*>(memory.pointer); } } __host__ void Mesh::createInstanceMesh(const Mesh& base_mesh) { m_triangles = base_mesh.m_triangles; m_nodes = base_mesh.m_nodes; }
f8ef1a1969146b290ad06c5e8a9788c9685d22ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_fake_quantize_abs_max.h" #include "hip/hip_fp16.h" #include "saber/funcs/impl/cuda/cudnn_helper.h" namespace anakin { namespace saber { template <> SaberStatus SaberFakeQuantizeAbsMax<NV, AK_FLOAT>::\ create(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, FakeQuantizeAbsMaxParam<NV>& param, Context<NV>& ctx) { if (&ctx != this->_ctx) { if (_handle != NULL) { CUDNN_CHECK(cudnnDestroy(_handle)); } this->_ctx = &ctx; hipStream_t cuda_stream; cuda_stream = ctx.get_compute_stream(); CUDNN_CHECK(cudnnCreate(&_handle)); CUDNN_CHECK(cudnnSetStream(_handle, cuda_stream)); } int input_num = inputs[0]->num(); int input_channel = inputs[0]->channel(); int input_height = inputs[0]->height(); int input_width = inputs[0]->width(); Shape in_stride = inputs[0]->get_stride(); Shape max_abs_stride = std::vector<int>{1, 1, 1, 1}; int dim_a[] = {input_num, input_channel, input_height, input_width}; int dim_b[] = {1, 1, 1, 1}; cudnn::setTensorNdDesc<float >(&_input_descs, inputs[0]->dims(), dim_a, &in_stride[0]); cudnn::setTensorNdDesc<float>(&_output_descs, _max_abs.dims(), dim_b, &max_abs_stride[0]); cudnn::setReduceTensorDesc<OpDataType >(&_reduce_tensor_descs, CUDNN_REDUCE_TENSOR_AMAX, CUDNN_PROPAGATE_NAN, CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_64BIT_INDICES); // Get fastest implement of cudnn // set up algo and workspace size size_t workspace_size = 0; CUDNN_CHECK(cudnnGetReductionWorkspaceSize( _handle, _reduce_tensor_descs, _input_descs, _output_descs, &workspace_size)); if (workspace_size > _workspaceSizeInBytes) { _workspaceSizeInBytes = workspace_size; if (_workspace != NULL) { hipFree(_workspace); } hipMalloc(&_workspace, _workspaceSizeInBytes); } size_t indices_size = 0; CUDNN_CHECK(cudnnGetReductionIndicesSize(_handle, _reduce_tensor_descs, _input_descs, _output_descs, &indices_size)); if (indices_size > _indices_size) { 
_indices_size = indices_size; if (_indices != NULL) { hipFree(_indices); } hipMalloc(&_indices, _indices_size); } return SaberSuccess; } template <> SaberStatus SaberFakeQuantizeAbsMax<NV, AK_FLOAT>::\ init(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, FakeQuantizeAbsMaxParam<NV>& param, Context<NV>& ctx) { _workspaceSizeInBytes = 0; _workspace = NULL; _indices = NULL; _indices_size = 0; this->_ctx = &ctx; // ---- get cuda resources ---- hipStream_t cuda_stream; cuda_stream = ctx.get_compute_stream(); CUDNN_CHECK(cudnnCreate(&_handle)); CUDNN_CHECK(cudnnSetStream(_handle, cuda_stream)); int in_channels = inputs[0]->channel(); // ---- create cudnn Descs ---- cudnn::createReduceTensorDesc<OpDataType>(&_reduce_tensor_descs); cudnn::createTensorDesc<OpDataType>(&_input_descs); cudnn::createTensorDesc<OpDataType>(&_output_descs); Shape max_abs_shape = std::vector<int>{1, 1, 1, 1}; _max_abs.reshape(max_abs_shape); return create(inputs, outputs, param, ctx); } template <typename Dtype, typename Ttype> __global__ void ker_fake_quantize_max_abs_fwd(Ttype * out_data, \ const Dtype* in_data, const Dtype scale, const int count) { CUDA_KERNEL_LOOP(tid, count){ out_data[tid] = round(in_data[tid] * scale); //printf("%d, %d\n", tid, (int)out_data[tid]); } } template <DataType OpDtype> SaberStatus SaberFakeQuantizeAbsMax<NV, OpDtype>::dispatch(\ const std::vector<Tensor<NV> *>& inputs, \ std::vector<Tensor<NV> *>& outputs, \ FakeQuantizeAbsMaxParam<NV>& param) { const OpDataType* in_data = (const OpDataType*)inputs[0]->data(); OpDataType* max_abs_data = (OpDataType*) _max_abs.mutable_data(); hipStream_t cuda_stream = this->_ctx->get_compute_stream(); int count = outputs[0]->valid_size(); float alpha = 1.0f; float beta = 0.f; OpDataType cpu_max_abs_data; if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { cudnnReduceTensor(_handle, _reduce_tensor_descs, _indices, _indices_size, _workspace, _workspaceSizeInBytes, &alpha, _input_descs, 
in_data, &beta, _output_descs, max_abs_data); hipMemcpyAsync((void*)&cpu_max_abs_data, (void*)max_abs_data, sizeof(OpDataType) * 1, hipMemcpyDeviceToHost, cuda_stream); OpDataType scale = ((1 << (param.bit_length - 1)) - 1) / cpu_max_abs_data; auto out_data = outputs[0]->mutable_data(); //LOG(INFO) <<"gpu max_data" << cpu_max_abs_data; if (param.bit_length == 8) { hipLaunchKernelGGL(( ker_fake_quantize_max_abs_fwd<OpDataType, char>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ (char*)out_data, in_data, \ scale, count); } else if (param.bit_length == 16) { hipLaunchKernelGGL(( ker_fake_quantize_max_abs_fwd<OpDataType, int16_t>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ (int16_t*)out_data, in_data, \ scale, count); } else { LOG(FATAL) << "other bit length has not been supported"; } } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberFakeQuantizeAbsMax, FakeQuantizeAbsMaxParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberFakeQuantizeAbsMax, FakeQuantizeAbsMaxParam, NV, AK_INT8); } }
f8ef1a1969146b290ad06c5e8a9788c9685d22ab.cu
#include "saber/funcs/impl/cuda/saber_fake_quantize_abs_max.h" #include "cuda_fp16.h" #include "saber/funcs/impl/cuda/cudnn_helper.h" namespace anakin { namespace saber { template <> SaberStatus SaberFakeQuantizeAbsMax<NV, AK_FLOAT>::\ create(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, FakeQuantizeAbsMaxParam<NV>& param, Context<NV>& ctx) { if (&ctx != this->_ctx) { if (_handle != NULL) { CUDNN_CHECK(cudnnDestroy(_handle)); } this->_ctx = &ctx; cudaStream_t cuda_stream; cuda_stream = ctx.get_compute_stream(); CUDNN_CHECK(cudnnCreate(&_handle)); CUDNN_CHECK(cudnnSetStream(_handle, cuda_stream)); } int input_num = inputs[0]->num(); int input_channel = inputs[0]->channel(); int input_height = inputs[0]->height(); int input_width = inputs[0]->width(); Shape in_stride = inputs[0]->get_stride(); Shape max_abs_stride = std::vector<int>{1, 1, 1, 1}; int dim_a[] = {input_num, input_channel, input_height, input_width}; int dim_b[] = {1, 1, 1, 1}; cudnn::setTensorNdDesc<float >(&_input_descs, inputs[0]->dims(), dim_a, &in_stride[0]); cudnn::setTensorNdDesc<float>(&_output_descs, _max_abs.dims(), dim_b, &max_abs_stride[0]); cudnn::setReduceTensorDesc<OpDataType >(&_reduce_tensor_descs, CUDNN_REDUCE_TENSOR_AMAX, CUDNN_PROPAGATE_NAN, CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_64BIT_INDICES); // Get fastest implement of cudnn // set up algo and workspace size size_t workspace_size = 0; CUDNN_CHECK(cudnnGetReductionWorkspaceSize( _handle, _reduce_tensor_descs, _input_descs, _output_descs, &workspace_size)); if (workspace_size > _workspaceSizeInBytes) { _workspaceSizeInBytes = workspace_size; if (_workspace != NULL) { cudaFree(_workspace); } cudaMalloc(&_workspace, _workspaceSizeInBytes); } size_t indices_size = 0; CUDNN_CHECK(cudnnGetReductionIndicesSize(_handle, _reduce_tensor_descs, _input_descs, _output_descs, &indices_size)); if (indices_size > _indices_size) { _indices_size = indices_size; if (_indices != NULL) { cudaFree(_indices); } 
cudaMalloc(&_indices, _indices_size); } return SaberSuccess; } template <> SaberStatus SaberFakeQuantizeAbsMax<NV, AK_FLOAT>::\ init(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, FakeQuantizeAbsMaxParam<NV>& param, Context<NV>& ctx) { _workspaceSizeInBytes = 0; _workspace = NULL; _indices = NULL; _indices_size = 0; this->_ctx = &ctx; // ---- get cuda resources ---- cudaStream_t cuda_stream; cuda_stream = ctx.get_compute_stream(); CUDNN_CHECK(cudnnCreate(&_handle)); CUDNN_CHECK(cudnnSetStream(_handle, cuda_stream)); int in_channels = inputs[0]->channel(); // ---- create cudnn Descs ---- cudnn::createReduceTensorDesc<OpDataType>(&_reduce_tensor_descs); cudnn::createTensorDesc<OpDataType>(&_input_descs); cudnn::createTensorDesc<OpDataType>(&_output_descs); Shape max_abs_shape = std::vector<int>{1, 1, 1, 1}; _max_abs.reshape(max_abs_shape); return create(inputs, outputs, param, ctx); } template <typename Dtype, typename Ttype> __global__ void ker_fake_quantize_max_abs_fwd(Ttype * out_data, \ const Dtype* in_data, const Dtype scale, const int count) { CUDA_KERNEL_LOOP(tid, count){ out_data[tid] = round(in_data[tid] * scale); //printf("%d, %d\n", tid, (int)out_data[tid]); } } template <DataType OpDtype> SaberStatus SaberFakeQuantizeAbsMax<NV, OpDtype>::dispatch(\ const std::vector<Tensor<NV> *>& inputs, \ std::vector<Tensor<NV> *>& outputs, \ FakeQuantizeAbsMaxParam<NV>& param) { const OpDataType* in_data = (const OpDataType*)inputs[0]->data(); OpDataType* max_abs_data = (OpDataType*) _max_abs.mutable_data(); cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); int count = outputs[0]->valid_size(); float alpha = 1.0f; float beta = 0.f; OpDataType cpu_max_abs_data; if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { cudnnReduceTensor(_handle, _reduce_tensor_descs, _indices, _indices_size, _workspace, _workspaceSizeInBytes, &alpha, _input_descs, in_data, &beta, _output_descs, max_abs_data); 
cudaMemcpyAsync((void*)&cpu_max_abs_data, (void*)max_abs_data, sizeof(OpDataType) * 1, cudaMemcpyDeviceToHost, cuda_stream); OpDataType scale = ((1 << (param.bit_length - 1)) - 1) / cpu_max_abs_data; auto out_data = outputs[0]->mutable_data(); //LOG(INFO) <<"gpu max_data" << cpu_max_abs_data; if (param.bit_length == 8) { ker_fake_quantize_max_abs_fwd<OpDataType, char>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ (char*)out_data, in_data, \ scale, count); } else if (param.bit_length == 16) { ker_fake_quantize_max_abs_fwd<OpDataType, int16_t>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ (int16_t*)out_data, in_data, \ scale, count); } else { LOG(FATAL) << "other bit length has not been supported"; } } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberFakeQuantizeAbsMax, FakeQuantizeAbsMaxParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberFakeQuantizeAbsMax, FakeQuantizeAbsMaxParam, NV, AK_INT8); } }
f5b0dba73954af9d7696588e2ab62383f4ecfc59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include"scrImagePgmPpmPackage.h" #include<omp.h> #define MIN(X,Y) ((X<Y) ? X:Y) __global__ void merging_kernel(unsigned char *in1, unsigned char*in2,unsigned char *out, long w, long h, int lower, int upper) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = (blockDim.y * blockIdx.y + threadIdx.y); int index = y*w+x; out[index] = (in1[index]+in2[index])/2; } void merge_single_gpu(unsigned char *in1, unsigned char*in2,unsigned char *out, long w, long h) { unsigned char *d_in1,*d_in2,*d_out; dim3 blockDim(32,32,1); dim3 gridDim(w/32,h/32,1); int size = w*h*sizeof(unsigned char); hipMalloc(&d_in1, size); hipMalloc(&d_in2, size); hipMalloc(&d_out, size); for(int i=0;i<32;i++) { hipMemcpyAsync(d_in1, in1, size, hipMemcpyHostToDevice); hipMemcpyAsync(d_in2, in2, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( merging_kernel), dim3(gridDim),dim3(blockDim), 0, 0, d_in1,d_in2,d_out,w,h,0,h); hipMemcpyAsync(out, d_out, size, hipMemcpyDeviceToHost); } hipDeviceSynchronize(); hipFree(d_in1); hipFree(d_in2); hipFree(d_out); } void merge_multi_gpu(unsigned char *in1, unsigned char*in2,unsigned char *out, long w, long h) { int noDevices; hipGetDeviceCount(&noDevices); hipStream_t *streams; streams = (hipStream_t*) malloc(sizeof(hipStream_t) * noDevices); #pragma omp parallel num_threads(noDevices) { int block = omp_get_thread_num(); int blockSize = sizeof(unsigned char) * w* (h/noDevices); unsigned char *d_in1,*d_in2,*d_out; long lower = block*(h/noDevices); // Compute Lower long upper = MIN(h, lower+(h/noDevices)); // Compute Upper dim3 blockDim(32,32,1); dim3 gridDim(w/32,(h/noDevices)/32,1); hipSetDevice(block); hipStreamCreate(&streams[block]); printf("\n T[%d] L[%d] U[%d] Gr[%d][%d]",block,lower,upper,gridDim.x,gridDim.y); hipMalloc(&d_in1, blockSize); hipMalloc(&d_in2, blockSize); hipMalloc(&d_out, blockSize); #pragma omp barrier for(int 
i=0;i<32;i++) { hipMemcpyAsync(d_in1, in1+(lower*w), blockSize, hipMemcpyHostToDevice,streams[block]); hipMemcpyAsync(d_in2, in2+(lower*w), blockSize, hipMemcpyHostToDevice, streams[block]); hipLaunchKernelGGL(( merging_kernel), dim3(gridDim),dim3(blockDim),0,streams[block], d_in1,d_in2,d_out,w,h,lower,upper); hipMemcpyAsync(out+(lower*w), d_out, blockSize, hipMemcpyDeviceToHost,streams[block]); } hipFree(d_in1); hipFree(d_in2); hipFree(d_out); } hipDeviceSynchronize(); } int main(int argc, char*argv[]) { int height=0, width =0; unsigned char*data1,*data2; unsigned char*merged_data; char inputStr1[1024] = {"cat.pgm"}; char inputStr2[1024] = {"dog.pgm"}; char outputPipelineStr[1024] = {"merged_pipeline.pgm"}; get_PgmPpmParams(inputStr1, &height, &width); //getting height and width of the current image hipHostMalloc(&data1,height*width*sizeof(unsigned char)); hipHostMalloc(&data2,height*width*sizeof(unsigned char)); hipHostMalloc(&merged_data,height*width*sizeof(unsigned char)); printf("\n Reading image height and width [%d][%d]\n", height, width); scr_read_pgm( inputStr1 , data1, height, width );//loading an image to "inputimage" scr_read_pgm( inputStr2 , data2, height, width );//loading an image to "inputimage" merge_single_gpu(data1,data2,merged_data, width,height); merge_multi_gpu(data1,data2,merged_data, width,height); scr_write_pgm( outputPipelineStr, merged_data, height, width, "Merged Pipeline" ); //storing the image with the detections if(data1 != NULL) hipHostFree(data1); if(data2 != NULL) hipHostFree(data2); if(merged_data != NULL) hipHostFree(merged_data); printf("\n Done"); return 0; }
f5b0dba73954af9d7696588e2ab62383f4ecfc59.cu
#include<stdio.h> #include<stdlib.h> #include"scrImagePgmPpmPackage.h" #include<omp.h> #define MIN(X,Y) ((X<Y) ? X:Y) __global__ void merging_kernel(unsigned char *in1, unsigned char*in2,unsigned char *out, long w, long h, int lower, int upper) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = (blockDim.y * blockIdx.y + threadIdx.y); int index = y*w+x; out[index] = (in1[index]+in2[index])/2; } void merge_single_gpu(unsigned char *in1, unsigned char*in2,unsigned char *out, long w, long h) { unsigned char *d_in1,*d_in2,*d_out; dim3 blockDim(32,32,1); dim3 gridDim(w/32,h/32,1); int size = w*h*sizeof(unsigned char); cudaMalloc(&d_in1, size); cudaMalloc(&d_in2, size); cudaMalloc(&d_out, size); for(int i=0;i<32;i++) { cudaMemcpyAsync(d_in1, in1, size, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_in2, in2, size, cudaMemcpyHostToDevice); merging_kernel<<<gridDim,blockDim>>>(d_in1,d_in2,d_out,w,h,0,h); cudaMemcpyAsync(out, d_out, size, cudaMemcpyDeviceToHost); } cudaDeviceSynchronize(); cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out); } void merge_multi_gpu(unsigned char *in1, unsigned char*in2,unsigned char *out, long w, long h) { int noDevices; cudaGetDeviceCount(&noDevices); cudaStream_t *streams; streams = (cudaStream_t*) malloc(sizeof(cudaStream_t) * noDevices); #pragma omp parallel num_threads(noDevices) { int block = omp_get_thread_num(); int blockSize = sizeof(unsigned char) * w* (h/noDevices); unsigned char *d_in1,*d_in2,*d_out; long lower = block*(h/noDevices); // Compute Lower long upper = MIN(h, lower+(h/noDevices)); // Compute Upper dim3 blockDim(32,32,1); dim3 gridDim(w/32,(h/noDevices)/32,1); cudaSetDevice(block); cudaStreamCreate(&streams[block]); printf("\n T[%d] L[%d] U[%d] Gr[%d][%d]",block,lower,upper,gridDim.x,gridDim.y); cudaMalloc(&d_in1, blockSize); cudaMalloc(&d_in2, blockSize); cudaMalloc(&d_out, blockSize); #pragma omp barrier for(int i=0;i<32;i++) { cudaMemcpyAsync(d_in1, in1+(lower*w), blockSize, cudaMemcpyHostToDevice,streams[block]); 
cudaMemcpyAsync(d_in2, in2+(lower*w), blockSize, cudaMemcpyHostToDevice, streams[block]); merging_kernel<<<gridDim,blockDim,0,streams[block]>>>(d_in1,d_in2,d_out,w,h,lower,upper); cudaMemcpyAsync(out+(lower*w), d_out, blockSize, cudaMemcpyDeviceToHost,streams[block]); } cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out); } cudaDeviceSynchronize(); } int main(int argc, char*argv[]) { int height=0, width =0; unsigned char*data1,*data2; unsigned char*merged_data; char inputStr1[1024] = {"cat.pgm"}; char inputStr2[1024] = {"dog.pgm"}; char outputPipelineStr[1024] = {"merged_pipeline.pgm"}; get_PgmPpmParams(inputStr1, &height, &width); //getting height and width of the current image cudaMallocHost(&data1,height*width*sizeof(unsigned char)); cudaMallocHost(&data2,height*width*sizeof(unsigned char)); cudaMallocHost(&merged_data,height*width*sizeof(unsigned char)); printf("\n Reading image height and width [%d][%d]\n", height, width); scr_read_pgm( inputStr1 , data1, height, width );//loading an image to "inputimage" scr_read_pgm( inputStr2 , data2, height, width );//loading an image to "inputimage" merge_single_gpu(data1,data2,merged_data, width,height); merge_multi_gpu(data1,data2,merged_data, width,height); scr_write_pgm( outputPipelineStr, merged_data, height, width, "Merged Pipeline" ); //storing the image with the detections if(data1 != NULL) cudaFreeHost(data1); if(data2 != NULL) cudaFreeHost(data2); if(merged_data != NULL) cudaFreeHost(merged_data); printf("\n Done"); return 0; }
5f80caf011f1fa5b04e081113f4082a4e0918cf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cutil.h" #include "rng.h" #include "reduce.h" #include "test_functions.h" #define PI 3.14159265358979f // p(x) = 1/2 * N(x;-1,0.5^2) + 1/2 * N(x;1.5,0.5^2) __device__ float target_pdf(float x) { return 1.0f / sqrtf(2 * PI) * exp(-(x - 1.5) * (x - 1.5) / 0.5f) + 1.0f / sqrtf(2 * PI) * exp( -(x + 1) * (x + 1) / 0.5f); } // q(x) = N(x;0,1) __device__ float proposal_pdf(float x) { return 1.0f / sqrtf(2 * PI) * exp(-x * x / 2.0f); } float target_pdfh(float x) { return 1.0f / sqrtf(2 * PI) * exp(-(x - 1.5f) * (x - 1.5f) / 0.5f) + 1.0f / sqrtf(2 * PI) * ((float) exp( -(x + 1) * (x + 1) / 0.5f)); } float proposal_pdfh(float x) { return 1.0f / sqrtf(2 * PI) * ((float) exp(-x * x / 2.0f)); } __device__ float phi(float x) { return x * x; } float phih(float x) { return x * x; } __global__ void is(int N, float* d_array, float* d_array_out) { // get thread identifier const int tid = blockDim.x * blockIdx.x + threadIdx.x; // get total number of threads const int tt = blockDim.x * gridDim.x; int i; float w, x; for (i = tid; i < N; i += tt) { x = d_array[i]; w = target_pdf(x) / proposal_pdf(x); d_array_out[i] = phi(x) * w; } } void eg_v1() { unsigned int hTimer; double ctime, gtime; cutCreateTimer(&hTimer); seed_rng(); // int N = 1048576; int N = 16777216; int nb = 64; int nt = 128; float h_sum; float* d_array; float* d_array_out; // float* d_warray; hipMalloc((void **) &d_array, N * sizeof(float)); hipMalloc((void **) &d_array_out, N * sizeof(float)); // hipMalloc((void **) &d_warray, N * sizeof(float)); populate_randn_d(d_array, N); cutResetTimer(hTimer); cutStartTimer(hTimer); hipLaunchKernelGGL(( is), dim3(nb),dim3(nt), 0, 0, N, d_array, d_array_out); // hipDeviceSynchronize(); // multiply(N, d_array, d_array2, d_warray, nb, nt); hipDeviceSynchronize(); reduce(N, d_array_out, h_sum, nb, nt); hipDeviceSynchronize(); cutStopTimer(hTimer); gtime = 
cutGetTimerValue(hTimer); printf("Time = %f\n", gtime); printf("RESULT = %f\n", h_sum / N); float* array = (float*) malloc(N * sizeof(float)); hipMemcpy(array, d_array, N * sizeof(float), hipMemcpyDeviceToHost); cutResetTimer(hTimer); cutStartTimer(hTimer); double h_sum2 = 0; for (int i = 0; i < N; i++) { float x = array[i]; h_sum2 += phih(x) * target_pdfh(x) / proposal_pdfh(x); } cutStopTimer(hTimer); ctime = cutGetTimerValue(hTimer); printf("Time = %f\n", ctime); printf("RESULT = %f\n", h_sum2 / N); printf("speedup = %f\n", ctime / gtime); kill_rng(); } void eg_v2() { unsigned int hTimer; double ctime, gtime; cutCreateTimer(&hTimer); seed_rng(); int N = 16777216; float h_sum, result; float* d_array; float* d_array_out; hipMalloc((void **) &d_array, N * sizeof(float)); hipMalloc((void **) &d_array_out, N * sizeof(float)); float* array = (float*) malloc(N * sizeof(float)); populate_randn(array, N); hipMemcpy(d_array, array, N * sizeof(float), hipMemcpyHostToDevice); cutResetTimer(hTimer); cutStartTimer(hTimer); hipLaunchKernelGGL(( is), dim3(64),dim3(128), 0, 0, N, d_array, d_array_out); reduce(N, d_array_out, h_sum, 64, 128); cutStopTimer(hTimer); gtime = cutGetTimerValue(hTimer); printf("Time = %f\n", gtime); result = h_sum / N; printf("RESULT = %f\n", result); cutResetTimer(hTimer); cutStartTimer(hTimer); double h_sum2 = 0; for (int i = 0; i < N; i++) { float x = array[i]; h_sum2 += phih(x) * target_pdfh(x) / proposal_pdfh(x); } cutStopTimer(hTimer); ctime = cutGetTimerValue(hTimer); printf("Time = %f\n", ctime); printf("RESULT = %f\n", h_sum2 / N); printf("speedup = %f\n", ctime / gtime); free(array); hipFree(d_array); hipFree(d_array_out); kill_rng(); } int main(int argc, char **argv) { // eg_v1(); eg_v2(); }
5f80caf011f1fa5b04e081113f4082a4e0918cf9.cu
#include <stdio.h> #include "cutil.h" #include "rng.h" #include "reduce.h" #include "test_functions.h" #define PI 3.14159265358979f // p(x) = 1/2 * N(x;-1,0.5^2) + 1/2 * N(x;1.5,0.5^2) __device__ float target_pdf(float x) { return 1.0f / sqrtf(2 * PI) * exp(-(x - 1.5) * (x - 1.5) / 0.5f) + 1.0f / sqrtf(2 * PI) * exp( -(x + 1) * (x + 1) / 0.5f); } // q(x) = N(x;0,1) __device__ float proposal_pdf(float x) { return 1.0f / sqrtf(2 * PI) * exp(-x * x / 2.0f); } float target_pdfh(float x) { return 1.0f / sqrtf(2 * PI) * exp(-(x - 1.5f) * (x - 1.5f) / 0.5f) + 1.0f / sqrtf(2 * PI) * ((float) exp( -(x + 1) * (x + 1) / 0.5f)); } float proposal_pdfh(float x) { return 1.0f / sqrtf(2 * PI) * ((float) exp(-x * x / 2.0f)); } __device__ float phi(float x) { return x * x; } float phih(float x) { return x * x; } __global__ void is(int N, float* d_array, float* d_array_out) { // get thread identifier const int tid = blockDim.x * blockIdx.x + threadIdx.x; // get total number of threads const int tt = blockDim.x * gridDim.x; int i; float w, x; for (i = tid; i < N; i += tt) { x = d_array[i]; w = target_pdf(x) / proposal_pdf(x); d_array_out[i] = phi(x) * w; } } void eg_v1() { unsigned int hTimer; double ctime, gtime; cutCreateTimer(&hTimer); seed_rng(); // int N = 1048576; int N = 16777216; int nb = 64; int nt = 128; float h_sum; float* d_array; float* d_array_out; // float* d_warray; cudaMalloc((void **) &d_array, N * sizeof(float)); cudaMalloc((void **) &d_array_out, N * sizeof(float)); // cudaMalloc((void **) &d_warray, N * sizeof(float)); populate_randn_d(d_array, N); cutResetTimer(hTimer); cutStartTimer(hTimer); is<<<nb,nt>>>(N, d_array, d_array_out); // cudaThreadSynchronize(); // multiply(N, d_array, d_array2, d_warray, nb, nt); cudaThreadSynchronize(); reduce(N, d_array_out, h_sum, nb, nt); cudaThreadSynchronize(); cutStopTimer(hTimer); gtime = cutGetTimerValue(hTimer); printf("Time = %f\n", gtime); printf("RESULT = %f\n", h_sum / N); float* array = (float*) malloc(N * 
sizeof(float)); cudaMemcpy(array, d_array, N * sizeof(float), cudaMemcpyDeviceToHost); cutResetTimer(hTimer); cutStartTimer(hTimer); double h_sum2 = 0; for (int i = 0; i < N; i++) { float x = array[i]; h_sum2 += phih(x) * target_pdfh(x) / proposal_pdfh(x); } cutStopTimer(hTimer); ctime = cutGetTimerValue(hTimer); printf("Time = %f\n", ctime); printf("RESULT = %f\n", h_sum2 / N); printf("speedup = %f\n", ctime / gtime); kill_rng(); } void eg_v2() { unsigned int hTimer; double ctime, gtime; cutCreateTimer(&hTimer); seed_rng(); int N = 16777216; float h_sum, result; float* d_array; float* d_array_out; cudaMalloc((void **) &d_array, N * sizeof(float)); cudaMalloc((void **) &d_array_out, N * sizeof(float)); float* array = (float*) malloc(N * sizeof(float)); populate_randn(array, N); cudaMemcpy(d_array, array, N * sizeof(float), cudaMemcpyHostToDevice); cutResetTimer(hTimer); cutStartTimer(hTimer); is<<<64,128>>>(N, d_array, d_array_out); reduce(N, d_array_out, h_sum, 64, 128); cutStopTimer(hTimer); gtime = cutGetTimerValue(hTimer); printf("Time = %f\n", gtime); result = h_sum / N; printf("RESULT = %f\n", result); cutResetTimer(hTimer); cutStartTimer(hTimer); double h_sum2 = 0; for (int i = 0; i < N; i++) { float x = array[i]; h_sum2 += phih(x) * target_pdfh(x) / proposal_pdfh(x); } cutStopTimer(hTimer); ctime = cutGetTimerValue(hTimer); printf("Time = %f\n", ctime); printf("RESULT = %f\n", h_sum2 / N); printf("speedup = %f\n", ctime / gtime); free(array); cudaFree(d_array); cudaFree(d_array_out); kill_rng(); } int main(int argc, char **argv) { // eg_v1(); eg_v2(); }
69142f19556f57ed133adbedc4ec1931d2ce0a69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * mcmc_kernel_mv.cu * * Created on: 24-Feb-2009 * Author: alee */ #include "temper.ch" #include "matrix.ch" #include "matrix.h" #include <stdio.h> #include "sharedmem.cuh" #include "test_functions.h" #include "rng.h" __constant__ float args_p[NUM_AP]; template<int D> __global__ void FUNC( metropolis_rw_gpu, TYPE)(int N, float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_array_out, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int nt = blockDim.x * gridDim.x; int j; float* x; float* w; float ratio; SharedMemory<float> smem; float* sdata = smem.getPointer(); float* y = sdata + D * threadIdx.x; x = d_vector_get(d_array_init, D, tid); for (j = tid; j < N; j += nt) { w = d_vector_get(d_array_step, D, j); d_vector_add(x, w, y, D); // Metropolis so q(y,x) = q(x,y) if (log == 0) { ratio = TARGET<D> (y, args_p) / TARGET<D> (x, args_p); } else { ratio = expf(LOG_TARGET<D> (y, args_p) - LOG_TARGET<D> (x, args_p)); } if (d_array_uniform[j] < ratio) { d_vector_set(x, y, D); } d_vector_set(d_vector_get(d_array_out, D, j), x, D); } } template <int D> void FUNC( metropolis_rw, TYPE)(int N, float* d_array_init, float sigma, float* d_array_out, float* h_args_p, int log, int nb, int nt) { hipMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); float* d_array_uniform; hipMalloc((void **) &d_array_uniform, N * sizeof(float)); populate_rand_d(d_array_uniform, N); float* d_array_step; hipMalloc((void **) &d_array_step, N * D * sizeof(float)); populate_randn_d(d_array_step, N * D); if (sigma != 1.0) { multiply(N * D, d_array_step, d_array_step, sigma, nb, nt); } FUNC(metropolis_rw_gpu,hipLaunchKernelGGL(( TYPE) < D>) , dim3(nb),dim3(nt),D*nt*sizeof(float), 0, N, d_array_init, d_array_step, d_array_uniform, d_array_out, log); hipFree(d_array_uniform); hipFree(d_array_step); } template <int D> __global__ void FUNC( metropolis_rwpop_step, 
TYPE)(float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_temps, float* d_array_out, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; float* w; float* x; float t, ratio; SharedMemory<float> smem; float* sdata = smem.getPointer(); float* y = sdata + D * threadIdx.x; t = d_temps[tid]; x = d_vector_get(d_array_init, D, tid); w = d_vector_get(d_array_step, D, tid); d_vector_add(x, w, y, D); // Metropolis so q(y,x) = q(x,y) if (log == 0) { ratio = temper(TARGET<D> (y, args_p), t) / temper(TARGET<D> (x, args_p), t); } else { ratio = expf(LOG_TARGET<D> (y, args_p) * t - LOG_TARGET<D> (x, args_p) * t); } if (d_array_uniform[tid] < ratio) { d_vector_set(d_vector_get(d_array_out, D, tid), y, D); } else { d_vector_set(d_vector_get(d_array_out, D, tid), x, D); } } template <int D> __global__ void FUNC( metropolis_rwpop_step2, TYPE)(float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_temps, float* d_array_out, int log, float* densities) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; float* w; float* x; float t, ratio; SharedMemory<float> smem; float* sdata = smem.getPointer(); float* y = sdata + D * threadIdx.x; t = d_temps[tid]; x = d_vector_get(d_array_init, D, tid); w = d_vector_get(d_array_step, D, tid); d_vector_add(x, w, y, D); // Metropolis so q(y,x) = q(x,y) float nv; if (log == 0) { nv = TARGET<D> (y, args_p); ratio = temper(nv, t) / temper(densities[tid], t); } else { nv = LOG_TARGET<D> (y, args_p); ratio = expf(nv * t - densities[tid] * t); } if (d_array_uniform[tid] < ratio) { densities[tid] = nv; d_vector_set(d_vector_get(d_array_out, D, tid), y, D); } else { d_vector_set(d_vector_get(d_array_out, D, tid), x, D); } } template <int D> __global__ void FUNC( metropolis_rwpop_init, TYPE)(float* d_array_init, int log, float* densities) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; float* x = d_vector_get(d_array_init, D, tid); if (log == 0) { densities[tid] = TARGET<D> (x, args_p); } 
else { densities[tid] = LOG_TARGET<D> (x, args_p); } } template<int D> __global__ void FUNC( metropolis_rwpop_exchange, TYPE)(float* d_array_values, int type, float* d_temps, float* d_array_uniform, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int tt = blockDim.x * gridDim.x; // if ((type == 1 && tid % 2 == 0) || (type == 0 && tid % 2 == 1)) { if (tid % 2 == type) { int otid = (tid + 1) % tt; float* x = d_vector_get(d_array_values, D, tid); float* y = d_vector_get(d_array_values, D, otid); float t = d_temps[tid]; float t2 = d_temps[otid]; float ratio; if (log) { float ty = LOG_TARGET<D> (y, args_p); float tx = LOG_TARGET<D> (x, args_p); ratio = expf(ty * (t - t2) + tx * (t2 - t)); } else { ratio = temper(TARGET<D> (y, args_p), t) / temper(TARGET<D> (y, args_p), t2) * temper( TARGET<D> (x, args_p), t2) / temper(TARGET<D> (x, args_p), t); } if (d_array_uniform[tid] < ratio) { d_vector_swap(x, y, D); } } } template<int D> __global__ void FUNC( metropolis_rwpop_exchange2, TYPE)(float* d_array_values, int type, float* d_temps, float* d_array_uniform, int log, float* densities) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int tt = blockDim.x * gridDim.x; // if ((type == 1 && tid % 2 == 0) || (type == 0 && tid % 2 == 1)) { if (tid % 2 == type) { int otid = (tid + 1) % tt; float* x = d_vector_get(d_array_values, D, tid); float* y = d_vector_get(d_array_values, D, otid); float t = d_temps[tid]; float t2 = d_temps[otid]; float ratio; float ty = densities[otid]; float tx = densities[tid]; if (log) { ratio = expf(ty * (t - t2) + tx * (t2 - t)); } else { ratio = temper(ty, t - t2) * temper(tx, t2 - t); } if (d_array_uniform[tid] < ratio) { densities[tid] = ty; densities[otid] = tx; d_vector_swap(x, y, D); } } } template<int D> void FUNC( metropolis_rwpop, TYPE)(int N, float* d_array_init, float sigma, float* h_args_p, float* d_temps, float* d_array_out, int log, int nb, int nt) { hipMemcpyToSymbol(args_p, h_args_p, NUM_AP * 
sizeof(float)); int tt = nb * nt; int numSteps = N / tt; int* array_types = (int*) malloc(numSteps * sizeof(int)); populate_randIK(array_types, numSteps, 2); float* d_array_step; hipMalloc((void **) &d_array_step, N * D * sizeof(float)); populate_randn_d(d_array_step, N * D); if (sigma != 1.0) { multiply(N * D, d_array_step, d_array_step, sigma, nb, nt); } float* d_array_uniform1; float* d_array_uniform2; hipMalloc((void **) &d_array_uniform1, N * sizeof(float)); hipMalloc((void **) &d_array_uniform2, N * sizeof(float)); populate_rand_d(d_array_uniform1, N); populate_rand_d(d_array_uniform2, N); float* du1 = d_array_uniform1; float* du2 = d_array_uniform2; float* ds = d_array_step; for (int i = 0; i < numSteps; i++) { // printf("on step %d\n", i); FUNC(metropolis_rwpop_step,hipLaunchKernelGGL(( TYPE)<D>), dim3(nb),dim3(nt),D*nt*sizeof(float), 0, d_array_init, ds, du1, d_temps, d_array_out, log); hipDeviceSynchronize(); FUNC(metropolis_rwpop_exchange,hipLaunchKernelGGL(( TYPE)<D>), dim3(nb),dim3(nt), 0, 0, d_array_out, array_types[i], d_temps, du2, log); hipDeviceSynchronize(); d_array_init = d_array_out; ds += tt * D; du1 += tt; d_array_out += tt * D; du2 += tt; } hipFree(d_array_uniform1); hipFree(d_array_uniform2); hipFree(d_array_step); free(array_types); } template<int D> void FUNC( metropolis_rwpop_marginal, TYPE)(int N, float* d_array_init, float sigma, float* h_args_p, float* d_temps, float* d_array_out, int log, int nb, int nt) { hipMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); int tt = nb * nt; int numSteps = N / tt; int* array_types = (int*) malloc(numSteps * sizeof(int)); populate_randIK(array_types, numSteps, 2); float* d_array_step; hipMalloc((void **) &d_array_step, N * D * sizeof(float)); populate_randn_d(d_array_step, N * D); if (sigma != 1.0) { multiply(N * D, d_array_step, d_array_step, sigma, nb, nt); } float* d_array_uniform1; float* d_array_uniform2; hipMalloc((void **) &d_array_uniform1, N * sizeof(float)); hipMalloc((void **) 
&d_array_uniform2, N * sizeof(float)); populate_rand_d(d_array_uniform1, N); populate_rand_d(d_array_uniform2, N); float* du1 = d_array_uniform1; float* du2 = d_array_uniform2; float* ds = d_array_step; float* d_array_temp; hipMalloc((void**) &d_array_temp, tt * D * sizeof(float)); float* densities; hipMalloc((void**) &densities, tt * sizeof(float)); FUNC( metropolis_rwpop_init,hipLaunchKernelGGL(( TYPE)<D>), dim3(nb),dim3(nt), 0, 0, d_array_init, log, densities); hipDeviceSynchronize(); for (int i = 0; i < numSteps; i++) { // printf("Time %d:\n", i); FUNC(metropolis_rwpop_step2,hipLaunchKernelGGL(( TYPE)<D>), dim3(nb),dim3(nt),D*nt*sizeof(float), 0, d_array_init, ds + i * tt * D, du1 + i * tt, d_temps, d_array_temp, log, densities); hipDeviceSynchronize(); FUNC(metropolis_rwpop_exchange2,hipLaunchKernelGGL(( TYPE)<D>), dim3(nb),dim3(nt), 0, 0, d_array_temp, array_types[i], d_temps, du2 + i * tt, log, densities); hipMemcpy(d_array_init, d_array_temp, tt * D * sizeof(float), hipMemcpyDeviceToDevice); hipMemcpy(vector_get(d_array_out, D, i), vector_get(d_array_temp, D, tt - 1), D * sizeof(float), hipMemcpyDeviceToDevice); } hipFree(d_array_uniform1); hipFree(d_array_uniform2); hipFree(d_array_step); hipFree(d_array_temp); hipFree(densities); free(array_types); }
69142f19556f57ed133adbedc4ec1931d2ce0a69.cu
/* * mcmc_kernel_mv.cu * * Created on: 24-Feb-2009 * Author: alee */ #include "temper.ch" #include "matrix.ch" #include "matrix.h" #include <stdio.h> #include "sharedmem.cuh" #include "test_functions.h" #include "rng.h" __constant__ float args_p[NUM_AP]; template<int D> __global__ void FUNC( metropolis_rw_gpu, TYPE)(int N, float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_array_out, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int nt = blockDim.x * gridDim.x; int j; float* x; float* w; float ratio; SharedMemory<float> smem; float* sdata = smem.getPointer(); float* y = sdata + D * threadIdx.x; x = d_vector_get(d_array_init, D, tid); for (j = tid; j < N; j += nt) { w = d_vector_get(d_array_step, D, j); d_vector_add(x, w, y, D); // Metropolis so q(y,x) = q(x,y) if (log == 0) { ratio = TARGET<D> (y, args_p) / TARGET<D> (x, args_p); } else { ratio = expf(LOG_TARGET<D> (y, args_p) - LOG_TARGET<D> (x, args_p)); } if (d_array_uniform[j] < ratio) { d_vector_set(x, y, D); } d_vector_set(d_vector_get(d_array_out, D, j), x, D); } } template <int D> void FUNC( metropolis_rw, TYPE)(int N, float* d_array_init, float sigma, float* d_array_out, float* h_args_p, int log, int nb, int nt) { cudaMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); float* d_array_uniform; cudaMalloc((void **) &d_array_uniform, N * sizeof(float)); populate_rand_d(d_array_uniform, N); float* d_array_step; cudaMalloc((void **) &d_array_step, N * D * sizeof(float)); populate_randn_d(d_array_step, N * D); if (sigma != 1.0) { multiply(N * D, d_array_step, d_array_step, sigma, nb, nt); } FUNC(metropolis_rw_gpu, TYPE) < D> <<<nb,nt,D*nt*sizeof(float)>>>(N, d_array_init, d_array_step, d_array_uniform, d_array_out, log); cudaFree(d_array_uniform); cudaFree(d_array_step); } template <int D> __global__ void FUNC( metropolis_rwpop_step, TYPE)(float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_temps, float* d_array_out, int log) { const 
int tid = blockDim.x * blockIdx.x + threadIdx.x; float* w; float* x; float t, ratio; SharedMemory<float> smem; float* sdata = smem.getPointer(); float* y = sdata + D * threadIdx.x; t = d_temps[tid]; x = d_vector_get(d_array_init, D, tid); w = d_vector_get(d_array_step, D, tid); d_vector_add(x, w, y, D); // Metropolis so q(y,x) = q(x,y) if (log == 0) { ratio = temper(TARGET<D> (y, args_p), t) / temper(TARGET<D> (x, args_p), t); } else { ratio = expf(LOG_TARGET<D> (y, args_p) * t - LOG_TARGET<D> (x, args_p) * t); } if (d_array_uniform[tid] < ratio) { d_vector_set(d_vector_get(d_array_out, D, tid), y, D); } else { d_vector_set(d_vector_get(d_array_out, D, tid), x, D); } } template <int D> __global__ void FUNC( metropolis_rwpop_step2, TYPE)(float* d_array_init, float* d_array_step, float* d_array_uniform, float* d_temps, float* d_array_out, int log, float* densities) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; float* w; float* x; float t, ratio; SharedMemory<float> smem; float* sdata = smem.getPointer(); float* y = sdata + D * threadIdx.x; t = d_temps[tid]; x = d_vector_get(d_array_init, D, tid); w = d_vector_get(d_array_step, D, tid); d_vector_add(x, w, y, D); // Metropolis so q(y,x) = q(x,y) float nv; if (log == 0) { nv = TARGET<D> (y, args_p); ratio = temper(nv, t) / temper(densities[tid], t); } else { nv = LOG_TARGET<D> (y, args_p); ratio = expf(nv * t - densities[tid] * t); } if (d_array_uniform[tid] < ratio) { densities[tid] = nv; d_vector_set(d_vector_get(d_array_out, D, tid), y, D); } else { d_vector_set(d_vector_get(d_array_out, D, tid), x, D); } } template <int D> __global__ void FUNC( metropolis_rwpop_init, TYPE)(float* d_array_init, int log, float* densities) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; float* x = d_vector_get(d_array_init, D, tid); if (log == 0) { densities[tid] = TARGET<D> (x, args_p); } else { densities[tid] = LOG_TARGET<D> (x, args_p); } } template<int D> __global__ void FUNC( metropolis_rwpop_exchange, 
TYPE)(float* d_array_values, int type, float* d_temps, float* d_array_uniform, int log) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int tt = blockDim.x * gridDim.x; // if ((type == 1 && tid % 2 == 0) || (type == 0 && tid % 2 == 1)) { if (tid % 2 == type) { int otid = (tid + 1) % tt; float* x = d_vector_get(d_array_values, D, tid); float* y = d_vector_get(d_array_values, D, otid); float t = d_temps[tid]; float t2 = d_temps[otid]; float ratio; if (log) { float ty = LOG_TARGET<D> (y, args_p); float tx = LOG_TARGET<D> (x, args_p); ratio = expf(ty * (t - t2) + tx * (t2 - t)); } else { ratio = temper(TARGET<D> (y, args_p), t) / temper(TARGET<D> (y, args_p), t2) * temper( TARGET<D> (x, args_p), t2) / temper(TARGET<D> (x, args_p), t); } if (d_array_uniform[tid] < ratio) { d_vector_swap(x, y, D); } } } template<int D> __global__ void FUNC( metropolis_rwpop_exchange2, TYPE)(float* d_array_values, int type, float* d_temps, float* d_array_uniform, int log, float* densities) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int tt = blockDim.x * gridDim.x; // if ((type == 1 && tid % 2 == 0) || (type == 0 && tid % 2 == 1)) { if (tid % 2 == type) { int otid = (tid + 1) % tt; float* x = d_vector_get(d_array_values, D, tid); float* y = d_vector_get(d_array_values, D, otid); float t = d_temps[tid]; float t2 = d_temps[otid]; float ratio; float ty = densities[otid]; float tx = densities[tid]; if (log) { ratio = expf(ty * (t - t2) + tx * (t2 - t)); } else { ratio = temper(ty, t - t2) * temper(tx, t2 - t); } if (d_array_uniform[tid] < ratio) { densities[tid] = ty; densities[otid] = tx; d_vector_swap(x, y, D); } } } template<int D> void FUNC( metropolis_rwpop, TYPE)(int N, float* d_array_init, float sigma, float* h_args_p, float* d_temps, float* d_array_out, int log, int nb, int nt) { cudaMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); int tt = nb * nt; int numSteps = N / tt; int* array_types = (int*) malloc(numSteps * sizeof(int)); 
populate_randIK(array_types, numSteps, 2); float* d_array_step; cudaMalloc((void **) &d_array_step, N * D * sizeof(float)); populate_randn_d(d_array_step, N * D); if (sigma != 1.0) { multiply(N * D, d_array_step, d_array_step, sigma, nb, nt); } float* d_array_uniform1; float* d_array_uniform2; cudaMalloc((void **) &d_array_uniform1, N * sizeof(float)); cudaMalloc((void **) &d_array_uniform2, N * sizeof(float)); populate_rand_d(d_array_uniform1, N); populate_rand_d(d_array_uniform2, N); float* du1 = d_array_uniform1; float* du2 = d_array_uniform2; float* ds = d_array_step; for (int i = 0; i < numSteps; i++) { // printf("on step %d\n", i); FUNC(metropolis_rwpop_step, TYPE)<D><<<nb,nt,D*nt*sizeof(float)>>>(d_array_init, ds, du1, d_temps, d_array_out, log); cudaThreadSynchronize(); FUNC(metropolis_rwpop_exchange, TYPE)<D><<<nb,nt>>>(d_array_out, array_types[i], d_temps, du2, log); cudaThreadSynchronize(); d_array_init = d_array_out; ds += tt * D; du1 += tt; d_array_out += tt * D; du2 += tt; } cudaFree(d_array_uniform1); cudaFree(d_array_uniform2); cudaFree(d_array_step); free(array_types); } template<int D> void FUNC( metropolis_rwpop_marginal, TYPE)(int N, float* d_array_init, float sigma, float* h_args_p, float* d_temps, float* d_array_out, int log, int nb, int nt) { cudaMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float)); int tt = nb * nt; int numSteps = N / tt; int* array_types = (int*) malloc(numSteps * sizeof(int)); populate_randIK(array_types, numSteps, 2); float* d_array_step; cudaMalloc((void **) &d_array_step, N * D * sizeof(float)); populate_randn_d(d_array_step, N * D); if (sigma != 1.0) { multiply(N * D, d_array_step, d_array_step, sigma, nb, nt); } float* d_array_uniform1; float* d_array_uniform2; cudaMalloc((void **) &d_array_uniform1, N * sizeof(float)); cudaMalloc((void **) &d_array_uniform2, N * sizeof(float)); populate_rand_d(d_array_uniform1, N); populate_rand_d(d_array_uniform2, N); float* du1 = d_array_uniform1; float* du2 = d_array_uniform2; 
float* ds = d_array_step; float* d_array_temp; cudaMalloc((void**) &d_array_temp, tt * D * sizeof(float)); float* densities; cudaMalloc((void**) &densities, tt * sizeof(float)); FUNC( metropolis_rwpop_init, TYPE)<D><<<nb,nt>>>(d_array_init, log, densities); cudaThreadSynchronize(); for (int i = 0; i < numSteps; i++) { // printf("Time %d:\n", i); FUNC(metropolis_rwpop_step2, TYPE)<D><<<nb,nt,D*nt*sizeof(float)>>>(d_array_init, ds + i * tt * D, du1 + i * tt, d_temps, d_array_temp, log, densities); cudaThreadSynchronize(); FUNC(metropolis_rwpop_exchange2, TYPE)<D><<<nb,nt>>>(d_array_temp, array_types[i], d_temps, du2 + i * tt, log, densities); cudaMemcpy(d_array_init, d_array_temp, tt * D * sizeof(float), cudaMemcpyDeviceToDevice); cudaMemcpy(vector_get(d_array_out, D, i), vector_get(d_array_temp, D, tt - 1), D * sizeof(float), cudaMemcpyDeviceToDevice); } cudaFree(d_array_uniform1); cudaFree(d_array_uniform2); cudaFree(d_array_step); cudaFree(d_array_temp); cudaFree(densities); free(array_types); }
e99165c749c0979a41ffb40ad71fd32e09199768.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "helper.hpp" __global__ void s2g_gpu_gather_kernel(uint32_t *in, uint32_t *out, int len) { //@@ INSERT KERNEL CODE HERE int out_reg = 0; int y = threadIdx.x + blockDim.x * blockIdx.x; for (int x = 0; x < len; ++x){ int intermediate = outInvariant(in[x]); out_reg += outDependent(intermediate, x, y); } out[y] += out_reg; } static void s2g_cpu_gather(uint32_t *in, uint32_t *out, int len) { for (int outIdx = 0; outIdx < len; ++outIdx) { int out_reg = 0; for (int inIdx = 0; inIdx < len; ++inIdx) { int intermediate = outInvariant(in[inIdx]); out_reg += outDependent(intermediate, inIdx, outIdx); } out[outIdx] += out_reg; } } static void s2g_gpu_gather(uint32_t *in, uint32_t *out, int len) { //@@ INSERT CODE HERE hipLaunchKernelGGL(( s2g_gpu_gather_kernel), dim3(ceil(len/1024.0)), dim3(1024), 0, 0, in, out, len); } static int eval(int inputLength) { uint32_t *deviceInput = nullptr; uint32_t *deviceOutput= nullptr; const std::string conf_info = std::string("gather[len:") + std::to_string(inputLength) + "]"; INFO("Running " << conf_info); auto hostInput = generate_input(inputLength); const size_t byteCount = inputLength * sizeof(uint32_t); timer_start("Allocating GPU memory."); THROW_IF_ERROR(hipMalloc((void **)&deviceInput, byteCount)); THROW_IF_ERROR(hipMalloc((void **)&deviceOutput, byteCount)); timer_stop(); timer_start("Copying input memory to the GPU."); THROW_IF_ERROR(hipMemcpy(deviceInput, hostInput.data(), byteCount, hipMemcpyHostToDevice)); THROW_IF_ERROR(hipMemset(deviceOutput, 0, byteCount)); timer_stop(); ////////////////////////////////////////// // GPU Gather Computation ////////////////////////////////////////// timer_start("Performing GPU Gather computation"); s2g_gpu_gather(deviceInput, deviceOutput, inputLength); timer_stop(); std::vector<uint32_t> hostOutput(inputLength); timer_start("Copying output memory to the CPU"); 
THROW_IF_ERROR(hipMemcpy(hostOutput.data(), deviceOutput, byteCount, hipMemcpyDeviceToHost)); timer_stop(); auto expected = compute_output(hostInput, inputLength); verify(expected, hostOutput); hipFree(deviceInput); hipFree(deviceOutput); return 0; } TEST_CASE("Gather", "[gather]") { SECTION("[inputSize:1024]") { eval(1024); } SECTION("[inputSize:2048]") { eval(2048); } SECTION("[inputSize:2047]") { eval(2047); } SECTION("[inputSize:2049]") { eval(2049); } SECTION("[inputSize:9101]") { eval(9101); } SECTION("[inputSize:9910]") { eval(9910); } SECTION("[inputSize:8192]") { eval(8192); } SECTION("[inputSize:8193]") { eval(8193); } SECTION("[inputSize:8191]") { eval(8191); } SECTION("[inputSize:16191]") { eval(16191); } }
e99165c749c0979a41ffb40ad71fd32e09199768.cu
#include "helper.hpp" __global__ void s2g_gpu_gather_kernel(uint32_t *in, uint32_t *out, int len) { //@@ INSERT KERNEL CODE HERE int out_reg = 0; int y = threadIdx.x + blockDim.x * blockIdx.x; for (int x = 0; x < len; ++x){ int intermediate = outInvariant(in[x]); out_reg += outDependent(intermediate, x, y); } out[y] += out_reg; } static void s2g_cpu_gather(uint32_t *in, uint32_t *out, int len) { for (int outIdx = 0; outIdx < len; ++outIdx) { int out_reg = 0; for (int inIdx = 0; inIdx < len; ++inIdx) { int intermediate = outInvariant(in[inIdx]); out_reg += outDependent(intermediate, inIdx, outIdx); } out[outIdx] += out_reg; } } static void s2g_gpu_gather(uint32_t *in, uint32_t *out, int len) { //@@ INSERT CODE HERE s2g_gpu_gather_kernel<<<ceil(len/1024.0), 1024>>>(in, out, len); } static int eval(int inputLength) { uint32_t *deviceInput = nullptr; uint32_t *deviceOutput= nullptr; const std::string conf_info = std::string("gather[len:") + std::to_string(inputLength) + "]"; INFO("Running " << conf_info); auto hostInput = generate_input(inputLength); const size_t byteCount = inputLength * sizeof(uint32_t); timer_start("Allocating GPU memory."); THROW_IF_ERROR(cudaMalloc((void **)&deviceInput, byteCount)); THROW_IF_ERROR(cudaMalloc((void **)&deviceOutput, byteCount)); timer_stop(); timer_start("Copying input memory to the GPU."); THROW_IF_ERROR(cudaMemcpy(deviceInput, hostInput.data(), byteCount, cudaMemcpyHostToDevice)); THROW_IF_ERROR(cudaMemset(deviceOutput, 0, byteCount)); timer_stop(); ////////////////////////////////////////// // GPU Gather Computation ////////////////////////////////////////// timer_start("Performing GPU Gather computation"); s2g_gpu_gather(deviceInput, deviceOutput, inputLength); timer_stop(); std::vector<uint32_t> hostOutput(inputLength); timer_start("Copying output memory to the CPU"); THROW_IF_ERROR(cudaMemcpy(hostOutput.data(), deviceOutput, byteCount, cudaMemcpyDeviceToHost)); timer_stop(); auto expected = compute_output(hostInput, 
inputLength); verify(expected, hostOutput); cudaFree(deviceInput); cudaFree(deviceOutput); return 0; } TEST_CASE("Gather", "[gather]") { SECTION("[inputSize:1024]") { eval(1024); } SECTION("[inputSize:2048]") { eval(2048); } SECTION("[inputSize:2047]") { eval(2047); } SECTION("[inputSize:2049]") { eval(2049); } SECTION("[inputSize:9101]") { eval(9101); } SECTION("[inputSize:9910]") { eval(9910); } SECTION("[inputSize:8192]") { eval(8192); } SECTION("[inputSize:8193]") { eval(8193); } SECTION("[inputSize:8191]") { eval(8191); } SECTION("[inputSize:16191]") { eval(16191); } }
080ae400ff28265de0aa5dc1855d17f0f591c89d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <iomanip> #include <string> #include <cstdlib> #include <stdio.h> #include <cstring> #include <sstream> #include <cmath> #include <stdlib.h> #include <vector> using namespace std; const float eps = 10e-10; /*accuracy*/ const float G = 6.67e-11; /*gravity const*/ const float tau = 0.001; /*time step*/ const float t0 = 0; /*start time*/ const float T = 0.003; /*total time*/ const int N = 1024 * 16; /*number of bodies*/ const int numthreads = 128; /*cuda - threads*/ const int blocksize = N / numthreads ; /*size of block*/ const float tau2 = tau / 2.; /*half time step*/ /* * generate bodies, speed and initial radius * @param{float*} m - mass * @param{float*} r - radius * @param{float*} m - speed */ __host__ void body_gen(float* m, float* r, float* v){ float mas = 0.0; float TEMP = 0.0; // srand(time(0)); for (int i = 0; i < N; ++i){ mas = rand() % 1000000000 + 1000000000; m[i] = mas; for (int j = 0; j < 3; ++j){ TEMP = exp(cos((rand() % 10000))); r[i * 3 + j] = TEMP; } for (int j = 0; j < 3; ++j){ TEMP = exp(cos((rand() % 10000))); v[i * 3 + j] = TEMP; } }/*end for N*/ } /* * squared */ __host__ __device__ inline float pow2(const float x) { return x * x; } /* * generate bodies. 
if 4 - use inicial condicions, otherwise random * @param{float*} m - mass * @param{float*} r - radius * @param{float*} m - speed */ __host__ void ic_input(float* m, float* r, float* v) { if (N != 4) body_gen(m, r, v); else { float mt = 8810324116.227; r[0 * 3 + 0] = 1.; r[0 * 3 + 1] = 0.; r[0 * 3 + 2] = 0.; v[0 * 3 + 0] = 0.; v[0 * 3 + 1] = 0.9; v[0 * 3 + 2] = 0.; m[0] = mt; r[1 * 3 + 0] = 0.; r[1 * 3 + 1] = 1.; r[1 * 3 + 2] = 0.; v[1 * 3 + 0] = -0.9; v[1 * 3 + 1] = 0.; v[1 * 3 + 2] = 0.; m[1] = mt; r[2 * 3 + 0] = -1.; r[2 * 3 + 1] = 0.; r[2 * 3 + 2] = 0.; v[2 * 3 + 0] = 0.; v[2 * 3 + 1] = -0.9; v[2 * 3 + 2] = 0.; m[2] = mt; r[3 * 3 + 0] = 0.; r[3 * 3 + 1] = -1.; r[3 * 3 + 2] = 0.; v[3 * 3 + 0] = 0.9; v[3 * 3 + 1] = 0.; v[3 * 3 + 2] = 0.; m[3] = mt; } } /*probably not needed*/ __host__ void ic_check(const float* m,const float* r,const float* v) { cout << "Input data check " << endl; cout << "tau = " << tau << endl; cout << "T = " << T << endl; cout << "t0 = " << t0 << endl; cout << "N = " << N << endl; for (int i = 0; i < N; ++i) { cout << "m = " << m[i] << endl; for (int j = 0; j < 3; ++j) cout << "r[" << i << ", " << j << "] = " << r[i * 3 + j] << endl; for (int j = 0; j < 3; ++j) cout << "v[" << i << ", " << j << "] = " << v[i * 3 + j] << endl; } } /* * @param{const float*} m - mass * @param{const float*} r - radius * @param{float*} a - speed */ __global__ void kernel(const float* m, const float* r, float* a) { int id = blockIdx.x * blockDim.x + threadIdx.x; /*number of thread*/ float temp = 0.0; // 2 float r0 = r[id * 3]; // 3 float r1 = r[id * 3 + 1]; // 4 float r2 = r[id * 3 + 2] ; // 5 float a0 = 0, a1 = 0, a2 = 0; // 6 7 8 for (int j = 0; j < N; ++j){ temp = pow2(r0 - r[j * 3]) + pow2(r1 - r[j * 3 + 1]) + pow2(r2 - r[j * 3 + 2]); temp = m[j] / max(temp * sqrtf(temp), eps); a0 += (r0 - r[j * 3]) * temp; a1 += (r1 - r[j * 3 + 1]) * temp; a2 += (r2 - r[j * 3 + 2]) * temp; } a[id * 3] = -a0 * G; a[id * 3 + 1] = -a1 * G; a[id * 3 + 2] = -a2 * G; __syncthreads(); 
} __host__ void RK(const float* m, float* r, float* v, const float tau) { dim3 threads(numthreads); dim3 blocks(blocksize); float t = t0; float* k1 = new float[3 * N]; // host float* k2 = new float[3 * N]; // host float* r_t = new float[3 * N]; // host float* v_t = new float[3 * N]; // host float* cu_r = new float[3 * N]; // device float* cu_a = new float[3 * N]; // device float* cu_m = new float[N]; // device hipMalloc((void**)& cu_a, N * 3 * sizeof(float)); /*allocate memory for acceleretions*/ hipMalloc((void**)& cu_r, N * 3 * sizeof(float)); /*allocate memory for radiuses*/ hipMalloc((void**)& cu_m, N * sizeof(float)); /*allocate memory for masses*/ hipMemcpy(cu_m, m, N * sizeof(float), hipMemcpyHostToDevice); /*copy masses from host to device*/ do{ t += tau; hipMemcpy(cu_r, r, 3 * N * sizeof(float), hipMemcpyHostToDevice); /*copy radiuses from host to device*/ kernel << < blocks, threads >> > (cu_m, cu_r, cu_a); /*run kernel*/ hipMemcpy(k1, cu_a, 3 * N * sizeof(float), hipMemcpyDeviceToHost); /*copy accelerations to k1 from device to host*/ for (int i = 0; i < N; ++i){ /*first rk step*/ for (int j = 0; j < 3; ++j){ float vij = v[i * 3 + j]; r_t[i * 3 + j] = r[i * 3 + j] + vij * tau; v_t[i * 3 + j] = vij + k1[i * 3 + j] * tau; } } hipMemcpy(cu_r, r_t, 3 * N * sizeof(float), hipMemcpyHostToDevice); /*copy radiuses_t from host to device*/ kernel << < blocks, threads >> > (cu_m, cu_r, cu_a); /*run kernel*/ hipMemcpy(k2, cu_a, 3 * N * sizeof(float), hipMemcpyDeviceToHost); /*copy accelerations to k2 from device to host*/ for (int i = 0; i < N; ++i){ /*second rk step*/ for (int j = 0; j < 3; ++j) r[i * 3 + j] += (v[i * 3 + j] + v_t[i * 3 + j]) * tau2; v[i * 3 + j] += (k1[i * 3 + j] + k2[i * 3 + j]) * tau2; } if (N == 4) /*calculate difference between our and reference solutions in point t = 10*/ if (fabs(t - 10) < 1e-10){ cout << "error = " << \ sqrt(pow2(r[3 * 2] - 2.5723673494326125) + pow2(r[3 * 2 + 1] - 4.2292866972437615e-7) + pow2(r[3 * 2 + 2])) << endl; } } 
while (t <= T); delete[] k2; delete[] k1; delete[] r_t; delete[] v_t; hipFree(cu_r); hipFree(cu_a); hipFree(cu_m); } int main() { float* m = new float[N]; float* r = new float[3 * N]; float* v = new float[3 * N]; ic_input(m, r, v); hipError_t SD; /*if unable to connect to videocart*/ SD = hipSetDevice(0); if (SD != hipSuccess) { cout << "Could not connect to cuda device." << endl; return (-1); } hipEvent_t start, finish; /*events to count time*/ hipEventCreate(&start); hipEventCreate(&finish); /*create events*/ hipEventRecord(start); hipEventSynchronize(start); /*start time*/ RK(m, r, v, tau); /*calculations*/ hipEventRecord(finish); hipEventSynchronize(finish); /*finish time*/ float dt; /*finish time - start time*/ hipEventElapsedTime(&dt, start, finish); /*count dt*/ cout << "number of bodyes = " << N << endl; cout << "blocksize = " << blocksize << endl; cout << "numthreads = " << numthreads << endl; cout << "time = " << dt / 1000.0 / ( (T - t0) / tau - 1)<< endl; /*one step time*/ hipEventDestroy(start); hipEventDestroy(finish); delete[] m; delete[] r; delete[] v; return 0; }
080ae400ff28265de0aa5dc1855d17f0f591c89d.cu
#include <iostream> #include <fstream> #include <iomanip> #include <string> #include <cstdlib> #include <stdio.h> #include <cstring> #include <sstream> #include <cmath> #include <stdlib.h> #include <vector> using namespace std; const float eps = 10e-10; /*accuracy*/ const float G = 6.67e-11; /*gravity const*/ const float tau = 0.001; /*time step*/ const float t0 = 0; /*start time*/ const float T = 0.003; /*total time*/ const int N = 1024 * 16; /*number of bodies*/ const int numthreads = 128; /*cuda - threads*/ const int blocksize = N / numthreads ; /*size of block*/ const float tau2 = tau / 2.; /*half time step*/ /* * generate bodies, speed and initial radius * @param{float*} m - mass * @param{float*} r - radius * @param{float*} m - speed */ __host__ void body_gen(float* m, float* r, float* v){ float mas = 0.0; float TEMP = 0.0; // srand(time(0)); for (int i = 0; i < N; ++i){ mas = rand() % 1000000000 + 1000000000; m[i] = mas; for (int j = 0; j < 3; ++j){ TEMP = exp(cos((rand() % 10000))); r[i * 3 + j] = TEMP; } for (int j = 0; j < 3; ++j){ TEMP = exp(cos((rand() % 10000))); v[i * 3 + j] = TEMP; } }/*end for N*/ } /* * squared */ __host__ __device__ inline float pow2(const float x) { return x * x; } /* * generate bodies. 
if 4 - use inicial condicions, otherwise random * @param{float*} m - mass * @param{float*} r - radius * @param{float*} m - speed */ __host__ void ic_input(float* m, float* r, float* v) { if (N != 4) body_gen(m, r, v); else { float mt = 8810324116.227; r[0 * 3 + 0] = 1.; r[0 * 3 + 1] = 0.; r[0 * 3 + 2] = 0.; v[0 * 3 + 0] = 0.; v[0 * 3 + 1] = 0.9; v[0 * 3 + 2] = 0.; m[0] = mt; r[1 * 3 + 0] = 0.; r[1 * 3 + 1] = 1.; r[1 * 3 + 2] = 0.; v[1 * 3 + 0] = -0.9; v[1 * 3 + 1] = 0.; v[1 * 3 + 2] = 0.; m[1] = mt; r[2 * 3 + 0] = -1.; r[2 * 3 + 1] = 0.; r[2 * 3 + 2] = 0.; v[2 * 3 + 0] = 0.; v[2 * 3 + 1] = -0.9; v[2 * 3 + 2] = 0.; m[2] = mt; r[3 * 3 + 0] = 0.; r[3 * 3 + 1] = -1.; r[3 * 3 + 2] = 0.; v[3 * 3 + 0] = 0.9; v[3 * 3 + 1] = 0.; v[3 * 3 + 2] = 0.; m[3] = mt; } } /*probably not needed*/ __host__ void ic_check(const float* m,const float* r,const float* v) { cout << "Input data check " << endl; cout << "tau = " << tau << endl; cout << "T = " << T << endl; cout << "t0 = " << t0 << endl; cout << "N = " << N << endl; for (int i = 0; i < N; ++i) { cout << "m = " << m[i] << endl; for (int j = 0; j < 3; ++j) cout << "r[" << i << ", " << j << "] = " << r[i * 3 + j] << endl; for (int j = 0; j < 3; ++j) cout << "v[" << i << ", " << j << "] = " << v[i * 3 + j] << endl; } } /* * @param{const float*} m - mass * @param{const float*} r - radius * @param{float*} a - speed */ __global__ void kernel(const float* m, const float* r, float* a) { int id = blockIdx.x * blockDim.x + threadIdx.x; /*number of thread*/ float temp = 0.0; // 2 float r0 = r[id * 3]; // 3 float r1 = r[id * 3 + 1]; // 4 float r2 = r[id * 3 + 2] ; // 5 float a0 = 0, a1 = 0, a2 = 0; // 6 7 8 for (int j = 0; j < N; ++j){ temp = pow2(r0 - r[j * 3]) + pow2(r1 - r[j * 3 + 1]) + pow2(r2 - r[j * 3 + 2]); temp = m[j] / max(temp * sqrtf(temp), eps); a0 += (r0 - r[j * 3]) * temp; a1 += (r1 - r[j * 3 + 1]) * temp; a2 += (r2 - r[j * 3 + 2]) * temp; } a[id * 3] = -a0 * G; a[id * 3 + 1] = -a1 * G; a[id * 3 + 2] = -a2 * G; __syncthreads(); 
} __host__ void RK(const float* m, float* r, float* v, const float tau) { dim3 threads(numthreads); dim3 blocks(blocksize); float t = t0; float* k1 = new float[3 * N]; // host float* k2 = new float[3 * N]; // host float* r_t = new float[3 * N]; // host float* v_t = new float[3 * N]; // host float* cu_r = new float[3 * N]; // device float* cu_a = new float[3 * N]; // device float* cu_m = new float[N]; // device cudaMalloc((void**)& cu_a, N * 3 * sizeof(float)); /*allocate memory for acceleretions*/ cudaMalloc((void**)& cu_r, N * 3 * sizeof(float)); /*allocate memory for radiuses*/ cudaMalloc((void**)& cu_m, N * sizeof(float)); /*allocate memory for masses*/ cudaMemcpy(cu_m, m, N * sizeof(float), cudaMemcpyHostToDevice); /*copy masses from host to device*/ do{ t += tau; cudaMemcpy(cu_r, r, 3 * N * sizeof(float), cudaMemcpyHostToDevice); /*copy radiuses from host to device*/ kernel << < blocks, threads >> > (cu_m, cu_r, cu_a); /*run kernel*/ cudaMemcpy(k1, cu_a, 3 * N * sizeof(float), cudaMemcpyDeviceToHost); /*copy accelerations to k1 from device to host*/ for (int i = 0; i < N; ++i){ /*first rk step*/ for (int j = 0; j < 3; ++j){ float vij = v[i * 3 + j]; r_t[i * 3 + j] = r[i * 3 + j] + vij * tau; v_t[i * 3 + j] = vij + k1[i * 3 + j] * tau; } } cudaMemcpy(cu_r, r_t, 3 * N * sizeof(float), cudaMemcpyHostToDevice); /*copy radiuses_t from host to device*/ kernel << < blocks, threads >> > (cu_m, cu_r, cu_a); /*run kernel*/ cudaMemcpy(k2, cu_a, 3 * N * sizeof(float), cudaMemcpyDeviceToHost); /*copy accelerations to k2 from device to host*/ for (int i = 0; i < N; ++i){ /*second rk step*/ for (int j = 0; j < 3; ++j) r[i * 3 + j] += (v[i * 3 + j] + v_t[i * 3 + j]) * tau2; v[i * 3 + j] += (k1[i * 3 + j] + k2[i * 3 + j]) * tau2; } if (N == 4) /*calculate difference between our and reference solutions in point t = 10*/ if (fabs(t - 10) < 1e-10){ cout << "error = " << \ sqrt(pow2(r[3 * 2] - 2.5723673494326125) + pow2(r[3 * 2 + 1] - 4.2292866972437615e-7) + pow2(r[3 * 2 + 2])) 
<< endl; } } while (t <= T); delete[] k2; delete[] k1; delete[] r_t; delete[] v_t; cudaFree(cu_r); cudaFree(cu_a); cudaFree(cu_m); } int main() { float* m = new float[N]; float* r = new float[3 * N]; float* v = new float[3 * N]; ic_input(m, r, v); cudaError_t SD; /*if unable to connect to videocart*/ SD = cudaSetDevice(0); if (SD != cudaSuccess) { cout << "Could not connect to cuda device." << endl; return (-1); } cudaEvent_t start, finish; /*events to count time*/ cudaEventCreate(&start); cudaEventCreate(&finish); /*create events*/ cudaEventRecord(start); cudaEventSynchronize(start); /*start time*/ RK(m, r, v, tau); /*calculations*/ cudaEventRecord(finish); cudaEventSynchronize(finish); /*finish time*/ float dt; /*finish time - start time*/ cudaEventElapsedTime(&dt, start, finish); /*count dt*/ cout << "number of bodyes = " << N << endl; cout << "blocksize = " << blocksize << endl; cout << "numthreads = " << numthreads << endl; cout << "time = " << dt / 1000.0 / ( (T - t0) / tau - 1)<< endl; /*one step time*/ cudaEventDestroy(start); cudaEventDestroy(finish); delete[] m; delete[] r; delete[] v; return 0; }
fc4e3968d8c85dd9457f1f11a46ab462d5d8e5e6.hip
// !!! This is a file automatically generated by hipify!!! // #define BLOCKDIMX 64 // #define BLOCKDIMY 2 // #define BLOCKDIMZ 1 // #define BLOCKSIZEX 128 // #define BLOCKSIZEY 4 // #define BLOCKSIZEZ 2 // Use all constants to debug and get the performance #define DIMX 512 #define DIMY 512 #define DIMZ 512 #define TOTAL (DIMX*DIMY*DIMZ) #define NUMTHREADS (BLOCKDIMX*BLOCKDIMY*BLOCKDIMZ) #define HALO 1 #define OPENEDDIMX (BLOCKSIZEX+2*HALO) #define OPENEDDIMY (BLOCKSIZEY+2*HALO) #define OPENEDDIMZ (BLOCKSIZEZ+2*HALO) #define OPENEDDIMXY (OPENEDDIMX*OPENEDDIMY) #define OPENEDDIMXYZ (OPENEDDIMX*OPENEDDIMY*OPENEDDIMZ) #define CLOSEDDIMX (BLOCKSIZEX) #define CLOSEDDIMY (BLOCKSIZEY) #define CLOSEDDIMZ (BLOCKSIZEZ) #define CLOSEDDIMXY (CLOSEDDIMX*CLOSEDDIMY) #define CLOSEDDIMXYZ (CLOSEDDIMX*CLOSEDDIMY*CLOSEDDIMZ) #define NUMREADING ((OPENEDDIMXYZ / NUMTHREADS) + ((OPENEDDIMXYZ%NUMTHREADS)?1:0)) #define NUMWRITING ((CLOSEDDIMXYZ / NUMTHREADS) + ((CLOSEDDIMXYZ%NUMTHREADS)?1:0)) // #define CORRECTNESS_DATA #define CORRECTNESS_HEAT // #define myclamp(x, value, tx, fx) {return ((x)==(value)) ? 
(tx):(fx)} #define C0 0.25f #define C1 0.50f #include <iostream> #include <fstream> #include <sstream> #include <iomanip> // std::setfill, std::setw #include <string> // #include <sys/ioctl.h> #include <hip/hip_runtime.h> #include <helper_math.h> // #include <gpu_timer.hpp> using namespace std; //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkLastError() { \ hipError_t error = hipGetLastError(); \ int id; \ hipGetDevice(&id); \ if(error != hipSuccess) { \ printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \ __FILE__,__LINE__, hipGetErrorString(error), id); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkReadFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::in|ios::binary); \ if (!fs->is_open()) \ { \ printf("Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->read(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkWriteFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::out|ios::binary); \ if (!fs->is_open()) \ { \ fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->write(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define at(x, y, z, DIMX, DIMY, DIMZ) ( clamp((int)(z), 0, DIMZ-1)*DIMY*DIMX + \ clamp((int)(y), 0, DIMY-1)*DIMX + \ clamp((int)(x), 0, DIMX-1) ) //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void heatflow_global(float *src, float *dst) { int closed_index_1d, 
offset_index_1d, global_index_1d; int3 closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting*NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), (offset_index_3d.z + closed_index_3d.z) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { // dst[at(global_index_3d.x, global_index_3d.y, global_index_3d.z, DIMX, DIMY, DIMZ)] dst[global_index_1d] = C0 * (src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)])+ C1 * (src[at(global_index_3d.x-1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y-1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z-1, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+1, DIMX, DIMY, DIMZ)]); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void 
heatflow_shared(float *src, float *dst) { int opened_index_1d, closed_index_1d, offset_index_1d, global_index_1d; int3 opened_index_3d, closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); __shared__ float sharedMem[OPENEDDIMZ][OPENEDDIMY][OPENEDDIMX]; float result; int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisReading=0; thisReading<NUMREADING; thisReading++) { // opened_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + opened_index_1d = index + thisReading * NUMTHREADS; opened_index_3d = make_int3((opened_index_1d % OPENEDDIMXY % OPENEDDIMX), (opened_index_1d % OPENEDDIMXY / OPENEDDIMX), (opened_index_1d / OPENEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + opened_index_3d.x - HALO), (offset_index_3d.y + opened_index_3d.y - HALO), (offset_index_3d.z + opened_index_3d.z - HALO) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(opened_index_3d.z < OPENEDDIMZ) { if(global_index_3d.z >= 0 && global_index_3d.z < (DIMZ) && global_index_3d.y >= 0 && global_index_3d.y < (DIMY) && global_index_3d.x >= 0 && global_index_3d.x < (DIMX) ) { sharedMem[opened_index_3d.z][opened_index_3d.y][opened_index_3d.x] = src[global_index_1d]; } } } __syncthreads(); #pragma unroll for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting * NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), (offset_index_3d.z + closed_index_3d.z) ); 
global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; result = C0 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0])+ C1 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO-1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO-1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO-1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0]); if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { dst[global_index_1d] = result; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("-----------------------------------------------------------------------\n"); srand(time(NULL)); // for random number generator hipSetDevice(3);checkLastError(); hipDeviceReset();checkLastError(); // Specify dimensions // Allocate host memory float *h_src = new float[TOTAL]; float *h_dst = new float[TOTAL]; // Allocate device memory float *d_src; float *d_dst; hipMalloc((void**)&d_src, TOTAL*sizeof(float)); checkLastError(); hipMalloc((void**)&d_dst, TOTAL*sizeof(float)); checkLastError(); // Initialize the image source for(int z=0; z<DIMZ; z++) { for(int y=0; y<DIMY; y++) { for(int x=0; x<DIMX; x++) { h_src[z*DIMY*DIMX+y*DIMX+x] = (float)( (int)rand() % 10); // 7; } } } // Transferring to the device memory hipMemcpy(d_src, h_src, TOTAL*sizeof(float), hipMemcpyHostToDevice); checkLastError(); hipMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // parameters for performance 
eval double flops, gbps, nops, nbp; nbp = 8*4; // # of bytes transferred per point nops = 8.; // # of flops per point int iter = 20; int rightData = 0; int rightHeat = 0; /// Verify the correctness of data // #ifdef CORRECTNESS_DATA hipMemcpy(d_dst, d_src, TOTAL*sizeof(float), hipMemcpyDeviceToDevice); checkLastError(); hipMemcpy(h_dst, d_dst, TOTAL*sizeof(float), hipMemcpyDeviceToHost); checkLastError(); for(int z=0; z<DIMZ && rightData; z++) { for(int y=0; y<DIMY && rightData; y++) { for(int x=0; x<DIMX && rightData; x++) { if(h_src[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Data does not match at x: %d, y: %d, z: %d\n", x, y, z); rightData = 0; // goto cleanup_data; } } } } if(rightData) printf("Data is correct.\n"); // cleanup_data: // #endif // grid construction dim3 numThreads(BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ); //Dim dim3 numBlocks((DIMX/BLOCKSIZEX)+((DIMX%BLOCKSIZEX)?1:0), //Size for ILP (DIMY/BLOCKSIZEY)+((DIMY%BLOCKSIZEY)?1:0), (DIMZ/BLOCKSIZEZ)+((DIMZ%BLOCKSIZEZ)?1:0)); hipMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // Reset the result memset(h_dst, 0, TOTAL*sizeof(float)); printf("Blockdim (%03d, %03d, %03d); Blocksize (%03d, %03d, %03d);\n", BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ, BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); // launch kernel // GpuTimer gpu_timer; // gpu_timer.Start(); hipEvent_t begin, end; hipEventCreate(&begin); hipEventCreate(&end); hipEventRecord(begin, 0); for(int n=0; n<iter; n++) { // heatflow_global<<<numBlocks, numThreads>>>(d_src, d_dst); hipLaunchKernelGGL(( heatflow_shared), dim3(numBlocks), dim3(numThreads), 0, 0, d_src, d_dst); } // gpu_timer.Stop(); hipDeviceSynchronize(); hipEventRecord(end, 0); hipEventSynchronize(end); float msec; hipEventElapsedTime(&msec, begin, end); checkLastError(); // float msec = gpu_timer.Elapsed(); gbps = nbp*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; flops = nops*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; printf("Computing time : %.3f 
msec, Device memory bandwidth : %.3f GB/s, GFLOPS : %.3f\n", msec, gbps, flops); float* h_ref = new float[DIMX*DIMY*DIMZ]; float tmp, result; // #ifdef CORRECTNESS_HEAT /// Verify the correctness of heat flow, no check at boundary // Golden result for(int z=1; z<(DIMZ-1); z++) { for(int y=1; y<(DIMY-1); y++) { for(int x=1; x<(DIMX-1); x++) { result = C0 * (h_src[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)])+ C1 * (h_src[at(x-1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y-1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z-1, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z+1, DIMX, DIMY, DIMZ)]); h_ref[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)] = result; } } } // Transferring to the host memory hipMemcpy(h_dst, d_dst, TOTAL*sizeof(float), hipMemcpyDeviceToHost); checkLastError(); // Compare result for(int z=1; z<(DIMZ-1) && rightHeat; z++) { for(int y=1; y<(DIMY-1) && rightHeat; y++) { for(int x=1; x<(DIMX-1) && rightHeat; x++) { if(h_ref[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Solution does not match at x: %d, y: %d, z: %d\n", x, y, z); printf("h_ref (%04.4f), h_dst (%04.4f)\n", h_ref[z*DIMY*DIMX+y*DIMX+x], h_dst[z*DIMY*DIMX+y*DIMX+x]); rightHeat = 0; // goto cleanup_heat; } } } } if(rightHeat) printf("Solution is correct.\n"); // cleanup_heat: // #endif ///!!! Print line // struct winsize w; // ioctl(0, TIOCGWINSZ, &w); // for(int k=0; k<w.ws_col; k++) // printf("-"); printf("\n"); checkLastError(); // cleanup: hipFree(d_src); hipFree(d_dst); free(h_src); free(h_dst); free(h_ref); return 0; }
fc4e3968d8c85dd9457f1f11a46ab462d5d8e5e6.cu
// #define BLOCKDIMX 64 // #define BLOCKDIMY 2 // #define BLOCKDIMZ 1 // #define BLOCKSIZEX 128 // #define BLOCKSIZEY 4 // #define BLOCKSIZEZ 2 // Use all constants to debug and get the performance #define DIMX 512 #define DIMY 512 #define DIMZ 512 #define TOTAL (DIMX*DIMY*DIMZ) #define NUMTHREADS (BLOCKDIMX*BLOCKDIMY*BLOCKDIMZ) #define HALO 1 #define OPENEDDIMX (BLOCKSIZEX+2*HALO) #define OPENEDDIMY (BLOCKSIZEY+2*HALO) #define OPENEDDIMZ (BLOCKSIZEZ+2*HALO) #define OPENEDDIMXY (OPENEDDIMX*OPENEDDIMY) #define OPENEDDIMXYZ (OPENEDDIMX*OPENEDDIMY*OPENEDDIMZ) #define CLOSEDDIMX (BLOCKSIZEX) #define CLOSEDDIMY (BLOCKSIZEY) #define CLOSEDDIMZ (BLOCKSIZEZ) #define CLOSEDDIMXY (CLOSEDDIMX*CLOSEDDIMY) #define CLOSEDDIMXYZ (CLOSEDDIMX*CLOSEDDIMY*CLOSEDDIMZ) #define NUMREADING ((OPENEDDIMXYZ / NUMTHREADS) + ((OPENEDDIMXYZ%NUMTHREADS)?1:0)) #define NUMWRITING ((CLOSEDDIMXYZ / NUMTHREADS) + ((CLOSEDDIMXYZ%NUMTHREADS)?1:0)) // #define CORRECTNESS_DATA #define CORRECTNESS_HEAT // #define myclamp(x, value, tx, fx) {return ((x)==(value)) ? 
(tx):(fx)} #define C0 0.25f #define C1 0.50f #include <iostream> #include <fstream> #include <sstream> #include <iomanip> // std::setfill, std::setw #include <string> // #include <sys/ioctl.h> #include <cuda.h> #include <helper_math.h> // #include <gpu_timer.hpp> using namespace std; //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkLastError() { \ cudaError_t error = cudaGetLastError(); \ int id; \ cudaGetDevice(&id); \ if(error != cudaSuccess) { \ printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \ __FILE__,__LINE__, cudaGetErrorString(error), id); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkReadFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::in|ios::binary); \ if (!fs->is_open()) \ { \ printf("Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->read(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkWriteFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::out|ios::binary); \ if (!fs->is_open()) \ { \ fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->write(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define at(x, y, z, DIMX, DIMY, DIMZ) ( clamp((int)(z), 0, DIMZ-1)*DIMY*DIMX + \ clamp((int)(y), 0, DIMY-1)*DIMX + \ clamp((int)(x), 0, DIMX-1) ) //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void heatflow_global(float *src, float *dst) { int closed_index_1d, 
offset_index_1d, global_index_1d; int3 closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting*NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), (offset_index_3d.z + closed_index_3d.z) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { // dst[at(global_index_3d.x, global_index_3d.y, global_index_3d.z, DIMX, DIMY, DIMZ)] dst[global_index_1d] = C0 * (src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)])+ C1 * (src[at(global_index_3d.x-1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y-1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z-1, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+1, DIMX, DIMY, DIMZ)]); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void 
heatflow_shared(float *src, float *dst) { int opened_index_1d, closed_index_1d, offset_index_1d, global_index_1d; int3 opened_index_3d, closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); __shared__ float sharedMem[OPENEDDIMZ][OPENEDDIMY][OPENEDDIMX]; float result; int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisReading=0; thisReading<NUMREADING; thisReading++) { // opened_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + opened_index_1d = index + thisReading * NUMTHREADS; opened_index_3d = make_int3((opened_index_1d % OPENEDDIMXY % OPENEDDIMX), (opened_index_1d % OPENEDDIMXY / OPENEDDIMX), (opened_index_1d / OPENEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + opened_index_3d.x - HALO), (offset_index_3d.y + opened_index_3d.y - HALO), (offset_index_3d.z + opened_index_3d.z - HALO) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(opened_index_3d.z < OPENEDDIMZ) { if(global_index_3d.z >= 0 && global_index_3d.z < (DIMZ) && global_index_3d.y >= 0 && global_index_3d.y < (DIMY) && global_index_3d.x >= 0 && global_index_3d.x < (DIMX) ) { sharedMem[opened_index_3d.z][opened_index_3d.y][opened_index_3d.x] = src[global_index_1d]; } } } __syncthreads(); #pragma unroll for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting * NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), (offset_index_3d.z + closed_index_3d.z) ); 
global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; result = C0 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0])+ C1 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO-1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO-1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO-1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0]); if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { dst[global_index_1d] = result; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("-----------------------------------------------------------------------\n"); srand(time(NULL)); // for random number generator cudaSetDevice(3);checkLastError(); cudaDeviceReset();checkLastError(); // Specify dimensions // Allocate host memory float *h_src = new float[TOTAL]; float *h_dst = new float[TOTAL]; // Allocate device memory float *d_src; float *d_dst; cudaMalloc((void**)&d_src, TOTAL*sizeof(float)); checkLastError(); cudaMalloc((void**)&d_dst, TOTAL*sizeof(float)); checkLastError(); // Initialize the image source for(int z=0; z<DIMZ; z++) { for(int y=0; y<DIMY; y++) { for(int x=0; x<DIMX; x++) { h_src[z*DIMY*DIMX+y*DIMX+x] = (float)( (int)rand() % 10); // 7; } } } // Transferring to the device memory cudaMemcpy(d_src, h_src, TOTAL*sizeof(float), cudaMemcpyHostToDevice); checkLastError(); cudaMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // parameters for 
performance eval double flops, gbps, nops, nbp; nbp = 8*4; // # of bytes transferred per point nops = 8.; // # of flops per point int iter = 20; int rightData = 0; int rightHeat = 0; /// Verify the correctness of data // #ifdef CORRECTNESS_DATA cudaMemcpy(d_dst, d_src, TOTAL*sizeof(float), cudaMemcpyDeviceToDevice); checkLastError(); cudaMemcpy(h_dst, d_dst, TOTAL*sizeof(float), cudaMemcpyDeviceToHost); checkLastError(); for(int z=0; z<DIMZ && rightData; z++) { for(int y=0; y<DIMY && rightData; y++) { for(int x=0; x<DIMX && rightData; x++) { if(h_src[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Data does not match at x: %d, y: %d, z: %d\n", x, y, z); rightData = 0; // goto cleanup_data; } } } } if(rightData) printf("Data is correct.\n"); // cleanup_data: // #endif // grid construction dim3 numThreads(BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ); //Dim dim3 numBlocks((DIMX/BLOCKSIZEX)+((DIMX%BLOCKSIZEX)?1:0), //Size for ILP (DIMY/BLOCKSIZEY)+((DIMY%BLOCKSIZEY)?1:0), (DIMZ/BLOCKSIZEZ)+((DIMZ%BLOCKSIZEZ)?1:0)); cudaMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // Reset the result memset(h_dst, 0, TOTAL*sizeof(float)); printf("Blockdim (%03d, %03d, %03d); Blocksize (%03d, %03d, %03d);\n", BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ, BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); // launch kernel // GpuTimer gpu_timer; // gpu_timer.Start(); cudaEvent_t begin, end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord(begin, 0); for(int n=0; n<iter; n++) { // heatflow_global<<<numBlocks, numThreads>>>(d_src, d_dst); heatflow_shared<<<numBlocks, numThreads>>>(d_src, d_dst); } // gpu_timer.Stop(); cudaDeviceSynchronize(); cudaEventRecord(end, 0); cudaEventSynchronize(end); float msec; cudaEventElapsedTime(&msec, begin, end); checkLastError(); // float msec = gpu_timer.Elapsed(); gbps = nbp*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; flops = nops*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; printf("Computing time : %.3f msec, Device 
memory bandwidth : %.3f GB/s, GFLOPS : %.3f\n", msec, gbps, flops); float* h_ref = new float[DIMX*DIMY*DIMZ]; float tmp, result; // #ifdef CORRECTNESS_HEAT /// Verify the correctness of heat flow, no check at boundary // Golden result for(int z=1; z<(DIMZ-1); z++) { for(int y=1; y<(DIMY-1); y++) { for(int x=1; x<(DIMX-1); x++) { result = C0 * (h_src[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)])+ C1 * (h_src[at(x-1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y-1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z-1, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z+1, DIMX, DIMY, DIMZ)]); h_ref[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)] = result; } } } // Transferring to the host memory cudaMemcpy(h_dst, d_dst, TOTAL*sizeof(float), cudaMemcpyDeviceToHost); checkLastError(); // Compare result for(int z=1; z<(DIMZ-1) && rightHeat; z++) { for(int y=1; y<(DIMY-1) && rightHeat; y++) { for(int x=1; x<(DIMX-1) && rightHeat; x++) { if(h_ref[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Solution does not match at x: %d, y: %d, z: %d\n", x, y, z); printf("h_ref (%04.4f), h_dst (%04.4f)\n", h_ref[z*DIMY*DIMX+y*DIMX+x], h_dst[z*DIMY*DIMX+y*DIMX+x]); rightHeat = 0; // goto cleanup_heat; } } } } if(rightHeat) printf("Solution is correct.\n"); // cleanup_heat: // #endif ///!!! Print line // struct winsize w; // ioctl(0, TIOCGWINSZ, &w); // for(int k=0; k<w.ws_col; k++) // printf("-"); printf("\n"); checkLastError(); // cleanup: cudaFree(d_src); cudaFree(d_dst); free(h_src); free(h_dst); free(h_ref); return 0; }
afe6f1a9d46e0f40b93968ac20d43b9ccad2f6e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/layers/misc/dist_embedding.hpp" #ifdef LBANN_HAS_NVSHMEM #include "lbann/utils/cuda.hpp" #include "lbann/utils/nvshmem.hpp" namespace lbann { namespace { // Typedefs using Size2 = cuda::array<size_t, 2>; template <typename T> using VectorMetadata = typename dist_embedding_layer<T,data_layout::DATA_PARALLEL,El::Device::GPU>::vector_metadata; /** Copy between two device buffers, using all threads in a warp. 
*/ template <typename T> __device__ __forceinline__ T* memcpy_warp(T* __restrict__ dest, const T* __restrict__ src, size_t n) { constexpr size_t warp_size = 32; for (size_t i = threadIdx.x; i < n; i += warp_size) { dest[i] = src[i]; } __syncwarp(); return dest; } /** See El::AbstractDistMatrix::ColOwner. */ __device__ __forceinline__ size_t distmat_index_owner(size_t global_index, size_t align, size_t stride) { return (global_index + align) % stride; } /** See El::AbstractDistMatrix::GlobalCol. */ __device__ __forceinline__ size_t distmat_global_index(size_t local_index, size_t shift, size_t stride) { return shift + local_index * stride; } /** See El::AbstractDistMatrix::LocalCol. */ __device__ __forceinline__ size_t distmat_local_index(size_t global_index, size_t rank, size_t align, size_t stride) { auto shift = (stride + rank - align) % stride; if (global_index > shift) { return (global_index - shift - 1) / stride + 1; } else { return 0; } } /** Launch a CUDA kernel. * * @todo Check that argument types match kernel signature. */ template <typename Kernel, typename... Args> inline void launch_cuda_kernel( const Kernel& kernel, dim3 grid_dims, dim3 block_dims, size_t shared_mem, hipStream_t stream, Args... args) { void* arg_list[] = { const_cast<void*>(reinterpret_cast<const void*>(&args))... }; CHECK_CUDA( cudaLaunchKernel( reinterpret_cast<const void*>(&kernel), grid_dims, block_dims, arg_list, shared_mem, stream)); } /** Launch a collective NVSHMEM kernel. * * Needed for device-side NVSHMEM synchronization calls like * nvshmem_wait. If grid_dims is zero, then the NVSHMEM will launch * with the largest available grid. * * @todo Check that argument types match kernel signature. */ template <typename Kernel, typename... Args> inline void launch_nvshmem_collective_kernel( const Kernel& kernel, dim3 grid_dims, dim3 block_dims, size_t shared_mem, hipStream_t stream, Args... 
args) { if (grid_dims.x == 0) { grid_dims.y = 0; grid_dims.z = 0; } void* arg_list[] = { const_cast<void*>(reinterpret_cast<const void*>(&args))... }; auto status = nvshmemx_collective_launch( reinterpret_cast<const void*>(&kernel), grid_dims, block_dims, arg_list, shared_mem, stream); if (status != 0) { LBANN_ERROR( "Failed to launch NVSHMEM collective kernel ", "(error ",status,")"); } } } // namespace <anon> // --------------------------------------------- // Life cycle and setup // --------------------------------------------- template <typename TensorDataType, data_layout Layout, El::Device Device> dist_embedding_layer<TensorDataType,Layout,Device>::~dist_embedding_layer() { if (m_embeddings_buffer != nullptr) { nvshmem_free(m_embeddings_buffer); } if (m_workspace_buffer != nullptr) { nvshmem_free(m_workspace_buffer); } if (m_metadata_buffer != nullptr) { nvshmem_free(m_metadata_buffer); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::attach_embeddings_to_shmem_buffer() { if (m_embeddings_buffer != nullptr || m_embeddings_buffer_size != 0) { LBANN_ERROR("attempted to attach embedding matrix ", "to NVSHMEM buffer multiple times"); } // Embedding weights matrix using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0)); const auto dist = embeddings.DistData(); if (dist.device != El::Device::GPU) { LBANN_ERROR("attempted to attach non-GPU matrix to NVSHMEM buffer"); } #if 0 // nvshmem_addr_accessible is not supported as of NVSHMEM 1.4 if (nvshmem_addr_accessible(embeddings.LockedBuffer(), nvshmem_my_pe())) { return; } #endif // Calculate size of NVSHMEM buffer const auto col_comm_size = El::mpi::Size(embeddings.ColComm()); const auto row_comm_size = El::mpi::Size(embeddings.RowComm()); const auto height = embeddings.Height(); const auto width = embeddings.Width(); const auto local_height = 
(height + col_comm_size - 1) / col_comm_size; const auto local_width = (width + row_comm_size - 1) / row_comm_size; m_embeddings_buffer_size = local_height * local_width * sizeof(TensorDataType); if (m_embeddings_buffer_size == 0) { return; } // Allocate NVSHMEM buffer m_embeddings_buffer = nvshmem::malloc<TensorDataType>(m_embeddings_buffer_size); // Attach matrix to NVSHMEM buffer std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> orig_mat( embeddings.Construct(embeddings.Grid(), embeddings.Root())); *orig_mat = std::move(embeddings); embeddings.Empty(); embeddings.AlignWith(dist); dynamic_cast<El::ElementalMatrix<TensorDataType>&>(embeddings).Attach( height, width, *dist.grid, dist.colAlign, dist.rowAlign, m_embeddings_buffer, local_height, dist.root); El::Copy(*orig_mat, embeddings); } // --------------------------------------------- // Forward prop // --------------------------------------------- namespace { /** Request embedding vectors from owner processes. * * Block dimensions: 32 x 1 x 1 * * Grid dimensions: input_dims[1] x input_dims[0] x 1 */ template <typename T> __global__ void request_embeddings_kernel( size_t embedding_dim, Size2 input_dims, const T* __restrict__ input, Size2 input_strides, const T* __restrict__ embeddings, Size2 embeddings_strides, VectorMetadata<T>* __restrict__ metadata, Size2 metadata_strides, T* __restrict__ workspace, Size2 workspace_strides, size_t rank, size_t input_rowshift, size_t input_rowstride, size_t embeddings_rowalign, size_t embeddings_rowstride) { // Indices const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx; const size_t i_start = bidx * i_per_block; const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]); for (size_t j = bidy; j < input_dims[0]; j += nblocksy) { for (size_t i = i_start; i < i_end; ++i) { const auto& global_j = 
distmat_global_index(j, input_rowshift, input_rowstride); // Get embedding vector index const auto& global_index_float = input[i*input_strides[1] + j*input_strides[0]]; const auto& global_index = static_cast<size_t>(cuda::floor(global_index_float)); // Figure out which process owns embedding vector __shared__ unsigned char metadata_shared[sizeof(VectorMetadata<T>)]; auto& m = *reinterpret_cast<VectorMetadata<T>*>(metadata_shared); if (threadIdx.x == 0) { m.source_rank = distmat_index_owner(global_index, embeddings_rowalign, embeddings_rowstride); m.source_index = distmat_local_index(global_index, m.source_rank, embeddings_rowalign, embeddings_rowstride); m.target_rank = rank; m.target_index = i + global_j*input_dims[1]; m.is_active = true; metadata[i*metadata_strides[1] + global_j*metadata_strides[0]] = m; } __syncwarp(); // Get embedding vector from owner process nvshmemx_getmem_nbi_warp( &workspace[m.target_index * workspace_strides[0]], &embeddings[m.source_index * embeddings_strides[0]], embedding_dim*sizeof(T), m.source_rank); } } } /** Copy embedding vectors to output tensor. 
* * Block dimensions: 32 x 1 x 1 * * Grid dimensions: input_dims[1] x input_dims[0] x 1 */ template <typename T> __global__ void copy_embeddings_kernel( size_t embedding_dim, Size2 input_dims, const VectorMetadata<T>* __restrict__ metadata, Size2 metadata_strides, const T* __restrict__ workspace, Size2 workspace_strides, T* __restrict__ output, Size2 output_strides, size_t input_rowshift, size_t input_rowstride) { // Indices const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx; const size_t i_start = bidx * i_per_block; const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]); for (size_t j = bidy; j < input_dims[0]; j += nblocksy) { for (size_t i = i_start; i < i_end; ++i) { const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride); const auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]]; memcpy_warp( &output[i*embedding_dim + j*output_strides[0]], &workspace[m.target_index * workspace_strides[0]], embedding_dim); } } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::fp_compute() { // Data matrices // Note: Make sure to get original weight values since they are in // SHMEM buffer. 
using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; const auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0)); const auto& input = this->get_prev_activations(); const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix()); auto& local_output = dynamic_cast<LocalMat&>(this->get_local_activations()); // Dimensions const size_t input_size = this->get_input_size(); const size_t output_size = this->get_output_size(); const size_t mini_batch_size = input.Width(); const size_t local_mini_batch_size = local_input.Width(); // GPU objects auto&& stream = El::GPUManager::Stream(); nvshmem::initialize(); // Barrier to handle gradient checking /// @todo Think of a way to avoid this synchronization if (m_barrier_in_forward_prop) { nvshmemx_barrier_all_on_stream(stream); } // Synchronize non-blocking barrier // Note: Make sure embeddings are up-to-date and NVSHMEM workspaces // are safe to reset. auto& comm = *this->get_comm(); comm.wait(m_nb_barrier_request); // Initialize NVSHMEM buffer for communicating embedding vectors if (m_workspace_buffer_size < output_size * mini_batch_size) { m_workspace_buffer_size = output_size * mini_batch_size; m_workspace_buffer = nvshmem::realloc(m_workspace_buffer, m_workspace_buffer_size); } LocalMat workspace( m_embedding_dim, input_size * mini_batch_size, m_workspace_buffer, m_embedding_dim); // Initialize NVSHMEM buffer for embedding vector metadata if (m_metadata_buffer_size < input_size * mini_batch_size) { m_metadata_buffer_size = input_size * mini_batch_size; m_metadata_buffer = nvshmem::realloc(m_metadata_buffer, m_metadata_buffer_size); } CHECK_CUDA( hipMemsetAsync( m_metadata_buffer, 0, m_metadata_buffer_size*sizeof(vector_metadata), stream)); // Request embedding vectors from owning processes const size_t rank = comm.get_rank_in_trainer(); if (!local_input.IsEmpty()) { constexpr size_t block_size = 32; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = 
input_size; grid_dims.y = local_mini_batch_size; launch_cuda_kernel( request_embeddings_kernel<TensorDataType>, grid_dims, block_dims, 0, stream, m_embedding_dim, Size2{local_mini_batch_size, input_size}, local_input.LockedBuffer(), Size2{size_t(local_input.LDim()), 1}, embeddings.LockedBuffer(), Size2{size_t(embeddings.LDim()), 1}, m_metadata_buffer, Size2{input_size, 1}, workspace.Buffer(), Size2{size_t(workspace.LDim()), 1}, size_t(rank), size_t(input.RowShift()), size_t(input.RowStride()), size_t(embeddings.RowAlign()), size_t(embeddings.RowStride())); } nvshmemx_quiet_on_stream(stream); // Copy embedding vectors to output tensor if (!local_output.IsEmpty()) { constexpr size_t block_size = 32; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = input_size; grid_dims.y = local_mini_batch_size; launch_cuda_kernel( copy_embeddings_kernel<TensorDataType>, grid_dims, block_dims, 0, stream, m_embedding_dim, Size2{local_mini_batch_size, input_size}, m_metadata_buffer, Size2{input_size, 1}, workspace.LockedBuffer(), Size2{size_t(workspace.LDim()), 1}, local_output.Buffer(), Size2{size_t(local_output.LDim()), 1}, size_t(input.RowShift()), size_t(input.RowStride())); } // Non-blocking barrier // Note: NVSHMEM workspaces are ready to recieve gradients. nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request); } // --------------------------------------------- // Backprop // --------------------------------------------- namespace { /** Send gradients to owner processes. 
* * Block dimensions: 32 x 1 x 1 * * Grid dimensions: input_dims[1] x input_dims[0] x 1 */ template <typename T> __global__ void send_gradients_kernel( size_t embedding_dim, Size2 input_dims, const T* __restrict__ output_grad, Size2 output_grad_strides, VectorMetadata<T>* __restrict__ metadata, Size2 metadata_strides, T* __restrict__ workspace, Size2 workspace_strides, size_t input_rowshift, size_t input_rowstride) { // Indices const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; // Assign metadata to CUDA blocks const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx; const size_t i_start = bidx * i_per_block; const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]); // Send gradients to owner processes for (size_t j = bidy; j < input_dims[0]; j += nblocksy) { for (size_t i = i_start; i < i_end; ++i) { const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride); auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]]; auto* workspace_ptr = &workspace[m.target_index * workspace_strides[0]]; memcpy_warp( workspace_ptr, &output_grad[i*embedding_dim + j*output_grad_strides[0]], embedding_dim); if (m.source_rank != m.target_rank) { nvshmemx_putmem_nbi_warp( workspace_ptr, workspace_ptr, embedding_dim*sizeof(T), m.source_rank); nvshmemx_putmem_nbi_warp( &m, &m, sizeof(VectorMetadata<T>), m.source_rank); } } } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::bp_compute() { // Data matrices const auto& input = this->get_prev_activations(); const auto& local_output_grad = dynamic_cast<const LocalMat&>(this->get_local_prev_error_signals()); // Dimensions const size_t input_size = this->get_input_size(); const size_t mini_batch_size = input.Width(); const size_t local_mini_batch_size = local_output_grad.Width(); // GPU objects 
auto&& stream = El::GPUManager::Stream(); // Synchronize non-blocking barrier // Note: Make sure NVSHMEM workspaces are ready to recieve gradients. auto& comm = *this->get_comm(); comm.wait(m_nb_barrier_request); // Initialize NVSHMEM buffer for gradient w.r.t. embeddings LocalMat workspace( m_embedding_dim, input_size * mini_batch_size, m_workspace_buffer, m_embedding_dim); // Send gradients to owner processes if (!local_output_grad.IsEmpty()) { constexpr size_t block_size = 32; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = input_size; grid_dims.y = local_mini_batch_size; launch_cuda_kernel( send_gradients_kernel<TensorDataType>, grid_dims, block_dims, 0, stream, m_embedding_dim, Size2{local_mini_batch_size, input_size}, local_output_grad.LockedBuffer(), Size2{size_t(local_output_grad.LDim()), 1}, m_metadata_buffer, Size2{input_size, 1}, workspace.Buffer(), Size2{size_t(workspace.LDim()), 1}, size_t(input.RowShift()), size_t(input.RowStride())); } nvshmemx_quiet_on_stream(stream); // Non-blocking barrier // Note: Gradients have been sent. 
nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request); // Use dense optimizer if needed if (!m_sparse_sgd) { // Create buffer for dense gradients const auto& embeddings = this->weights_values(0); std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> embeddings_grad( embeddings.Construct(embeddings.Grid(), embeddings.Root())); embeddings_grad->AlignWith(embeddings); El::Zeros(*embeddings_grad, embeddings.Height(), embeddings.Width()); auto& local_embeddings_grad = dynamic_cast<LocalMat&>(embeddings_grad->Matrix()); // Apply SGD step to convert sparse gradients to dense gradients apply_sparse_sgd_step( input_size * mini_batch_size, local_embeddings_grad); // Send dense gradients to dense optimizer auto* opt = this->get_weights(0).get_optimizer(); if (opt != nullptr) { opt->add_to_gradient(*embeddings_grad); } } } // --------------------------------------------- // Sparse SGD // --------------------------------------------- namespace { /** Sparse SGD on local embeddings. * * Block dimensions: 32 x 1 x 1 * * Grid dimensions: num_gradients x 1 x 1 */ template <typename T> __global__ void sgd_kernel( T learning_rate, size_t embedding_dim, size_t num_gradients, const VectorMetadata<T>* __restrict__ metadata, const T* __restrict__ embeddings_grad, Size2 embeddings_grad_strides, T* __restrict__ embeddings, Size2 embeddings_strides, size_t rank) { // Indices const size_t tid = threadIdx.x; const size_t bid = blockIdx.x; const size_t nblocks = gridDim.x; constexpr size_t warp_size = 32; // Assign requests to CUDA blocks const size_t gradients_per_block = (num_gradients + nblocks - 1) / nblocks; const size_t i_start = bid * gradients_per_block; const size_t i_end = cuda::min((bid+1) * gradients_per_block, num_gradients); for (size_t i = i_start; i < i_end; ++i) { const auto& m = metadata[i]; if (m.is_active && m.source_rank == rank) { // Update embedding vector with gradient const auto* __restrict__ dw = &embeddings_grad[m.target_index * 
embeddings_grad_strides[0]]; auto* __restrict__ w = &embeddings[m.source_index * embeddings_strides[0]]; for (size_t k = tid; k < embedding_dim; k += warp_size) { cuda::atomic_add(&w[k], -learning_rate * dw[k]); } } } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::apply_sparse_sgd_step( size_t num_gradients, LocalMat& local_embeddings) { // GPU objects auto&& stream = El::GPUManager::Stream(); // Synchronize non-blocking barrier // Note: Make sure gradients have been received. auto& comm = *this->get_comm(); comm.wait(m_nb_barrier_request); // Initialize SHMEM buffer for gradient w.r.t. embeddings LocalMat local_embeddings_grad( m_embedding_dim, num_gradients, m_workspace_buffer, m_embedding_dim); // Sparse SGD on local embeddings const size_t rank = comm.get_rank_in_trainer(); constexpr size_t block_size = 32; const size_t grid_size = num_gradients; launch_cuda_kernel( sgd_kernel<TensorDataType>, grid_size, block_size, 0, stream, m_learning_rate, m_embedding_dim, num_gradients, m_metadata_buffer, local_embeddings_grad.LockedBuffer(), Size2{size_t(local_embeddings_grad.LDim()), 1}, local_embeddings.Buffer(), Size2{size_t(local_embeddings.LDim()), 1}, rank); } // --------------------------------------------- // Explicit template instantiation // --------------------------------------------- /// @todo fp16 template class dist_embedding_layer< float, data_layout::DATA_PARALLEL, El::Device::GPU>; } // namespace lbann #endif // LBANN_HAS_NVSHMEM
afe6f1a9d46e0f40b93968ac20d43b9ccad2f6e9.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/layers/misc/dist_embedding.hpp" #ifdef LBANN_HAS_NVSHMEM #include "lbann/utils/cuda.hpp" #include "lbann/utils/nvshmem.hpp" namespace lbann { namespace { // Typedefs using Size2 = cuda::array<size_t, 2>; template <typename T> using VectorMetadata = typename dist_embedding_layer<T,data_layout::DATA_PARALLEL,El::Device::GPU>::vector_metadata; /** Copy between two device buffers, using all threads in a warp. */ template <typename T> __device__ __forceinline__ T* memcpy_warp(T* __restrict__ dest, const T* __restrict__ src, size_t n) { constexpr size_t warp_size = 32; for (size_t i = threadIdx.x; i < n; i += warp_size) { dest[i] = src[i]; } __syncwarp(); return dest; } /** See El::AbstractDistMatrix::ColOwner. 
*/ __device__ __forceinline__ size_t distmat_index_owner(size_t global_index, size_t align, size_t stride) { return (global_index + align) % stride; } /** See El::AbstractDistMatrix::GlobalCol. */ __device__ __forceinline__ size_t distmat_global_index(size_t local_index, size_t shift, size_t stride) { return shift + local_index * stride; } /** See El::AbstractDistMatrix::LocalCol. */ __device__ __forceinline__ size_t distmat_local_index(size_t global_index, size_t rank, size_t align, size_t stride) { auto shift = (stride + rank - align) % stride; if (global_index > shift) { return (global_index - shift - 1) / stride + 1; } else { return 0; } } /** Launch a CUDA kernel. * * @todo Check that argument types match kernel signature. */ template <typename Kernel, typename... Args> inline void launch_cuda_kernel( const Kernel& kernel, dim3 grid_dims, dim3 block_dims, size_t shared_mem, cudaStream_t stream, Args... args) { void* arg_list[] = { const_cast<void*>(reinterpret_cast<const void*>(&args))... }; CHECK_CUDA( cudaLaunchKernel( reinterpret_cast<const void*>(&kernel), grid_dims, block_dims, arg_list, shared_mem, stream)); } /** Launch a collective NVSHMEM kernel. * * Needed for device-side NVSHMEM synchronization calls like * nvshmem_wait. If grid_dims is zero, then the NVSHMEM will launch * with the largest available grid. * * @todo Check that argument types match kernel signature. */ template <typename Kernel, typename... Args> inline void launch_nvshmem_collective_kernel( const Kernel& kernel, dim3 grid_dims, dim3 block_dims, size_t shared_mem, cudaStream_t stream, Args... args) { if (grid_dims.x == 0) { grid_dims.y = 0; grid_dims.z = 0; } void* arg_list[] = { const_cast<void*>(reinterpret_cast<const void*>(&args))... 
}; auto status = nvshmemx_collective_launch( reinterpret_cast<const void*>(&kernel), grid_dims, block_dims, arg_list, shared_mem, stream); if (status != 0) { LBANN_ERROR( "Failed to launch NVSHMEM collective kernel ", "(error ",status,")"); } } } // namespace <anon> // --------------------------------------------- // Life cycle and setup // --------------------------------------------- template <typename TensorDataType, data_layout Layout, El::Device Device> dist_embedding_layer<TensorDataType,Layout,Device>::~dist_embedding_layer() { if (m_embeddings_buffer != nullptr) { nvshmem_free(m_embeddings_buffer); } if (m_workspace_buffer != nullptr) { nvshmem_free(m_workspace_buffer); } if (m_metadata_buffer != nullptr) { nvshmem_free(m_metadata_buffer); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::attach_embeddings_to_shmem_buffer() { if (m_embeddings_buffer != nullptr || m_embeddings_buffer_size != 0) { LBANN_ERROR("attempted to attach embedding matrix ", "to NVSHMEM buffer multiple times"); } // Embedding weights matrix using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0)); const auto dist = embeddings.DistData(); if (dist.device != El::Device::GPU) { LBANN_ERROR("attempted to attach non-GPU matrix to NVSHMEM buffer"); } #if 0 // nvshmem_addr_accessible is not supported as of NVSHMEM 1.4 if (nvshmem_addr_accessible(embeddings.LockedBuffer(), nvshmem_my_pe())) { return; } #endif // Calculate size of NVSHMEM buffer const auto col_comm_size = El::mpi::Size(embeddings.ColComm()); const auto row_comm_size = El::mpi::Size(embeddings.RowComm()); const auto height = embeddings.Height(); const auto width = embeddings.Width(); const auto local_height = (height + col_comm_size - 1) / col_comm_size; const auto local_width = (width + row_comm_size - 1) / row_comm_size; m_embeddings_buffer_size = 
local_height * local_width * sizeof(TensorDataType); if (m_embeddings_buffer_size == 0) { return; } // Allocate NVSHMEM buffer m_embeddings_buffer = nvshmem::malloc<TensorDataType>(m_embeddings_buffer_size); // Attach matrix to NVSHMEM buffer std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> orig_mat( embeddings.Construct(embeddings.Grid(), embeddings.Root())); *orig_mat = std::move(embeddings); embeddings.Empty(); embeddings.AlignWith(dist); dynamic_cast<El::ElementalMatrix<TensorDataType>&>(embeddings).Attach( height, width, *dist.grid, dist.colAlign, dist.rowAlign, m_embeddings_buffer, local_height, dist.root); El::Copy(*orig_mat, embeddings); } // --------------------------------------------- // Forward prop // --------------------------------------------- namespace { /** Request embedding vectors from owner processes. * * Block dimensions: 32 x 1 x 1 * * Grid dimensions: input_dims[1] x input_dims[0] x 1 */ template <typename T> __global__ void request_embeddings_kernel( size_t embedding_dim, Size2 input_dims, const T* __restrict__ input, Size2 input_strides, const T* __restrict__ embeddings, Size2 embeddings_strides, VectorMetadata<T>* __restrict__ metadata, Size2 metadata_strides, T* __restrict__ workspace, Size2 workspace_strides, size_t rank, size_t input_rowshift, size_t input_rowstride, size_t embeddings_rowalign, size_t embeddings_rowstride) { // Indices const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx; const size_t i_start = bidx * i_per_block; const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]); for (size_t j = bidy; j < input_dims[0]; j += nblocksy) { for (size_t i = i_start; i < i_end; ++i) { const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride); // Get embedding vector index const auto& global_index_float = input[i*input_strides[1] + 
j*input_strides[0]]; const auto& global_index = static_cast<size_t>(cuda::floor(global_index_float)); // Figure out which process owns embedding vector __shared__ unsigned char metadata_shared[sizeof(VectorMetadata<T>)]; auto& m = *reinterpret_cast<VectorMetadata<T>*>(metadata_shared); if (threadIdx.x == 0) { m.source_rank = distmat_index_owner(global_index, embeddings_rowalign, embeddings_rowstride); m.source_index = distmat_local_index(global_index, m.source_rank, embeddings_rowalign, embeddings_rowstride); m.target_rank = rank; m.target_index = i + global_j*input_dims[1]; m.is_active = true; metadata[i*metadata_strides[1] + global_j*metadata_strides[0]] = m; } __syncwarp(); // Get embedding vector from owner process nvshmemx_getmem_nbi_warp( &workspace[m.target_index * workspace_strides[0]], &embeddings[m.source_index * embeddings_strides[0]], embedding_dim*sizeof(T), m.source_rank); } } } /** Copy embedding vectors to output tensor. * * Block dimensions: 32 x 1 x 1 * * Grid dimensions: input_dims[1] x input_dims[0] x 1 */ template <typename T> __global__ void copy_embeddings_kernel( size_t embedding_dim, Size2 input_dims, const VectorMetadata<T>* __restrict__ metadata, Size2 metadata_strides, const T* __restrict__ workspace, Size2 workspace_strides, T* __restrict__ output, Size2 output_strides, size_t input_rowshift, size_t input_rowstride) { // Indices const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx; const size_t i_start = bidx * i_per_block; const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]); for (size_t j = bidy; j < input_dims[0]; j += nblocksy) { for (size_t i = i_start; i < i_end; ++i) { const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride); const auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]]; memcpy_warp( &output[i*embedding_dim + 
j*output_strides[0]], &workspace[m.target_index * workspace_strides[0]], embedding_dim); } } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::fp_compute() { // Data matrices // Note: Make sure to get original weight values since they are in // SHMEM buffer. using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; const auto& embeddings = ValuesGetter::mutable_values(this->get_weights(0)); const auto& input = this->get_prev_activations(); const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix()); auto& local_output = dynamic_cast<LocalMat&>(this->get_local_activations()); // Dimensions const size_t input_size = this->get_input_size(); const size_t output_size = this->get_output_size(); const size_t mini_batch_size = input.Width(); const size_t local_mini_batch_size = local_input.Width(); // GPU objects auto&& stream = El::GPUManager::Stream(); nvshmem::initialize(); // Barrier to handle gradient checking /// @todo Think of a way to avoid this synchronization if (m_barrier_in_forward_prop) { nvshmemx_barrier_all_on_stream(stream); } // Synchronize non-blocking barrier // Note: Make sure embeddings are up-to-date and NVSHMEM workspaces // are safe to reset. 
auto& comm = *this->get_comm(); comm.wait(m_nb_barrier_request); // Initialize NVSHMEM buffer for communicating embedding vectors if (m_workspace_buffer_size < output_size * mini_batch_size) { m_workspace_buffer_size = output_size * mini_batch_size; m_workspace_buffer = nvshmem::realloc(m_workspace_buffer, m_workspace_buffer_size); } LocalMat workspace( m_embedding_dim, input_size * mini_batch_size, m_workspace_buffer, m_embedding_dim); // Initialize NVSHMEM buffer for embedding vector metadata if (m_metadata_buffer_size < input_size * mini_batch_size) { m_metadata_buffer_size = input_size * mini_batch_size; m_metadata_buffer = nvshmem::realloc(m_metadata_buffer, m_metadata_buffer_size); } CHECK_CUDA( cudaMemsetAsync( m_metadata_buffer, 0, m_metadata_buffer_size*sizeof(vector_metadata), stream)); // Request embedding vectors from owning processes const size_t rank = comm.get_rank_in_trainer(); if (!local_input.IsEmpty()) { constexpr size_t block_size = 32; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = input_size; grid_dims.y = local_mini_batch_size; launch_cuda_kernel( request_embeddings_kernel<TensorDataType>, grid_dims, block_dims, 0, stream, m_embedding_dim, Size2{local_mini_batch_size, input_size}, local_input.LockedBuffer(), Size2{size_t(local_input.LDim()), 1}, embeddings.LockedBuffer(), Size2{size_t(embeddings.LDim()), 1}, m_metadata_buffer, Size2{input_size, 1}, workspace.Buffer(), Size2{size_t(workspace.LDim()), 1}, size_t(rank), size_t(input.RowShift()), size_t(input.RowStride()), size_t(embeddings.RowAlign()), size_t(embeddings.RowStride())); } nvshmemx_quiet_on_stream(stream); // Copy embedding vectors to output tensor if (!local_output.IsEmpty()) { constexpr size_t block_size = 32; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = input_size; grid_dims.y = local_mini_batch_size; launch_cuda_kernel( copy_embeddings_kernel<TensorDataType>, grid_dims, block_dims, 0, stream, m_embedding_dim, 
Size2{local_mini_batch_size, input_size}, m_metadata_buffer, Size2{input_size, 1}, workspace.LockedBuffer(), Size2{size_t(workspace.LDim()), 1}, local_output.Buffer(), Size2{size_t(local_output.LDim()), 1}, size_t(input.RowShift()), size_t(input.RowStride())); } // Non-blocking barrier // Note: NVSHMEM workspaces are ready to recieve gradients. nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request); } // --------------------------------------------- // Backprop // --------------------------------------------- namespace { /** Send gradients to owner processes. * * Block dimensions: 32 x 1 x 1 * * Grid dimensions: input_dims[1] x input_dims[0] x 1 */ template <typename T> __global__ void send_gradients_kernel( size_t embedding_dim, Size2 input_dims, const T* __restrict__ output_grad, Size2 output_grad_strides, VectorMetadata<T>* __restrict__ metadata, Size2 metadata_strides, T* __restrict__ workspace, Size2 workspace_strides, size_t input_rowshift, size_t input_rowstride) { // Indices const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; // Assign metadata to CUDA blocks const size_t i_per_block = (input_dims[1] + nblocksx - 1) / nblocksx; const size_t i_start = bidx * i_per_block; const size_t i_end = cuda::min((bidx+1) * i_per_block, input_dims[1]); // Send gradients to owner processes for (size_t j = bidy; j < input_dims[0]; j += nblocksy) { for (size_t i = i_start; i < i_end; ++i) { const auto& global_j = distmat_global_index(j, input_rowshift, input_rowstride); auto& m = metadata[i*metadata_strides[1] + global_j*metadata_strides[0]]; auto* workspace_ptr = &workspace[m.target_index * workspace_strides[0]]; memcpy_warp( workspace_ptr, &output_grad[i*embedding_dim + j*output_grad_strides[0]], embedding_dim); if (m.source_rank != m.target_rank) { nvshmemx_putmem_nbi_warp( workspace_ptr, workspace_ptr, embedding_dim*sizeof(T), m.source_rank); nvshmemx_putmem_nbi_warp( &m, &m, 
sizeof(VectorMetadata<T>), m.source_rank); } } } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::bp_compute() { // Data matrices const auto& input = this->get_prev_activations(); const auto& local_output_grad = dynamic_cast<const LocalMat&>(this->get_local_prev_error_signals()); // Dimensions const size_t input_size = this->get_input_size(); const size_t mini_batch_size = input.Width(); const size_t local_mini_batch_size = local_output_grad.Width(); // GPU objects auto&& stream = El::GPUManager::Stream(); // Synchronize non-blocking barrier // Note: Make sure NVSHMEM workspaces are ready to recieve gradients. auto& comm = *this->get_comm(); comm.wait(m_nb_barrier_request); // Initialize NVSHMEM buffer for gradient w.r.t. embeddings LocalMat workspace( m_embedding_dim, input_size * mini_batch_size, m_workspace_buffer, m_embedding_dim); // Send gradients to owner processes if (!local_output_grad.IsEmpty()) { constexpr size_t block_size = 32; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = input_size; grid_dims.y = local_mini_batch_size; launch_cuda_kernel( send_gradients_kernel<TensorDataType>, grid_dims, block_dims, 0, stream, m_embedding_dim, Size2{local_mini_batch_size, input_size}, local_output_grad.LockedBuffer(), Size2{size_t(local_output_grad.LDim()), 1}, m_metadata_buffer, Size2{input_size, 1}, workspace.Buffer(), Size2{size_t(workspace.LDim()), 1}, size_t(input.RowShift()), size_t(input.RowStride())); } nvshmemx_quiet_on_stream(stream); // Non-blocking barrier // Note: Gradients have been sent. 
nb_barrier(comm, comm.get_trainer_comm(), m_nb_barrier_request); // Use dense optimizer if needed if (!m_sparse_sgd) { // Create buffer for dense gradients const auto& embeddings = this->weights_values(0); std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> embeddings_grad( embeddings.Construct(embeddings.Grid(), embeddings.Root())); embeddings_grad->AlignWith(embeddings); El::Zeros(*embeddings_grad, embeddings.Height(), embeddings.Width()); auto& local_embeddings_grad = dynamic_cast<LocalMat&>(embeddings_grad->Matrix()); // Apply SGD step to convert sparse gradients to dense gradients apply_sparse_sgd_step( input_size * mini_batch_size, local_embeddings_grad); // Send dense gradients to dense optimizer auto* opt = this->get_weights(0).get_optimizer(); if (opt != nullptr) { opt->add_to_gradient(*embeddings_grad); } } } // --------------------------------------------- // Sparse SGD // --------------------------------------------- namespace { /** Sparse SGD on local embeddings. * * Block dimensions: 32 x 1 x 1 * * Grid dimensions: num_gradients x 1 x 1 */ template <typename T> __global__ void sgd_kernel( T learning_rate, size_t embedding_dim, size_t num_gradients, const VectorMetadata<T>* __restrict__ metadata, const T* __restrict__ embeddings_grad, Size2 embeddings_grad_strides, T* __restrict__ embeddings, Size2 embeddings_strides, size_t rank) { // Indices const size_t tid = threadIdx.x; const size_t bid = blockIdx.x; const size_t nblocks = gridDim.x; constexpr size_t warp_size = 32; // Assign requests to CUDA blocks const size_t gradients_per_block = (num_gradients + nblocks - 1) / nblocks; const size_t i_start = bid * gradients_per_block; const size_t i_end = cuda::min((bid+1) * gradients_per_block, num_gradients); for (size_t i = i_start; i < i_end; ++i) { const auto& m = metadata[i]; if (m.is_active && m.source_rank == rank) { // Update embedding vector with gradient const auto* __restrict__ dw = &embeddings_grad[m.target_index * 
embeddings_grad_strides[0]]; auto* __restrict__ w = &embeddings[m.source_index * embeddings_strides[0]]; for (size_t k = tid; k < embedding_dim; k += warp_size) { cuda::atomic_add(&w[k], -learning_rate * dw[k]); } } } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void dist_embedding_layer<TensorDataType,Layout,Device>::apply_sparse_sgd_step( size_t num_gradients, LocalMat& local_embeddings) { // GPU objects auto&& stream = El::GPUManager::Stream(); // Synchronize non-blocking barrier // Note: Make sure gradients have been received. auto& comm = *this->get_comm(); comm.wait(m_nb_barrier_request); // Initialize SHMEM buffer for gradient w.r.t. embeddings LocalMat local_embeddings_grad( m_embedding_dim, num_gradients, m_workspace_buffer, m_embedding_dim); // Sparse SGD on local embeddings const size_t rank = comm.get_rank_in_trainer(); constexpr size_t block_size = 32; const size_t grid_size = num_gradients; launch_cuda_kernel( sgd_kernel<TensorDataType>, grid_size, block_size, 0, stream, m_learning_rate, m_embedding_dim, num_gradients, m_metadata_buffer, local_embeddings_grad.LockedBuffer(), Size2{size_t(local_embeddings_grad.LDim()), 1}, local_embeddings.Buffer(), Size2{size_t(local_embeddings.LDim()), 1}, rank); } // --------------------------------------------- // Explicit template instantiation // --------------------------------------------- /// @todo fp16 template class dist_embedding_layer< float, data_layout::DATA_PARALLEL, El::Device::GPU>; } // namespace lbann #endif // LBANN_HAS_NVSHMEM