system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/** size of A = 768 size of B = 180 gridDim = 60 blockDim = 256 k= 200000 x = 3 **/ __global__ void CompareAddVectors(const int* A, const int* B, int* C, int x, int k) { int size_A = x*blockDim.x; int B_start_index = (blockIdx.x*gridDim.y + blockIdx.y)*x; int t,i,j,temp; __shared__ int c[3][768]; __shared__ int a[768]; int b_0, b_1, b_2; for (t = 0; t < x; t++) { temp = blockDim.x*t + threadIdx.x; a[temp] = A[temp]; } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { c[i][blockDim.x*t + threadIdx.x] = 0; } } __syncthreads(); b_0 = B[B_start_index+0]; b_1 = B[B_start_index+1]; b_2 = B[B_start_index+2]; for (t = 0; t < k; t++) { #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[0][temp] = max(a[temp]+b_0, c[0][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[1][temp] = max(a[temp]+b_1, c[1][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[2][temp] = max(a[temp]+b_2, c[2][temp]); } for (i = 0; i < x; i++) { temp = blockDim.x*i+threadIdx.x; a[temp] = max(a[temp]+10, a[temp]); } b_0 = max(b_0, b_0+10); b_1 = max(b_1, b_1+10); b_2 = max(b_2, b_2+10); __syncthreads(); } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { temp = t*blockDim.x + threadIdx.x; C[(x*blockIdx.x+i)*size_A + temp] = c[i][temp]; } } }
#include <hip/hip_runtime.h> /** size of A = 768 size of B = 180 gridDim = 60 blockDim = 256 k= 200000 x = 3 **/ __global__ void CompareAddVectors(const int* A, const int* B, int* C, int x, int k) { int size_A = x*blockDim.x; int B_start_index = (blockIdx.x*gridDim.y + blockIdx.y)*x; int t,i,j,temp; __shared__ int c[3][768]; __shared__ int a[768]; int b_0, b_1, b_2; for (t = 0; t < x; t++) { temp = blockDim.x*t + threadIdx.x; a[temp] = A[temp]; } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { c[i][blockDim.x*t + threadIdx.x] = 0; } } __syncthreads(); b_0 = B[B_start_index+0]; b_1 = B[B_start_index+1]; b_2 = B[B_start_index+2]; for (t = 0; t < k; t++) { #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[0][temp] = max(a[temp]+b_0, c[0][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[1][temp] = max(a[temp]+b_1, c[1][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[2][temp] = max(a[temp]+b_2, c[2][temp]); } for (i = 0; i < x; i++) { temp = blockDim.x*i+threadIdx.x; a[temp] = max(a[temp]+10, a[temp]); } b_0 = max(b_0, b_0+10); b_1 = max(b_1, b_1+10); b_2 = max(b_2, b_2+10); __syncthreads(); } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { temp = t*blockDim.x + threadIdx.x; C[(x*blockIdx.x+i)*size_A + temp] = c[i][temp]; } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> /** size of A = 768 size of B = 180 gridDim = 60 blockDim = 256 k= 200000 x = 3 **/ __global__ void CompareAddVectors(const int* A, const int* B, int* C, int x, int k) { int size_A = x*blockDim.x; int B_start_index = (blockIdx.x*gridDim.y + blockIdx.y)*x; int t,i,j,temp; __shared__ int c[3][768]; __shared__ int a[768]; int b_0, b_1, b_2; for (t = 0; t < x; t++) { temp = blockDim.x*t + threadIdx.x; a[temp] = A[temp]; } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { c[i][blockDim.x*t + threadIdx.x] = 0; } } __syncthreads(); b_0 = B[B_start_index+0]; b_1 = B[B_start_index+1]; b_2 = B[B_start_index+2]; for (t = 0; t < k; t++) { #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[0][temp] = max(a[temp]+b_0, c[0][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[1][temp] = max(a[temp]+b_1, c[1][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[2][temp] = max(a[temp]+b_2, c[2][temp]); } for (i = 0; i < x; i++) { temp = blockDim.x*i+threadIdx.x; a[temp] = max(a[temp]+10, a[temp]); } b_0 = max(b_0, b_0+10); b_1 = max(b_1, b_1+10); b_2 = max(b_2, b_2+10); __syncthreads(); } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { temp = t*blockDim.x + threadIdx.x; C[(x*blockIdx.x+i)*size_A + temp] = c[i][temp]; } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17CompareAddVectorsPKiS0_Piii .globl _Z17CompareAddVectorsPKiS0_Piii .p2align 8 .type _Z17CompareAddVectorsPKiS0_Piii,@function _Z17CompareAddVectorsPKiS0_Piii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s5, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s8, s2, 0xffff s_cmp_lt_i32 s5, 1 s_cbranch_scc1 .LBB0_3 s_load_b64 s[2:3], s[0:1], 0x0 v_lshl_add_u32 v3, v0, 2, 0x2400 v_mov_b32_e32 v1, v0 s_lshl_b32 s4, s8, 2 s_mov_b32 s6, s5 .LBB0_2: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_ashrrev_i32_e32 v2, 31, v1 s_add_i32 s6, s6, -1 s_cmp_eq_u32 s6, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_lshlrev_b64 v[4:5], 2, v[1:2] v_add_nc_u32_e32 v1, s8, v1 s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s2, v4 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo global_load_b32 v2, v[4:5], off s_waitcnt vmcnt(0) ds_store_b32 v3, v2 v_add_nc_u32_e32 v3, s4, v3 s_cbranch_scc0 .LBB0_2 .LBB0_3: s_cmp_lt_i32 s5, 1 s_cbranch_scc1 .LBB0_8 v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0 s_lshl_b32 s2, s8, 2 s_mov_b32 s3, 0 .LBB0_5: s_delay_alu instid0(VALU_DEP_1) v_mov_b32_e32 v3, v1 s_mov_b32 s4, s5 .LBB0_6: ds_store_b32 v3, v2 v_add_nc_u32_e32 v3, s2, v3 s_add_i32 s4, s4, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s4, 0 s_cbranch_scc1 .LBB0_6 v_add_nc_u32_e32 v1, 0xc00, v1 s_add_i32 s3, s3, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s3, s5 s_cbranch_scc1 .LBB0_5 .LBB0_8: s_load_b32 s9, s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cmp_lt_i32 s9, 1 s_cbranch_scc1 .LBB0_34 s_clause 0x1 s_load_b32 s4, s[0:1], 0x24 s_load_b64 s[2:3], s[0:1], 0x8 v_lshl_add_u32 v1, v0, 2, 0x2400 s_mov_b32 s13, 0 s_waitcnt lgkmcnt(0) s_mul_i32 s4, s4, s14 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_add_i32 s4, s4, s15 s_mul_i32 s6, s4, s5 s_delay_alu instid0(SALU_CYCLE_1) | 
instskip(NEXT) | instid1(SALU_CYCLE_1) s_ashr_i32 s7, s6, 31 s_lshl_b64 s[6:7], s[6:7], 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s2, s2, s6 s_addc_u32 s3, s3, s7 s_cmp_gt_i32 s5, 0 s_clause 0x1 s_load_b64 s[6:7], s[2:3], 0x0 s_load_b32 s10, s[2:3], 0x8 s_cselect_b32 s2, -1, 0 s_and_b32 s11, s5, 7 s_and_b32 s12, s5, 0x7ffffff8 s_cmp_gt_u32 s5, 7 v_cndmask_b32_e64 v2, 0, 1, s2 s_cselect_b32 s3, -1, 0 s_cmp_lg_u32 s11, 0 v_cndmask_b32_e64 v3, 0, 1, s3 s_cselect_b32 s2, -1, 0 s_lshl_b32 s16, s8, 2 v_cndmask_b32_e64 v4, 0, 1, s2 v_cmp_ne_u32_e64 s2, 1, v2 v_cmp_ne_u32_e64 s3, 1, v3 s_lshl_b32 s15, s8, 5 s_add_i32 s17, s16, 0x2400 v_cmp_ne_u32_e64 s4, 1, v4 s_add_i32 s18, s16, 0x1800 s_add_i32 s19, s16, 0xc00 s_branch .LBB0_11 .LBB0_10: s_add_i32 s13, s13, 1 s_waitcnt lgkmcnt(0) s_add_i32 s6, s6, 10 s_add_i32 s7, s7, 10 s_add_i32 s10, s10, 10 s_cmp_eq_u32 s13, s9 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_34 .LBB0_11: s_and_b32 vcc_lo, exec_lo, s2 s_cbranch_vccnz .LBB0_18 s_and_b32 vcc_lo, exec_lo, s3 s_mov_b32 s20, 0 s_cbranch_vccnz .LBB0_15 v_mov_b32_e32 v2, v1 .LBB0_14: ds_load_b32 v3, v2 v_add_nc_u32_e32 v4, 0xffffdc00, v2 s_add_i32 s20, s20, 8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_eq_u32 s12, s20 ds_load_b32 v5, v4 v_add_nc_u32_e32 v6, s16, v4 v_add_nc_u32_e32 v7, s17, v6 ds_load_b32 v8, v7 v_add_nc_u32_e32 v2, s15, v2 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v3, s6, v3 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v3, v5 ds_store_b32 v4, v3 ds_load_2addr_stride64_b32 v[3:4], v6 offset1:36 v_add_nc_u32_e32 v5, s6, v8 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, s6, v4 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xffffdc00, v7 ds_store_b32 v6, v3 ds_load_b32 v3, v4 v_add_nc_u32_e32 v6, s17, v4 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v5, v3 v_add_nc_u32_e32 v5, 0xffffdc00, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v5 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s6, v7 
v_add_nc_u32_e32 v6, s17, v5 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xffffdc00, v6 ds_store_b32 v5, v3 ds_load_b32 v3, v4 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v5, s6, v7 v_add_nc_u32_e32 v6, s17, v4 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v5, v3 v_add_nc_u32_e32 v5, 0xffffdc00, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v5 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s6, v7 v_add_nc_u32_e32 v6, s17, v5 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xffffdc00, v6 ds_store_b32 v5, v3 ds_load_b32 v3, v4 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v5, s6, v7 v_add_nc_u32_e32 v6, s17, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_nc_u32_e32 v7, 0xffffdc00, v6 s_waitcnt lgkmcnt(0) v_max_i32_e32 v3, v5, v3 ds_load_b32 v5, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v7 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s6, v5 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v4, v3 ds_store_b32 v7, v3 s_cbranch_scc0 .LBB0_14 .LBB0_15: s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccnz .LBB0_18 s_mul_i32 s20, s8, s20 s_delay_alu instid0(SALU_CYCLE_1) v_add_lshl_u32 v2, v0, s20, 2 s_mov_b32 s20, s11 .LBB0_17: ds_load_2addr_stride64_b32 v[3:4], v2 offset1:36 s_add_i32 s20, s20, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_lg_u32 s20, 0 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, s6, v4 v_max_i32_e32 v3, v4, v3 ds_store_b32 v2, v3 v_add_nc_u32_e32 v2, s16, v2 s_cbranch_scc1 .LBB0_17 .LBB0_18: s_and_b32 vcc_lo, exec_lo, s2 s_cbranch_vccnz .LBB0_25 s_and_b32 vcc_lo, exec_lo, s3 s_mov_b32 s20, 0 s_cbranch_vccnz .LBB0_22 v_mov_b32_e32 v2, v1 .LBB0_21: ds_load_b32 v3, v2 v_add_nc_u32_e32 v4, 0xffffe800, v2 s_add_i32 s20, s20, 8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_eq_u32 s12, s20 ds_load_b32 v5, v4 v_add_nc_u32_e32 v6, s16, v4 v_add_nc_u32_e32 v7, s18, v6 ds_load_b32 v8, v7 
v_add_nc_u32_e32 v2, s15, v2 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v3, s7, v3 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v3, v5 ds_store_b32 v4, v3 ds_load_2addr_stride64_b32 v[3:4], v6 offset1:24 v_add_nc_u32_e32 v5, s7, v8 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, s7, v4 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xffffe800, v7 ds_store_b32 v6, v3 ds_load_b32 v3, v4 v_add_nc_u32_e32 v6, s18, v4 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v5, v3 v_add_nc_u32_e32 v5, 0xffffe800, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v5 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s7, v7 v_add_nc_u32_e32 v6, s18, v5 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xffffe800, v6 ds_store_b32 v5, v3 ds_load_b32 v3, v4 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v5, s7, v7 v_add_nc_u32_e32 v6, s18, v4 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v5, v3 v_add_nc_u32_e32 v5, 0xffffe800, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v5 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s7, v7 v_add_nc_u32_e32 v6, s18, v5 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xffffe800, v6 ds_store_b32 v5, v3 ds_load_b32 v3, v4 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v5, s7, v7 v_add_nc_u32_e32 v6, s18, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_nc_u32_e32 v7, 0xffffe800, v6 s_waitcnt lgkmcnt(0) v_max_i32_e32 v3, v5, v3 ds_load_b32 v5, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v7 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s7, v5 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v4, v3 ds_store_b32 v7, v3 s_cbranch_scc0 .LBB0_21 .LBB0_22: s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccnz .LBB0_25 v_mad_u64_u32 v[2:3], null, s8, s20, v[0:1] s_mov_b32 s20, s11 s_delay_alu instid0(VALU_DEP_1) v_lshl_add_u32 v2, v2, 2, 0xc00 .LBB0_24: ds_load_2addr_stride64_b32 v[3:4], v2 offset1:24 s_add_i32 s20, s20, -1 s_delay_alu 
instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_lg_u32 s20, 0 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, s7, v4 v_max_i32_e32 v3, v4, v3 ds_store_b32 v2, v3 v_add_nc_u32_e32 v2, s16, v2 s_cbranch_scc1 .LBB0_24 .LBB0_25: s_and_b32 vcc_lo, exec_lo, s2 s_cbranch_vccnz .LBB0_32 s_and_b32 vcc_lo, exec_lo, s3 s_mov_b32 s20, 0 s_cbranch_vccnz .LBB0_29 v_mov_b32_e32 v2, v1 .LBB0_28: ds_load_b32 v3, v2 v_add_nc_u32_e32 v4, 0xfffff400, v2 s_add_i32 s20, s20, 8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_eq_u32 s12, s20 ds_load_b32 v5, v4 v_add_nc_u32_e32 v6, s16, v4 v_add_nc_u32_e32 v7, s19, v6 ds_load_b32 v8, v7 v_add_nc_u32_e32 v2, s15, v2 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v3, s10, v3 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v3, v5 ds_store_b32 v4, v3 ds_load_2addr_stride64_b32 v[3:4], v6 offset1:12 v_add_nc_u32_e32 v5, s10, v8 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, s10, v4 s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xfffff400, v7 ds_store_b32 v6, v3 ds_load_b32 v3, v4 v_add_nc_u32_e32 v6, s19, v4 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v5, v3 v_add_nc_u32_e32 v5, 0xfffff400, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v5 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s10, v7 v_add_nc_u32_e32 v6, s19, v5 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xfffff400, v6 ds_store_b32 v5, v3 ds_load_b32 v3, v4 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v5, s10, v7 v_add_nc_u32_e32 v6, s19, v4 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v5, v3 v_add_nc_u32_e32 v5, 0xfffff400, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v5 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s10, v7 v_add_nc_u32_e32 v6, s19, v5 ds_load_b32 v7, v6 s_waitcnt lgkmcnt(1) v_max_i32_e32 v3, v4, v3 v_add_nc_u32_e32 v4, 0xfffff400, v6 ds_store_b32 v5, v3 ds_load_b32 v3, v4 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v5, s10, v7 v_add_nc_u32_e32 v6, s19, v4 
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_nc_u32_e32 v7, 0xfffff400, v6 s_waitcnt lgkmcnt(0) v_max_i32_e32 v3, v5, v3 ds_load_b32 v5, v6 ds_store_b32 v4, v3 ds_load_b32 v3, v7 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v4, s10, v5 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) v_max_i32_e32 v3, v4, v3 ds_store_b32 v7, v3 s_cbranch_scc0 .LBB0_28 .LBB0_29: s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccnz .LBB0_32 v_mad_u64_u32 v[2:3], null, s8, s20, v[0:1] s_mov_b32 s20, s11 s_delay_alu instid0(VALU_DEP_1) v_lshl_add_u32 v2, v2, 2, 0x1800 .LBB0_31: ds_load_2addr_stride64_b32 v[3:4], v2 offset1:12 s_add_i32 s20, s20, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_lg_u32 s20, 0 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v4, s10, v4 v_max_i32_e32 v3, v4, v3 ds_store_b32 v2, v3 v_add_nc_u32_e32 v2, s16, v2 s_cbranch_scc1 .LBB0_31 .LBB0_32: v_mov_b32_e32 v2, v1 s_and_b32 vcc_lo, exec_lo, s2 s_mov_b32 s20, s5 s_cbranch_vccnz .LBB0_10 .LBB0_33: ds_load_b32 v3, v2 s_add_i32 s20, s20, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s20, 0 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v3, 10, v3 ds_store_b32 v2, v3 v_add_nc_u32_e32 v2, s16, v2 s_cbranch_scc1 .LBB0_33 s_branch .LBB0_10 .LBB0_34: s_cmp_lt_i32 s5, 1 s_cbranch_scc1 .LBB0_39 s_load_b64 s[0:1], s[0:1], 0x10 s_mul_i32 s2, s5, s5 s_mul_i32 s3, s5, s8 s_mul_i32 s14, s14, s2 v_mov_b32_e32 v3, 0 v_mad_u64_u32 v[1:2], null, s14, s8, v[0:1] v_lshlrev_b32_e32 v0, 2, v0 s_lshl_b32 s2, s8, 2 s_mov_b32 s4, 0 .p2align 6 .LBB0_36: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_mov_b32_e32 v2, v1 v_mov_b32_e32 v4, v0 s_mov_b32 s6, s5 .LBB0_37: ds_load_b32 v7, v4 v_lshlrev_b64 v[5:6], 2, v[2:3] v_add_nc_u32_e32 v4, s2, v4 v_add_nc_u32_e32 v2, s8, v2 s_add_i32 s6, s6, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s6, 0 s_waitcnt lgkmcnt(0) v_add_co_u32 v5, vcc_lo, s0, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo global_store_b32 v[5:6], 
v7, off s_cbranch_scc1 .LBB0_37 v_add_nc_u32_e32 v0, 0xc00, v0 v_add_nc_u32_e32 v1, s3, v1 s_add_i32 s4, s4, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s4, s5 s_cbranch_scc1 .LBB0_36 .LBB0_39: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17CompareAddVectorsPKiS0_Piii .amdhsa_group_segment_fixed_size 12288 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 21 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17CompareAddVectorsPKiS0_Piii, .Lfunc_end0-_Z17CompareAddVectorsPKiS0_Piii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 
e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 12288 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17CompareAddVectorsPKiS0_Piii .private_segment_fixed_size: 0 .sgpr_count: 23 .sgpr_spill_count: 0 .symbol: _Z17CompareAddVectorsPKiS0_Piii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> /** size of A = 768 size of B = 180 gridDim = 60 blockDim = 256 k= 200000 x = 3 **/ __global__ void CompareAddVectors(const int* A, const int* B, int* C, int x, int k) { int size_A = x*blockDim.x; int B_start_index = (blockIdx.x*gridDim.y + blockIdx.y)*x; int t,i,j,temp; __shared__ int c[3][768]; __shared__ int a[768]; int b_0, b_1, b_2; for (t = 0; t < x; t++) { temp = blockDim.x*t + threadIdx.x; a[temp] = A[temp]; } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { c[i][blockDim.x*t + threadIdx.x] = 0; } } __syncthreads(); b_0 = B[B_start_index+0]; b_1 = B[B_start_index+1]; b_2 = B[B_start_index+2]; for (t = 0; t < k; t++) { #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[0][temp] = max(a[temp]+b_0, c[0][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[1][temp] = max(a[temp]+b_1, c[1][temp]); } #pragma unroll for (j = 0; j < x; j++) { temp = blockDim.x*j+threadIdx.x; c[2][temp] = max(a[temp]+b_2, c[2][temp]); } for (i = 0; i < x; i++) { temp = blockDim.x*i+threadIdx.x; a[temp] = max(a[temp]+10, a[temp]); } b_0 = max(b_0, b_0+10); b_1 = max(b_1, b_1+10); b_2 = max(b_2, b_2+10); __syncthreads(); } for (i = 0; i < x; i++) { for (t = 0; t < x; t++) { temp = t*blockDim.x + threadIdx.x; C[(x*blockIdx.x+i)*size_A + temp] = c[i][temp]; } } }
.text .file "intKernelMaxSharedAll.hip" .globl _Z32__device_stub__CompareAddVectorsPKiS0_Piii # -- Begin function _Z32__device_stub__CompareAddVectorsPKiS0_Piii .p2align 4, 0x90 .type _Z32__device_stub__CompareAddVectorsPKiS0_Piii,@function _Z32__device_stub__CompareAddVectorsPKiS0_Piii: # @_Z32__device_stub__CompareAddVectorsPKiS0_Piii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17CompareAddVectorsPKiS0_Piii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z32__device_stub__CompareAddVectorsPKiS0_Piii, .Lfunc_end0-_Z32__device_stub__CompareAddVectorsPKiS0_Piii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17CompareAddVectorsPKiS0_Piii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, 
.Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17CompareAddVectorsPKiS0_Piii,@object # @_Z17CompareAddVectorsPKiS0_Piii .section .rodata,"a",@progbits .globl _Z17CompareAddVectorsPKiS0_Piii .p2align 3, 0x0 _Z17CompareAddVectorsPKiS0_Piii: .quad _Z32__device_stub__CompareAddVectorsPKiS0_Piii .size _Z17CompareAddVectorsPKiS0_Piii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17CompareAddVectorsPKiS0_Piii" .size .L__unnamed_1, 32 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__CompareAddVectorsPKiS0_Piii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17CompareAddVectorsPKiS0_Piii .addrsig_sym __hip_fatbin .addrsig_sym 
__hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0019e7b6_00000000-6_intKernelMaxSharedAll.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z45__device_stub__Z17CompareAddVectorsPKiS0_PiiiPKiS0_Piii .type _Z45__device_stub__Z17CompareAddVectorsPKiS0_PiiiPKiS0_Piii, @function _Z45__device_stub__Z17CompareAddVectorsPKiS0_PiiiPKiS0_Piii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z17CompareAddVectorsPKiS0_Piii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z45__device_stub__Z17CompareAddVectorsPKiS0_PiiiPKiS0_Piii, .-_Z45__device_stub__Z17CompareAddVectorsPKiS0_PiiiPKiS0_Piii .globl _Z17CompareAddVectorsPKiS0_Piii .type 
_Z17CompareAddVectorsPKiS0_Piii, @function _Z17CompareAddVectorsPKiS0_Piii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z45__device_stub__Z17CompareAddVectorsPKiS0_PiiiPKiS0_Piii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z17CompareAddVectorsPKiS0_Piii, .-_Z17CompareAddVectorsPKiS0_Piii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z17CompareAddVectorsPKiS0_Piii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17CompareAddVectorsPKiS0_Piii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "intKernelMaxSharedAll.hip" .globl _Z32__device_stub__CompareAddVectorsPKiS0_Piii # -- Begin function _Z32__device_stub__CompareAddVectorsPKiS0_Piii .p2align 4, 0x90 .type _Z32__device_stub__CompareAddVectorsPKiS0_Piii,@function _Z32__device_stub__CompareAddVectorsPKiS0_Piii: # @_Z32__device_stub__CompareAddVectorsPKiS0_Piii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17CompareAddVectorsPKiS0_Piii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z32__device_stub__CompareAddVectorsPKiS0_Piii, .Lfunc_end0-_Z32__device_stub__CompareAddVectorsPKiS0_Piii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17CompareAddVectorsPKiS0_Piii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, 
.Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17CompareAddVectorsPKiS0_Piii,@object # @_Z17CompareAddVectorsPKiS0_Piii .section .rodata,"a",@progbits .globl _Z17CompareAddVectorsPKiS0_Piii .p2align 3, 0x0 _Z17CompareAddVectorsPKiS0_Piii: .quad _Z32__device_stub__CompareAddVectorsPKiS0_Piii .size _Z17CompareAddVectorsPKiS0_Piii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17CompareAddVectorsPKiS0_Piii" .size .L__unnamed_1, 32 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__CompareAddVectorsPKiS0_Piii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17CompareAddVectorsPKiS0_Piii .addrsig_sym __hip_fatbin .addrsig_sym 
__hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
//Submitted by GAutham M 15co118 and yashwanth 15co154 #include<stdio.h> #include<stdlib.h> #include<cuda.h> #include <time.h> __global__ void func(float *da_in,float *db_in,float *d_out) { int idx = blockIdx.x*100 + threadIdx.x; d_out[idx] = da_in[idx] + db_in[idx]; } int main() { const int array_size = 16000; const int array_bytes = array_size* sizeof(float); float a_in[array_size],b_in[array_size]; for(int i=0;i<array_size;i++) { a_in[i] = float(i); b_in[i]=float(i); } /*for(int i=0;i<array_size;i++) { b_in[i]=rand()%16000; }*/ float h_out[array_size]; float *da_in; float *db_in; float *d_out; int temp=array_size; int array_bytes1=array_bytes; time_t t,t1; srand((unsigned) time(&t)); t1=clock(); while(temp>1) { //printf("abc"); if((temp)%2==1) { a_in[temp]=0; //printf("con fail\n"); temp++; array_bytes1+=8; } temp=temp/2; array_bytes1/=2; cudaMalloc((void **)&da_in,array_bytes1); cudaMalloc((void **)&db_in,array_bytes1); cudaMalloc((void **)&d_out,array_bytes1); cudaMemcpy(da_in,a_in,array_bytes1,cudaMemcpyHostToDevice); cudaMemcpy(db_in,a_in+(temp),array_bytes1,cudaMemcpyHostToDevice); //kernel func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out); //copying back cudaMemcpy(h_out,d_out,array_bytes1,cudaMemcpyDeviceToHost); for(int i=0;i<temp;i++) { // a_in[i]=h_out[i]; // printf("%d=%f",i+1,h_out[i]); // printf(((i%4)!=3)? "\t":"\n"); a_in[i]=h_out[i]; } cudaFree(da_in); cudaFree(d_out); cudaFree(db_in); //printf("\n"); } t1=clock()-t1; double time_taken = ((double)t1)/CLOCKS_PER_SEC; printf("parallel execution gave answer as%f- time taken as %f\n",a_in[0],time_taken); }
code for sm_80 Function : _Z4funcPfS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R6, R6, 0x64, R3 ; /* 0x0000006406067824 */ /* 0x001fca00078e0203 */ /*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x0c0fe400078e0207 */ /*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0207 */ /*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */ /* 0x004fca0000000000 */ /*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
//Submitted by GAutham M 15co118 and yashwanth 15co154 #include<stdio.h> #include<stdlib.h> #include<cuda.h> #include <time.h> __global__ void func(float *da_in,float *db_in,float *d_out) { int idx = blockIdx.x*100 + threadIdx.x; d_out[idx] = da_in[idx] + db_in[idx]; } int main() { const int array_size = 16000; const int array_bytes = array_size* sizeof(float); float a_in[array_size],b_in[array_size]; for(int i=0;i<array_size;i++) { a_in[i] = float(i); b_in[i]=float(i); } /*for(int i=0;i<array_size;i++) { b_in[i]=rand()%16000; }*/ float h_out[array_size]; float *da_in; float *db_in; float *d_out; int temp=array_size; int array_bytes1=array_bytes; time_t t,t1; srand((unsigned) time(&t)); t1=clock(); while(temp>1) { //printf("abc"); if((temp)%2==1) { a_in[temp]=0; //printf("con fail\n"); temp++; array_bytes1+=8; } temp=temp/2; array_bytes1/=2; cudaMalloc((void **)&da_in,array_bytes1); cudaMalloc((void **)&db_in,array_bytes1); cudaMalloc((void **)&d_out,array_bytes1); cudaMemcpy(da_in,a_in,array_bytes1,cudaMemcpyHostToDevice); cudaMemcpy(db_in,a_in+(temp),array_bytes1,cudaMemcpyHostToDevice); //kernel func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out); //copying back cudaMemcpy(h_out,d_out,array_bytes1,cudaMemcpyDeviceToHost); for(int i=0;i<temp;i++) { // a_in[i]=h_out[i]; // printf("%d=%f",i+1,h_out[i]); // printf(((i%4)!=3)? "\t":"\n"); a_in[i]=h_out[i]; } cudaFree(da_in); cudaFree(d_out); cudaFree(db_in); //printf("\n"); } t1=clock()-t1; double time_taken = ((double)t1)/CLOCKS_PER_SEC; printf("parallel execution gave answer as%f- time taken as %f\n",a_in[0],time_taken); }
.file "tmpxft_000350f6_00000000-6_q2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z4funcPfS_S_PfS_S_ .type _Z27__device_stub__Z4funcPfS_S_PfS_S_, @function _Z27__device_stub__Z4funcPfS_S_PfS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4funcPfS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z27__device_stub__Z4funcPfS_S_PfS_S_, .-_Z27__device_stub__Z4funcPfS_S_PfS_S_ .globl _Z4funcPfS_S_ .type _Z4funcPfS_S_, @function _Z4funcPfS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z4funcPfS_S_PfS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z4funcPfS_S_, .-_Z4funcPfS_S_ .section 
.rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "parallel execution gave answer as%f- time taken as %f\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 leaq -126976(%rsp), %r11 .cfi_def_cfa 11, 127032 .LPSRL0: subq $4096, %rsp orq $0, (%rsp) cmpq %r11, %rsp jne .LPSRL0 .cfi_def_cfa_register 7 subq $1144, %rsp .cfi_def_cfa_offset 128176 movq %fs:40, %rax movq %rax, 128104(%rsp) xorl %eax, %eax .L12: pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, 96(%rsp,%rax,4) addq $1, %rax cmpq $16000, %rax jne .L12 leaq 64(%rsp), %rdi call time@PLT movl %eax, %edi call srand@PLT call clock@PLT movq %rax, %rbp movl $64000, 4(%rsp) movl $16000, %r14d leaq 40(%rsp), %rax movq %rax, 8(%rsp) leaq 48(%rsp), %rax movq %rax, 16(%rsp) leaq 96(%rsp), %r12 leaq 64096(%rsp), %r13 movq %rbp, 24(%rsp) jmp .L16 .L13: movl %r15d, %ebx shrl $31, %ebx addl %r15d, %ebx sarl %ebx movl %ebx, %r14d movl 4(%rsp), %eax movl %eax, %ebp shrl $31, %ebp addl %eax, %ebp sarl %ebp movl %ebp, 4(%rsp) movslq %ebp, %rbp movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMalloc@PLT movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movslq %ebx, %rbx salq $2, %rbx leaq (%r12,%rbx), %rsi movl $1, %ecx movq %rbp, %rdx movq 48(%rsp), %rdi call cudaMemcpy@PLT movl $100, 84(%rsp) movl $1, 88(%rsp) movl $1, 92(%rsp) movl $160, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movl $1, %ecx movq 72(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je 
.L23 .L14: movl $2, %ecx movq %rbp, %rdx movq 56(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movl $0, %eax .L15: movss (%rax,%r13), %xmm0 movss %xmm0, (%rax,%r12) addq $4, %rax cmpq %rax, %rbx jne .L15 movq 40(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT cmpl $3, %r15d jle .L24 .L16: movl %r14d, %edx shrl $31, %edx leal (%r14,%rdx), %eax andl $1, %eax subl %edx, %eax movl %r14d, %r15d cmpl $1, %eax jne .L13 movslq %r14d, %rax movl $0x00000000, 96(%rsp,%rax,4) leal 1(%r14), %r15d addl $8, 4(%rsp) jmp .L13 .L23: movq 56(%rsp), %rdx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z27__device_stub__Z4funcPfS_S_PfS_S_ jmp .L14 .L24: movq 24(%rsp), %rbp call clock@PLT subq %rbp, %rax pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 pxor %xmm0, %xmm0 cvtss2sd 96(%rsp), %xmm0 divsd .LC1(%rip), %xmm1 leaq .LC2(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT movq 128104(%rsp), %rax subq %fs:40, %rax jne .L25 movl $0, %eax addq $128120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L25: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "_Z4funcPfS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z4funcPfS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp 
.cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
//Submitted by GAutham M 15co118 and yashwanth 15co154 #include<stdio.h> #include<stdlib.h> #include<cuda.h> #include <time.h> __global__ void func(float *da_in,float *db_in,float *d_out) { int idx = blockIdx.x*100 + threadIdx.x; d_out[idx] = da_in[idx] + db_in[idx]; } int main() { const int array_size = 16000; const int array_bytes = array_size* sizeof(float); float a_in[array_size],b_in[array_size]; for(int i=0;i<array_size;i++) { a_in[i] = float(i); b_in[i]=float(i); } /*for(int i=0;i<array_size;i++) { b_in[i]=rand()%16000; }*/ float h_out[array_size]; float *da_in; float *db_in; float *d_out; int temp=array_size; int array_bytes1=array_bytes; time_t t,t1; srand((unsigned) time(&t)); t1=clock(); while(temp>1) { //printf("abc"); if((temp)%2==1) { a_in[temp]=0; //printf("con fail\n"); temp++; array_bytes1+=8; } temp=temp/2; array_bytes1/=2; cudaMalloc((void **)&da_in,array_bytes1); cudaMalloc((void **)&db_in,array_bytes1); cudaMalloc((void **)&d_out,array_bytes1); cudaMemcpy(da_in,a_in,array_bytes1,cudaMemcpyHostToDevice); cudaMemcpy(db_in,a_in+(temp),array_bytes1,cudaMemcpyHostToDevice); //kernel func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out); //copying back cudaMemcpy(h_out,d_out,array_bytes1,cudaMemcpyDeviceToHost); for(int i=0;i<temp;i++) { // a_in[i]=h_out[i]; // printf("%d=%f",i+1,h_out[i]); // printf(((i%4)!=3)? "\t":"\n"); a_in[i]=h_out[i]; } cudaFree(da_in); cudaFree(d_out); cudaFree(db_in); //printf("\n"); } t1=clock()-t1; double time_taken = ((double)t1)/CLOCKS_PER_SEC; printf("parallel execution gave answer as%f- time taken as %f\n",a_in[0],time_taken); }
//Submitted by GAutham M 15co118 and yashwanth 15co154 #include<stdio.h> #include<stdlib.h> #include<hip/hip_runtime.h> #include <time.h> __global__ void func(float *da_in,float *db_in,float *d_out) { int idx = blockIdx.x*100 + threadIdx.x; d_out[idx] = da_in[idx] + db_in[idx]; } int main() { const int array_size = 16000; const int array_bytes = array_size* sizeof(float); float a_in[array_size],b_in[array_size]; for(int i=0;i<array_size;i++) { a_in[i] = float(i); b_in[i]=float(i); } /*for(int i=0;i<array_size;i++) { b_in[i]=rand()%16000; }*/ float h_out[array_size]; float *da_in; float *db_in; float *d_out; int temp=array_size; int array_bytes1=array_bytes; time_t t,t1; srand((unsigned) time(&t)); t1=clock(); while(temp>1) { //printf("abc"); if((temp)%2==1) { a_in[temp]=0; //printf("con fail\n"); temp++; array_bytes1+=8; } temp=temp/2; array_bytes1/=2; hipMalloc((void **)&da_in,array_bytes1); hipMalloc((void **)&db_in,array_bytes1); hipMalloc((void **)&d_out,array_bytes1); hipMemcpy(da_in,a_in,array_bytes1,hipMemcpyHostToDevice); hipMemcpy(db_in,a_in+(temp),array_bytes1,hipMemcpyHostToDevice); //kernel func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out); //copying back hipMemcpy(h_out,d_out,array_bytes1,hipMemcpyDeviceToHost); for(int i=0;i<temp;i++) { // a_in[i]=h_out[i]; // printf("%d=%f",i+1,h_out[i]); // printf(((i%4)!=3)? "\t":"\n"); a_in[i]=h_out[i]; } hipFree(da_in); hipFree(d_out); hipFree(db_in); //printf("\n"); } t1=clock()-t1; double time_taken = ((double)t1)/CLOCKS_PER_SEC; printf("parallel execution gave answer as%f- time taken as %f\n",a_in[0],time_taken); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
//Submitted by GAutham M 15co118 and yashwanth 15co154 #include<stdio.h> #include<stdlib.h> #include<hip/hip_runtime.h> #include <time.h> __global__ void func(float *da_in,float *db_in,float *d_out) { int idx = blockIdx.x*100 + threadIdx.x; d_out[idx] = da_in[idx] + db_in[idx]; } int main() { const int array_size = 16000; const int array_bytes = array_size* sizeof(float); float a_in[array_size],b_in[array_size]; for(int i=0;i<array_size;i++) { a_in[i] = float(i); b_in[i]=float(i); } /*for(int i=0;i<array_size;i++) { b_in[i]=rand()%16000; }*/ float h_out[array_size]; float *da_in; float *db_in; float *d_out; int temp=array_size; int array_bytes1=array_bytes; time_t t,t1; srand((unsigned) time(&t)); t1=clock(); while(temp>1) { //printf("abc"); if((temp)%2==1) { a_in[temp]=0; //printf("con fail\n"); temp++; array_bytes1+=8; } temp=temp/2; array_bytes1/=2; hipMalloc((void **)&da_in,array_bytes1); hipMalloc((void **)&db_in,array_bytes1); hipMalloc((void **)&d_out,array_bytes1); hipMemcpy(da_in,a_in,array_bytes1,hipMemcpyHostToDevice); hipMemcpy(db_in,a_in+(temp),array_bytes1,hipMemcpyHostToDevice); //kernel func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out); //copying back hipMemcpy(h_out,d_out,array_bytes1,hipMemcpyDeviceToHost); for(int i=0;i<temp;i++) { // a_in[i]=h_out[i]; // printf("%d=%f",i+1,h_out[i]); // printf(((i%4)!=3)? "\t":"\n"); a_in[i]=h_out[i]; } hipFree(da_in); hipFree(d_out); hipFree(db_in); //printf("\n"); } t1=clock()-t1; double time_taken = ((double)t1)/CLOCKS_PER_SEC; printf("parallel execution gave answer as%f- time taken as %f\n",a_in[0],time_taken); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4funcPfS_S_ .globl _Z4funcPfS_S_ .p2align 8 .type _Z4funcPfS_S_,@function _Z4funcPfS_S_: s_load_b128 s[4:7], s[0:1], 0x0 v_mad_u64_u32 v[1:2], null, s15, 0x64, v[0:1] s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4funcPfS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 
.amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4funcPfS_S_, .Lfunc_end0-_Z4funcPfS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4funcPfS_S_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4funcPfS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
//Submitted by GAutham M 15co118 and yashwanth 15co154 #include<stdio.h> #include<stdlib.h> #include<hip/hip_runtime.h> #include <time.h> __global__ void func(float *da_in,float *db_in,float *d_out) { int idx = blockIdx.x*100 + threadIdx.x; d_out[idx] = da_in[idx] + db_in[idx]; } int main() { const int array_size = 16000; const int array_bytes = array_size* sizeof(float); float a_in[array_size],b_in[array_size]; for(int i=0;i<array_size;i++) { a_in[i] = float(i); b_in[i]=float(i); } /*for(int i=0;i<array_size;i++) { b_in[i]=rand()%16000; }*/ float h_out[array_size]; float *da_in; float *db_in; float *d_out; int temp=array_size; int array_bytes1=array_bytes; time_t t,t1; srand((unsigned) time(&t)); t1=clock(); while(temp>1) { //printf("abc"); if((temp)%2==1) { a_in[temp]=0; //printf("con fail\n"); temp++; array_bytes1+=8; } temp=temp/2; array_bytes1/=2; hipMalloc((void **)&da_in,array_bytes1); hipMalloc((void **)&db_in,array_bytes1); hipMalloc((void **)&d_out,array_bytes1); hipMemcpy(da_in,a_in,array_bytes1,hipMemcpyHostToDevice); hipMemcpy(db_in,a_in+(temp),array_bytes1,hipMemcpyHostToDevice); //kernel func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out); //copying back hipMemcpy(h_out,d_out,array_bytes1,hipMemcpyDeviceToHost); for(int i=0;i<temp;i++) { // a_in[i]=h_out[i]; // printf("%d=%f",i+1,h_out[i]); // printf(((i%4)!=3)? "\t":"\n"); a_in[i]=h_out[i]; } hipFree(da_in); hipFree(d_out); hipFree(db_in); //printf("\n"); } t1=clock()-t1; double time_taken = ((double)t1)/CLOCKS_PER_SEC; printf("parallel execution gave answer as%f- time taken as %f\n",a_in[0],time_taken); }
.text .file "q2.hip" .globl _Z19__device_stub__funcPfS_S_ # -- Begin function _Z19__device_stub__funcPfS_S_ .p2align 4, 0x90 .type _Z19__device_stub__funcPfS_S_,@function _Z19__device_stub__funcPfS_S_: # @_Z19__device_stub__funcPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4funcPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z19__device_stub__funcPfS_S_, .Lfunc_end0-_Z19__device_stub__funcPfS_S_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $128152, %rsp # imm = 0x1F498 .cfi_def_cfa_offset 128208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss %xmm0, 144(%rsp,%rax,4) incq %rax cmpq $16000, %rax # imm = 0x3E80 jne .LBB1_1 # %bb.2: movabsq $4294967396, %rbx # imm = 0x100000064 leaq 136(%rsp), %rdi callq time movl %eax, %edi callq srand movl $64000, %r12d # imm = 0xFA00 movl $16000, %r13d # imm = 0x3E80 
callq clock movq %rax, 32(%rsp) # 8-byte Spill leaq 60(%rbx), %r14 leaq 64144(%rsp), %rbx jmp .LBB1_3 .p2align 4, 0x90 .LBB1_9: # %._crit_edge # in Loop: Header=BB1_3 Depth=1 movq 24(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree cmpl $3, %ebp jle .LBB1_10 .LBB1_3: # =>This Inner Loop Header: Depth=1 testb $1, %r13b je .LBB1_5 # %bb.4: # in Loop: Header=BB1_3 Depth=1 movl %r13d, %eax movl $0, 144(%rsp,%rax,4) incl %r13d addl $8, %r12d .LBB1_5: # in Loop: Header=BB1_3 Depth=1 movl %r13d, %ebp shrl $31, %r13d addl %ebp, %r13d sarl %r13d movl %r12d, %eax shrl $31, %eax addl %eax, %r12d sarl %r12d movslq %r12d, %r15 leaq 24(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 24(%rsp), %rdi leaq 144(%rsp), %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movslq %r13d, %rax leaq (%rsp,%rax,4), %rsi addq $144, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq %r14, %rdi movl $1, %esi movabsq $4294967396, %rdx # imm = 0x100000064 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_7 # %bb.6: # in Loop: Header=BB1_3 Depth=1 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d movl $_Z4funcPfS_S_, %edi leaq 112(%rsp), %r9 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_7: # in Loop: Header=BB1_3 Depth=1 movq 8(%rsp), %rsi movq %rbx, %rdi movq %r15, %rdx movl $2, %ecx callq 
hipMemcpy cmpl $2, %ebp jl .LBB1_9 # %bb.8: # %.lr.ph.preheader # in Loop: Header=BB1_3 Depth=1 leal -1(%r13), %eax leaq 4(,%rax,4), %rdx leaq 144(%rsp), %rdi movq %rbx, %rsi callq memcpy@PLT jmp .LBB1_9 .LBB1_10: callq clock subq 32(%rsp), %rax # 8-byte Folded Reload cvtsi2sd %rax, %xmm1 divsd .LCPI1_0(%rip), %xmm1 movss 144(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $2, %al callq printf xorl %eax, %eax addq $128152, %rsp # imm = 0x1F498 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4funcPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, 
.Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z4funcPfS_S_,@object # @_Z4funcPfS_S_ .section .rodata,"a",@progbits .globl _Z4funcPfS_S_ .p2align 3, 0x0 _Z4funcPfS_S_: .quad _Z19__device_stub__funcPfS_S_ .size _Z4funcPfS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "parallel execution gave answer as%f- time taken as %f\n" .size .L.str, 55 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4funcPfS_S_" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__funcPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4funcPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z4funcPfS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R6, R6, 0x64, R3 ; /* 0x0000006406067824 */ /* 0x001fca00078e0203 */ /*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x0c0fe400078e0207 */ /*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0207 */ /*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */ /* 0x004fca0000000000 */ /*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4funcPfS_S_ .globl _Z4funcPfS_S_ .p2align 8 .type _Z4funcPfS_S_,@function _Z4funcPfS_S_: s_load_b128 s[4:7], s[0:1], 0x0 v_mad_u64_u32 v[1:2], null, s15, 0x64, v[0:1] s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4funcPfS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 
.amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4funcPfS_S_, .Lfunc_end0-_Z4funcPfS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4funcPfS_S_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4funcPfS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000350f6_00000000-6_q2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z4funcPfS_S_PfS_S_ .type _Z27__device_stub__Z4funcPfS_S_PfS_S_, @function _Z27__device_stub__Z4funcPfS_S_PfS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4funcPfS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z27__device_stub__Z4funcPfS_S_PfS_S_, .-_Z27__device_stub__Z4funcPfS_S_PfS_S_ .globl _Z4funcPfS_S_ .type _Z4funcPfS_S_, @function _Z4funcPfS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z4funcPfS_S_PfS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z4funcPfS_S_, .-_Z4funcPfS_S_ .section 
.rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "parallel execution gave answer as%f- time taken as %f\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 leaq -126976(%rsp), %r11 .cfi_def_cfa 11, 127032 .LPSRL0: subq $4096, %rsp orq $0, (%rsp) cmpq %r11, %rsp jne .LPSRL0 .cfi_def_cfa_register 7 subq $1144, %rsp .cfi_def_cfa_offset 128176 movq %fs:40, %rax movq %rax, 128104(%rsp) xorl %eax, %eax .L12: pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 movss %xmm0, 96(%rsp,%rax,4) addq $1, %rax cmpq $16000, %rax jne .L12 leaq 64(%rsp), %rdi call time@PLT movl %eax, %edi call srand@PLT call clock@PLT movq %rax, %rbp movl $64000, 4(%rsp) movl $16000, %r14d leaq 40(%rsp), %rax movq %rax, 8(%rsp) leaq 48(%rsp), %rax movq %rax, 16(%rsp) leaq 96(%rsp), %r12 leaq 64096(%rsp), %r13 movq %rbp, 24(%rsp) jmp .L16 .L13: movl %r15d, %ebx shrl $31, %ebx addl %r15d, %ebx sarl %ebx movl %ebx, %r14d movl 4(%rsp), %eax movl %eax, %ebp shrl $31, %ebp addl %eax, %ebp sarl %ebp movl %ebp, 4(%rsp) movslq %ebp, %rbp movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMalloc@PLT movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r12, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movslq %ebx, %rbx salq $2, %rbx leaq (%r12,%rbx), %rsi movl $1, %ecx movq %rbp, %rdx movq 48(%rsp), %rdi call cudaMemcpy@PLT movl $100, 84(%rsp) movl $1, 88(%rsp) movl $1, 92(%rsp) movl $160, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movl $1, %ecx movq 72(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je 
.L23 .L14: movl $2, %ecx movq %rbp, %rdx movq 56(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movl $0, %eax .L15: movss (%rax,%r13), %xmm0 movss %xmm0, (%rax,%r12) addq $4, %rax cmpq %rax, %rbx jne .L15 movq 40(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT cmpl $3, %r15d jle .L24 .L16: movl %r14d, %edx shrl $31, %edx leal (%r14,%rdx), %eax andl $1, %eax subl %edx, %eax movl %r14d, %r15d cmpl $1, %eax jne .L13 movslq %r14d, %rax movl $0x00000000, 96(%rsp,%rax,4) leal 1(%r14), %r15d addl $8, 4(%rsp) jmp .L13 .L23: movq 56(%rsp), %rdx movq 48(%rsp), %rsi movq 40(%rsp), %rdi call _Z27__device_stub__Z4funcPfS_S_PfS_S_ jmp .L14 .L24: movq 24(%rsp), %rbp call clock@PLT subq %rbp, %rax pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 pxor %xmm0, %xmm0 cvtss2sd 96(%rsp), %xmm0 divsd .LC1(%rip), %xmm1 leaq .LC2(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT movq 128104(%rsp), %rax subq %fs:40, %rax jne .L25 movl $0, %eax addq $128120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L25: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "_Z4funcPfS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z4funcPfS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp 
.cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093567616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "q2.hip" .globl _Z19__device_stub__funcPfS_S_ # -- Begin function _Z19__device_stub__funcPfS_S_ .p2align 4, 0x90 .type _Z19__device_stub__funcPfS_S_,@function _Z19__device_stub__funcPfS_S_: # @_Z19__device_stub__funcPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4funcPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z19__device_stub__funcPfS_S_, .Lfunc_end0-_Z19__device_stub__funcPfS_S_ .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x412e848000000000 # double 1.0E+6 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $128152, %rsp # imm = 0x1F498 .cfi_def_cfa_offset 128208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss %xmm0, 144(%rsp,%rax,4) incq %rax cmpq $16000, %rax # imm = 0x3E80 jne .LBB1_1 # %bb.2: movabsq $4294967396, %rbx # imm = 0x100000064 leaq 136(%rsp), %rdi callq time movl %eax, %edi callq srand movl $64000, %r12d # imm = 0xFA00 movl $16000, %r13d # imm = 0x3E80 
callq clock movq %rax, 32(%rsp) # 8-byte Spill leaq 60(%rbx), %r14 leaq 64144(%rsp), %rbx jmp .LBB1_3 .p2align 4, 0x90 .LBB1_9: # %._crit_edge # in Loop: Header=BB1_3 Depth=1 movq 24(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree cmpl $3, %ebp jle .LBB1_10 .LBB1_3: # =>This Inner Loop Header: Depth=1 testb $1, %r13b je .LBB1_5 # %bb.4: # in Loop: Header=BB1_3 Depth=1 movl %r13d, %eax movl $0, 144(%rsp,%rax,4) incl %r13d addl $8, %r12d .LBB1_5: # in Loop: Header=BB1_3 Depth=1 movl %r13d, %ebp shrl $31, %r13d addl %ebp, %r13d sarl %r13d movl %r12d, %eax shrl $31, %eax addl %eax, %r12d sarl %r12d movslq %r12d, %r15 leaq 24(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %r15, %rsi callq hipMalloc movq 24(%rsp), %rdi leaq 144(%rsp), %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movslq %r13d, %rax leaq (%rsp,%rax,4), %rsi addq $144, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq %r14, %rdi movl $1, %esi movabsq $4294967396, %rdx # imm = 0x100000064 movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_7 # %bb.6: # in Loop: Header=BB1_3 Depth=1 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d movl $_Z4funcPfS_S_, %edi leaq 112(%rsp), %r9 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_7: # in Loop: Header=BB1_3 Depth=1 movq 8(%rsp), %rsi movq %rbx, %rdi movq %r15, %rdx movl $2, %ecx callq 
hipMemcpy cmpl $2, %ebp jl .LBB1_9 # %bb.8: # %.lr.ph.preheader # in Loop: Header=BB1_3 Depth=1 leal -1(%r13), %eax leaq 4(,%rax,4), %rdx leaq 144(%rsp), %rdi movq %rbx, %rsi callq memcpy@PLT jmp .LBB1_9 .LBB1_10: callq clock subq 32(%rsp), %rax # 8-byte Folded Reload cvtsi2sd %rax, %xmm1 divsd .LCPI1_0(%rip), %xmm1 movss 144(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $2, %al callq printf xorl %eax, %eax addq $128152, %rsp # imm = 0x1F498 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4funcPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, 
.Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z4funcPfS_S_,@object # @_Z4funcPfS_S_ .section .rodata,"a",@progbits .globl _Z4funcPfS_S_ .p2align 3, 0x0 _Z4funcPfS_S_: .quad _Z19__device_stub__funcPfS_S_ .size _Z4funcPfS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "parallel execution gave answer as%f- time taken as %f\n" .size .L.str, 55 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4funcPfS_S_" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__funcPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4funcPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <thrust/sort.h> #include <set> using namespace std; int main(int argc, char const *argv[]) { /* code */ string a, b; int n, m; cin>>n>>m; // cin>>n>>m; int *array = new int [2*m]; int *array2 = new int [2*m]; cout<<n<<"\t"<<m<<endl; for (int i = 0; i < m; ++i) { /* code */ int start; int end; cin>>start>>end; array[2*i]=start; array2[2*i]=end; array[2*i+1]=end; array2[2*i+1]=start; // } // for (int i=0; i<2*m; i++){ // cout<<array[i]<<"\t"<<array2[i]<<endl; // } thrust::sort_by_key(array2, array2 + 2*m, array); thrust::sort_by_key(array, array + 2*m, array2); // int prev1 = array[0]; // int prev2 = array2[0]; // // // // cout<<array[0]<<"\t"<<array2[0]<<endl; // // int count = 1; // for (int i=0; i<2*m; i++){ cout<<array[i]<<"\t"<<array2[i]<<endl; } // typedef pair<int, int> pairs; // // pairs temp; // // set<pairs> setTemp; // set<pairs> :: iterator it; // // int count = 0; // // for (int i=0; i<m; i++){ // if (array[i]==array2[i]){ // continue; // } // // if (array[i]<array2[i]){ // temp.first = array[i]; // temp.second = array2[i]; // } // else{ // temp.first = array2[i]; // temp.second = array[i]; // } // // it = setTemp.find(temp); // // if (it==setTemp.end()){ // setTemp.insert(temp); // cout<<array[i]<<"\t"<<array2[i]<<endl; // count++; // } // // } //cout<<count<<endl; return 0; }
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <thrust/sort.h> #include <set> using namespace std; int main(int argc, char const *argv[]) { /* code */ string a, b; int n, m; cin>>n>>m; // cin>>n>>m; int *array = new int [2*m]; int *array2 = new int [2*m]; cout<<n<<"\t"<<m<<endl; for (int i = 0; i < m; ++i) { /* code */ int start; int end; cin>>start>>end; array[2*i]=start; array2[2*i]=end; array[2*i+1]=end; array2[2*i+1]=start; // } // for (int i=0; i<2*m; i++){ // cout<<array[i]<<"\t"<<array2[i]<<endl; // } thrust::sort_by_key(array2, array2 + 2*m, array); thrust::sort_by_key(array, array + 2*m, array2); // int prev1 = array[0]; // int prev2 = array2[0]; // // // // cout<<array[0]<<"\t"<<array2[0]<<endl; // // int count = 1; // for (int i=0; i<2*m; i++){ cout<<array[i]<<"\t"<<array2[i]<<endl; } // typedef pair<int, int> pairs; // // pairs temp; // // set<pairs> setTemp; // set<pairs> :: iterator it; // // int count = 0; // // for (int i=0; i<m; i++){ // if (array[i]==array2[i]){ // continue; // } // // if (array[i]<array2[i]){ // temp.first = array[i]; // temp.second = array2[i]; // } // else{ // temp.first = array2[i]; // temp.second = array[i]; // } // // it = setTemp.find(temp); // // if (it==setTemp.end()){ // setTemp.insert(temp); // cout<<array[i]<<"\t"<<array2[i]<<endl; // count++; // } // // } //cout<<count<<endl; return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <thrust/sort.h> #include <set> using namespace std; int main(int argc, char const *argv[]) { /* code */ string a, b; int n, m; cin>>n>>m; // cin>>n>>m; int *array = new int [2*m]; int *array2 = new int [2*m]; cout<<n<<"\t"<<m<<endl; for (int i = 0; i < m; ++i) { /* code */ int start; int end; cin>>start>>end; array[2*i]=start; array2[2*i]=end; array[2*i+1]=end; array2[2*i+1]=start; // } // for (int i=0; i<2*m; i++){ // cout<<array[i]<<"\t"<<array2[i]<<endl; // } thrust::sort_by_key(array2, array2 + 2*m, array); thrust::sort_by_key(array, array + 2*m, array2); // int prev1 = array[0]; // int prev2 = array2[0]; // // // // cout<<array[0]<<"\t"<<array2[0]<<endl; // // int count = 1; // for (int i=0; i<2*m; i++){ cout<<array[i]<<"\t"<<array2[i]<<endl; } // typedef pair<int, int> pairs; // // pairs temp; // // set<pairs> setTemp; // set<pairs> :: iterator it; // // int count = 0; // // for (int i=0; i<m; i++){ // if (array[i]==array2[i]){ // continue; // } // // if (array[i]<array2[i]){ // temp.first = array[i]; // temp.second = array2[i]; // } // else{ // temp.first = array2[i]; // temp.second = array[i]; // } // // it = setTemp.find(temp); // // if (it==setTemp.end()){ // setTemp.insert(temp); // cout<<array[i]<<"\t"<<array2[i]<<endl; // count++; // } // // } //cout<<count<<endl; return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <thrust/sort.h> #include <set> using namespace std; int main(int argc, char const *argv[]) { /* code */ string a, b; int n, m; cin>>n>>m; // cin>>n>>m; int *array = new int [2*m]; int *array2 = new int [2*m]; cout<<n<<"\t"<<m<<endl; for (int i = 0; i < m; ++i) { /* code */ int start; int end; cin>>start>>end; array[2*i]=start; array2[2*i]=end; array[2*i+1]=end; array2[2*i+1]=start; // } // for (int i=0; i<2*m; i++){ // cout<<array[i]<<"\t"<<array2[i]<<endl; // } thrust::sort_by_key(array2, array2 + 2*m, array); thrust::sort_by_key(array, array + 2*m, array2); // int prev1 = array[0]; // int prev2 = array2[0]; // // // // cout<<array[0]<<"\t"<<array2[0]<<endl; // // int count = 1; // for (int i=0; i<2*m; i++){ cout<<array[i]<<"\t"<<array2[i]<<endl; } // typedef pair<int, int> pairs; // // pairs temp; // // set<pairs> setTemp; // set<pairs> :: iterator it; // // int count = 0; // // for (int i=0; i<m; i++){ // if (array[i]==array2[i]){ // continue; // } // // if (array[i]<array2[i]){ // temp.first = array[i]; // temp.second = array2[i]; // } // else{ // temp.first = array2[i]; // temp.second = array[i]; // } // // it = setTemp.find(temp); // // if (it==setTemp.end()){ // setTemp.insert(temp); // cout<<array[i]<<"\t"<<array2[i]<<endl; // count++; // } // // } //cout<<count<<endl; return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "warpStandard.cuh" #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <string> #include <iostream> #include <numeric> #include <sys/time.h> #include <sstream> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <stdint.h> #include <stdio.h> #include <unistd.h> extern __shared__ unsigned rngShmem[]; __global__ void CalcPiKernel(unsigned *state, unsigned N, unsigned *hits) { unsigned rngRegs[WarpStandard_REG_COUNT]; WarpStandard_LoadState(state, rngRegs, rngShmem); unsigned acc=0; for(unsigned i=0;i<N;i++) { unsigned long x=WarpStandard_Generate(rngRegs, rngShmem); unsigned long y=WarpStandard_Generate(rngRegs, rngShmem); x=(x*x)>>3; y=(y*y)>>3; if(x+y <= (1UL<<61)) { acc++; } } hits[blockIdx.x*blockDim.x+threadIdx.x]=acc; WarpStandard_SaveState(rngRegs, rngShmem, state); } int main(int,char *[]) { int devId=-1; cudaDeviceProp devProps; cudaGetDevice(&devId); cudaGetDeviceProperties(&devProps, devId); unsigned gridSize=devProps.multiProcessorCount; unsigned blockSize=256; unsigned totalThreads=blockSize*gridSize; unsigned totalRngs=totalThreads/WarpStandard_K; unsigned rngsPerBlock=blockSize/WarpStandard_K; unsigned sharedMemBytesPerBlock=rngsPerBlock*WarpStandard_K*4; fprintf(stderr, "gridSize=%u, blockSize=%u, totalThreads=%u\n", gridSize, blockSize, totalThreads); unsigned seedBytes=totalRngs*4*WarpStandard_STATE_WORDS; std::vector<uint32_t> seedHost(seedBytes/4); void *seedDevice=0; if(cudaMalloc(&seedDevice, seedBytes)) { fprintf(stderr, "Error couldn't allocate state array of size %u\n", seedBytes); exit(1); } int fr=open("/dev/urandom", O_RDONLY); if(seedBytes!=read(fr, &seedHost[0], seedBytes)) { fprintf(stderr, "Couldn't seed RNGs.\n"); exit(1); } //cudaMemcpy(seedDevice, &seedHost[0], seedBytes, cudaMemcpyHostToDevice); std::vector<uint32_t>hitsHost(totalThreads, 0); void *hitsDevice=0; if(cudaMalloc(&hitsDevice, totalThreads*4)) { fprintf(stderr, "Error: couldn't allocate hits array of size 
%u.\n", totalThreads*4); exit(1); } if(cudaMemcpy(hitsDevice, &hitsHost[0], totalThreads*4, cudaMemcpyHostToDevice)) { fprintf(stderr, "Error: couldn't copy hits array to device.\n"); exit(1); } unsigned K=8; unsigned N=65536; double totalHits=0, totalSamples=0; for(unsigned i=0;i<K;i++) { N=N*2; double outputsPerKernel=totalThreads*double(N); CalcPiKernel<<<gridSize,blockSize,sharedMemBytesPerBlock>>>((unsigned*)seedDevice, N, (unsigned*)hitsDevice); cudaMemcpy(&hitsHost[0], hitsDevice, 4*totalThreads, cudaMemcpyDeviceToHost); //for(unsigned i=0;i<hitsHost.size();i++) //{ // fprintf(stdout, "hitsHost[%u]=%u\n", i, hitsHost[i]); //} totalSamples+=outputsPerKernel; totalHits += std::accumulate(hitsHost.begin(), hitsHost.end(), 0.0); double estimate=4*totalHits/totalSamples; fprintf(stdout, "totalHits=%lg, totalSamples=%lg\n", totalHits, totalSamples); fprintf(stdout, "samples=2^%lg, estimate=%.16lf, error=%lg\n", log(totalSamples)/log(2), estimate, std::abs(estimate-M_PI)); } return 0; }
.file "tmpxft_000f6443_00000000-6_warpStandard.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4153: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4153: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z22WarpStandard_LoadStatePKjPjS1_ .type _Z22WarpStandard_LoadStatePKjPjS1_, @function _Z22WarpStandard_LoadStatePKjPjS1_: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z22WarpStandard_LoadStatePKjPjS1_, .-_Z22WarpStandard_LoadStatePKjPjS1_ .globl _Z22WarpStandard_SaveStatePKjS0_Pj .type _Z22WarpStandard_SaveStatePKjS0_Pj, @function _Z22WarpStandard_SaveStatePKjS0_Pj: .LFB2028: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2028: .size _Z22WarpStandard_SaveStatePKjS0_Pj, .-_Z22WarpStandard_SaveStatePKjS0_Pj .globl _Z21WarpStandard_GeneratePjS_ .type _Z21WarpStandard_GeneratePjS_, @function _Z21WarpStandard_GeneratePjS_: .LFB2029: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2029: .size _Z21WarpStandard_GeneratePjS_, .-_Z21WarpStandard_GeneratePjS_ .globl _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ .type _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_, @function _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_: .LFB4175: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) 
movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L13 .L9: movq 120(%rsp), %rax subq %fs:40, %rax jne .L14 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L13: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12CalcPiKernelPjjS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L9 .L14: call __stack_chk_fail@PLT .cfi_endproc .LFE4175: .size _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_, .-_Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ .globl _Z12CalcPiKernelPjjS_ .type _Z12CalcPiKernelPjjS_, @function _Z12CalcPiKernelPjjS_: .LFB4176: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4176: .size _Z12CalcPiKernelPjjS_, .-_Z12CalcPiKernelPjjS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12CalcPiKernelPjjS_" .LC1: .string "WarpStandard_Q" .LC2: .string "WarpStandard_Z1" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4178: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq 
_Z12CalcPiKernelPjjS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $256, %r9d movl $0, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _ZL14WarpStandard_Q(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $128, %r9d movl $0, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _ZL15WarpStandard_Z1(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4178: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .text._ZNSt6vectorIjSaIjEED2Ev,"axG",@progbits,_ZNSt6vectorIjSaIjEED5Ev,comdat .align 2 .weak _ZNSt6vectorIjSaIjEED2Ev .type _ZNSt6vectorIjSaIjEED2Ev, @function _ZNSt6vectorIjSaIjEED2Ev: .LFB4488: .cfi_startproc endbr64 movq (%rdi), %rax testq %rax, %rax je .L22 subq $8, %rsp .cfi_def_cfa_offset 16 movq 16(%rdi), %rsi subq %rax, %rsi movq %rax, %rdi call _ZdlPvm@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .L22: ret .cfi_endproc .LFE4488: .size _ZNSt6vectorIjSaIjEED2Ev, .-_ZNSt6vectorIjSaIjEED2Ev .weak _ZNSt6vectorIjSaIjEED1Ev .set _ZNSt6vectorIjSaIjEED1Ev,_ZNSt6vectorIjSaIjEED2Ev .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC4: .string "gridSize=%u, blockSize=%u, totalThreads=%u\n" .align 8 .LC5: .string "Error couldn't allocate state array of size %u\n" .section .rodata.str1.1 .LC6: .string "/dev/urandom" .LC7: .string "Couldn't seed RNGs.\n" .section .rodata.str1.8 .align 8 .LC8: .string "Error: couldn't allocate hits array of size %u.\n" .align 8 .LC9: .string "Error: couldn't copy hits array to 
device.\n" .align 8 .LC11: .string "totalHits=%lg, totalSamples=%lg\n" .align 8 .LC15: .string "samples=2^%lg, estimate=%.16lf, error=%lg\n" .text .globl main .type main, @function main: .LFB4149: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4149 endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $1192, %rsp .cfi_def_cfa_offset 1248 movq %fs:40, %rax movq %rax, 1176(%rsp) xorl %eax, %eax movl $-1, 36(%rsp) leaq 36(%rsp), %rdi .LEHB0: call cudaGetDevice@PLT leaq 144(%rsp), %rdi movl 36(%rsp), %esi call cudaGetDeviceProperties_v2@PLT movl 532(%rsp), %r14d movl %r14d, %eax sall $8, %eax movl %eax, %ebx movl %eax, 28(%rsp) movl %eax, %r9d movl $256, %r8d movl %r14d, %ecx leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT leal 0(,%rbx,4), %ebp movq %rbx, %rdx andl $1073741568, %edx je .L26 leaq 0(,%rdx,4), %r12 movq %r12, %rdi call _Znwm@PLT .LEHE0: movq %rax, %rbx movq %rax, 80(%rsp) leaq (%rax,%r12), %rdx movq %rdx, 96(%rsp) movl $0, (%rax) leaq 4(%rax), %rax cmpq %rdx, %rax je .L27 .L28: movl $0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L28 jmp .L27 .L70: testl %eax, %eax jne .L59 movl $0, %esi leaq .LC6(%rip), %rdi .LEHB1: call __open_2@PLT jmp .L60 .L59: movl %ebp, %ecx leaq .LC5(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L60: movl %eax, %edi movl %ebp, %ebp movq $-1, %rcx movq %r12, %rdx movq %rbx, %rsi call __read_chk@PLT cmpq %rax, %rbp jne .L61 movl 28(%rsp), %eax movq $0, 120(%rsp) movq $0, 128(%rsp) testq %rax, %rax je .L31 leaq 0(,%rax,4), %r12 movq %r12, %rdi call _Znwm@PLT jmp .L62 .L61: leaq .LC7(%rip), %rdx movl $2, 
%esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT .LEHE1: movl $1, %edi call exit@PLT .L62: movq %rax, %rbp movq %rax, 112(%rsp) addq %rax, %r12 movq %r12, 128(%rsp) .L32: movl $0, (%rax) addq $4, %rax cmpq %r12, %rax jne .L32 .L47: movq %r12, 120(%rsp) movq $0, 48(%rsp) movl %r14d, %ebx sall $10, %ebx movl %ebx, %r15d leaq 48(%rsp), %rdi movq %r15, %rsi .LEHB2: call cudaMalloc@PLT testl %eax, %eax jne .L63 movl $1, %ecx movq %r15, %rdx movq %rbp, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT jmp .L64 .L63: movl %ebx, %ecx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L64: testl %eax, %eax jne .L65 movl $8, %r13d movq $0x000000000, (%rsp) movq $0x000000000, 8(%rsp) movl $65536, %ebx jmp .L34 .L65: leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L68: testl %eax, %eax jne .L39 movq 48(%rsp), %rdx movl %ebx, %esi movq 40(%rsp), %rdi call _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ .L39: movl $2, %ecx movq %r15, %rdx movq 48(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movsd (%rsp), %xmm5 addsd 16(%rsp), %xmm5 movsd %xmm5, (%rsp) cmpq %rbp, %r12 je .L50 movq %rbp, %rax movq %r12, %rcx pxor %xmm0, %xmm0 .L43: movl (%rax), %edx pxor %xmm1, %xmm1 cvtsi2sdq %rdx, %xmm1 addsd %xmm1, %xmm0 addq $4, %rax cmpq %rax, %rcx jne .L43 .L40: addsd 8(%rsp), %xmm0 movapd %xmm0, %xmm3 movsd %xmm0, 8(%rsp) mulsd .LC10(%rip), %xmm0 movsd (%rsp), %xmm4 divsd %xmm4, %xmm0 movsd %xmm0, 16(%rsp) movapd %xmm4, %xmm1 movapd %xmm3, %xmm0 leaq .LC11(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $2, %eax call __fprintf_chk@PLT jmp .L66 .L50: pxor %xmm0, %xmm0 jmp .L40 .L66: movsd (%rsp), %xmm0 call log@PLT movsd 16(%rsp), %xmm1 movapd %xmm1, %xmm2 subsd .LC12(%rip), %xmm2 andpd .LC13(%rip), %xmm2 divsd .LC14(%rip), %xmm0 leaq .LC15(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $3, %eax call 
__fprintf_chk@PLT subl $1, %r13d je .L67 .L34: addl %ebx, %ebx movl 28(%rsp), %eax pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 movl %ebx, %eax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 mulsd %xmm0, %xmm1 movsd %xmm1, 16(%rsp) movl $256, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl %r14d, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $0, %r9d movl $1024, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT .LEHE2: jmp .L68 .L67: leaq 112(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev leaq 80(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev movq 1176(%rsp), %rax subq %fs:40, %rax jne .L69 movl $0, %eax addq $1192, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L52: .cfi_restore_state endbr64 movq %rax, %rbx leaq 112(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev .L45: leaq 80(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev movq 1176(%rsp), %rax subq %fs:40, %rax je .L46 call __stack_chk_fail@PLT .L51: endbr64 movq %rax, %rbx jmp .L45 .L46: movq %rbx, %rdi .LEHB3: call _Unwind_Resume@PLT .LEHE3: .L31: movq $0, 112(%rsp) movq $0, 128(%rsp) movl $0, %ebp movl $0, %r12d jmp .L47 .L26: movq $0, 80(%rsp) movq $0, 96(%rsp) movl $0, %ebx movl $0, %edx .L27: movq %rdx, 88(%rsp) movq $0, 40(%rsp) movl %ebp, %r12d leaq 40(%rsp), %rdi movq %r12, %rsi .LEHB4: call cudaMalloc@PLT .LEHE4: jmp .L70 .L69: call __stack_chk_fail@PLT .cfi_endproc .LFE4149: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA4149: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE4149-.LLSDACSB4149 .LLSDACSB4149: .uleb128 .LEHB0-.LFB4149 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB4149 .uleb128 .LEHE1-.LEHB1 .uleb128 .L51-.LFB4149 .uleb128 0 .uleb128 .LEHB2-.LFB4149 .uleb128 .LEHE2-.LEHB2 .uleb128 .L52-.LFB4149 .uleb128 0 
.uleb128 .LEHB3-.LFB4149 .uleb128 .LEHE3-.LEHB3 .uleb128 0 .uleb128 0 .uleb128 .LEHB4-.LFB4149 .uleb128 .LEHE4-.LEHB4 .uleb128 .L51-.LFB4149 .uleb128 0 .LLSDACSE4149: .text .size main, .-main .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata .align 32 .type _ZL15WarpStandard_Z1, @object .size _ZL15WarpStandard_Z1, 128 _ZL15WarpStandard_Z1: .long 0 .long 1 .long 0 .long 1 .long 1 .long 1 .long 0 .long 0 .long 1 .long 0 .long 0 .long 1 .long 0 .long 0 .long 1 .long 0 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 1 .long 1 .long 0 .long 1 .align 32 .type _ZL14WarpStandard_Q, @object .size _ZL14WarpStandard_Q, 256 _ZL14WarpStandard_Q: .long 29 .long 24 .long 5 .long 23 .long 14 .long 26 .long 11 .long 31 .long 9 .long 3 .long 1 .long 28 .long 0 .long 2 .long 22 .long 20 .long 18 .long 15 .long 27 .long 13 .long 10 .long 16 .long 8 .long 17 .long 25 .long 12 .long 19 .long 30 .long 7 .long 6 .long 4 .long 21 .long 5 .long 14 .long 28 .long 24 .long 19 .long 13 .long 0 .long 17 .long 11 .long 20 .long 7 .long 10 .long 6 .long 15 .long 2 .long 9 .long 8 .long 23 .long 4 .long 30 .long 12 .long 25 .long 3 .long 21 .long 26 .long 27 .long 31 .long 18 .long 22 .long 16 .long 29 .long 1 .globl WarpStandard_post_processing .section .rodata.str1.1 .LC16: .string "addtaps" .section .data.rel.local,"aw" .align 8 .type WarpStandard_post_processing, @object .size WarpStandard_post_processing, 8 WarpStandard_post_processing: .quad .LC16 .globl WarpStandard_name .section .rodata.str1.8 .align 8 .LC17: .string "WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]" .section .data.rel.local .align 8 .type WarpStandard_name, @object .size WarpStandard_name, 8 WarpStandard_name: .quad .LC17 .section 
.rodata.cst8,"aM",@progbits,8 .align 8 .LC10: .long 0 .long 1074790400 .align 8 .LC12: .long 1413754136 .long 1074340347 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC13: .long -1 .long 2147483647 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC14: .long -17155601 .long 1072049730 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "warpStandard.cuh" #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <string> #include <iostream> #include <numeric> #include <sys/time.h> #include <sstream> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <stdint.h> #include <stdio.h> #include <unistd.h> extern __shared__ unsigned rngShmem[]; __global__ void CalcPiKernel(unsigned *state, unsigned N, unsigned *hits) { unsigned rngRegs[WarpStandard_REG_COUNT]; WarpStandard_LoadState(state, rngRegs, rngShmem); unsigned acc=0; for(unsigned i=0;i<N;i++) { unsigned long x=WarpStandard_Generate(rngRegs, rngShmem); unsigned long y=WarpStandard_Generate(rngRegs, rngShmem); x=(x*x)>>3; y=(y*y)>>3; if(x+y <= (1UL<<61)) { acc++; } } hits[blockIdx.x*blockDim.x+threadIdx.x]=acc; WarpStandard_SaveState(rngRegs, rngShmem, state); } int main(int,char *[]) { int devId=-1; cudaDeviceProp devProps; cudaGetDevice(&devId); cudaGetDeviceProperties(&devProps, devId); unsigned gridSize=devProps.multiProcessorCount; unsigned blockSize=256; unsigned totalThreads=blockSize*gridSize; unsigned totalRngs=totalThreads/WarpStandard_K; unsigned rngsPerBlock=blockSize/WarpStandard_K; unsigned sharedMemBytesPerBlock=rngsPerBlock*WarpStandard_K*4; fprintf(stderr, "gridSize=%u, blockSize=%u, totalThreads=%u\n", gridSize, blockSize, totalThreads); unsigned seedBytes=totalRngs*4*WarpStandard_STATE_WORDS; std::vector<uint32_t> seedHost(seedBytes/4); void *seedDevice=0; if(cudaMalloc(&seedDevice, seedBytes)) { fprintf(stderr, "Error couldn't allocate state array of size %u\n", seedBytes); exit(1); } int fr=open("/dev/urandom", O_RDONLY); if(seedBytes!=read(fr, &seedHost[0], seedBytes)) { fprintf(stderr, "Couldn't seed RNGs.\n"); exit(1); } //cudaMemcpy(seedDevice, &seedHost[0], seedBytes, cudaMemcpyHostToDevice); std::vector<uint32_t>hitsHost(totalThreads, 0); void *hitsDevice=0; if(cudaMalloc(&hitsDevice, totalThreads*4)) { fprintf(stderr, "Error: couldn't allocate hits array of size 
%u.\n", totalThreads*4); exit(1); } if(cudaMemcpy(hitsDevice, &hitsHost[0], totalThreads*4, cudaMemcpyHostToDevice)) { fprintf(stderr, "Error: couldn't copy hits array to device.\n"); exit(1); } unsigned K=8; unsigned N=65536; double totalHits=0, totalSamples=0; for(unsigned i=0;i<K;i++) { N=N*2; double outputsPerKernel=totalThreads*double(N); CalcPiKernel<<<gridSize,blockSize,sharedMemBytesPerBlock>>>((unsigned*)seedDevice, N, (unsigned*)hitsDevice); cudaMemcpy(&hitsHost[0], hitsDevice, 4*totalThreads, cudaMemcpyDeviceToHost); //for(unsigned i=0;i<hitsHost.size();i++) //{ // fprintf(stdout, "hitsHost[%u]=%u\n", i, hitsHost[i]); //} totalSamples+=outputsPerKernel; totalHits += std::accumulate(hitsHost.begin(), hitsHost.end(), 0.0); double estimate=4*totalHits/totalSamples; fprintf(stdout, "totalHits=%lg, totalSamples=%lg\n", totalHits, totalSamples); fprintf(stdout, "samples=2^%lg, estimate=%.16lf, error=%lg\n", log(totalSamples)/log(2), estimate, std::abs(estimate-M_PI)); } return 0; }
#include <hip/hip_runtime.h> #include <stdint.h> // Public constants const unsigned WarpStandard_K=32; const unsigned WarpStandard_REG_COUNT=3; const unsigned WarpStandard_STATE_WORDS=32; const uint32_t WarpStandard_TEST_DATA[WarpStandard_STATE_WORDS]={ 0x8cf35fea, 0xe1dd819e, 0x4a7d0a8e, 0xe0c05911, 0xfd053b8d, 0x30643089, 0x6f6ac111, 0xc4869595, 0x9416b7be, 0xe6d329e8, 0x5af0f5bf, 0xc5c742b5, 0x7197e922, 0x71aa35b4, 0x2070b9d1, 0x2bb34804, 0x7754a517, 0xe725315e, 0x7f9dd497, 0x043b58bf, 0x83ffa33d, 0x2532905a, 0xbdfe0c8a, 0x16f68671, 0x0d14da2e, 0x847efd5f, 0x1edeec64, 0x1bebdf9b, 0xf74d4ff3, 0xd404774b, 0x8ee32599, 0xefe0c405 }; // Private constants const char *WarpStandard_name="WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]"; const char *WarpStandard_post_processing="addtaps"; const unsigned WarpStandard_N=1024; const unsigned WarpStandard_W=32; const unsigned WarpStandard_G=16; const unsigned WarpStandard_SR=0; __device__ const unsigned WarpStandard_Q[2][32]={ {29,24,5,23,14,26,11,31,9,3,1,28,0,2,22,20,18,15,27,13,10,16,8,17,25,12,19,30,7,6,4,21}, {5,14,28,24,19,13,0,17,11,20,7,10,6,15,2,9,8,23,4,30,12,25,3,21,26,27,31,18,22,16,29,1} }; const unsigned WarpStandard_Z0=2; __device__ const unsigned WarpStandard_Z1[32]={ 0,1,0,1,1,1,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,1,1,0,1}; const unsigned WarpStandard_SHMEM_WORDS=32; const unsigned WarpStandard_GMEM_WORDS=0; // Public functions __device__ void WarpStandard_LoadState(const unsigned *seed, unsigned *regs, unsigned *shmem) { unsigned offset=threadIdx.x % 32; unsigned base=threadIdx.x-offset; // setup constants regs[0]=WarpStandard_Z1[offset]; regs[1]=base + WarpStandard_Q[0][offset]; regs[2]=base + WarpStandard_Q[1][offset]; // Setup state unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1; shmem[threadIdx.x]=seed[stateOff]; } __device__ void WarpStandard_SaveState(const unsigned *regs, const unsigned *shmem, unsigned *seed) { unsigned stateOff=blockDim.x * 
blockIdx.x * 1 + threadIdx.x * 1; seed[stateOff] = shmem[threadIdx.x]; } __device__ unsigned WarpStandard_Generate(unsigned *regs, unsigned *shmem) { unsigned t0=shmem[regs[1]], t1=shmem[regs[2]]; unsigned res=(t0<<WarpStandard_Z0) ^ (t1>>regs[0]); shmem[threadIdx.x]=res; return t0+t1; };
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdint.h> // Public constants const unsigned WarpStandard_K=32; const unsigned WarpStandard_REG_COUNT=3; const unsigned WarpStandard_STATE_WORDS=32; const uint32_t WarpStandard_TEST_DATA[WarpStandard_STATE_WORDS]={ 0x8cf35fea, 0xe1dd819e, 0x4a7d0a8e, 0xe0c05911, 0xfd053b8d, 0x30643089, 0x6f6ac111, 0xc4869595, 0x9416b7be, 0xe6d329e8, 0x5af0f5bf, 0xc5c742b5, 0x7197e922, 0x71aa35b4, 0x2070b9d1, 0x2bb34804, 0x7754a517, 0xe725315e, 0x7f9dd497, 0x043b58bf, 0x83ffa33d, 0x2532905a, 0xbdfe0c8a, 0x16f68671, 0x0d14da2e, 0x847efd5f, 0x1edeec64, 0x1bebdf9b, 0xf74d4ff3, 0xd404774b, 0x8ee32599, 0xefe0c405 }; // Private constants const char *WarpStandard_name="WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]"; const char *WarpStandard_post_processing="addtaps"; const unsigned WarpStandard_N=1024; const unsigned WarpStandard_W=32; const unsigned WarpStandard_G=16; const unsigned WarpStandard_SR=0; __device__ const unsigned WarpStandard_Q[2][32]={ {29,24,5,23,14,26,11,31,9,3,1,28,0,2,22,20,18,15,27,13,10,16,8,17,25,12,19,30,7,6,4,21}, {5,14,28,24,19,13,0,17,11,20,7,10,6,15,2,9,8,23,4,30,12,25,3,21,26,27,31,18,22,16,29,1} }; const unsigned WarpStandard_Z0=2; __device__ const unsigned WarpStandard_Z1[32]={ 0,1,0,1,1,1,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,1,1,0,1}; const unsigned WarpStandard_SHMEM_WORDS=32; const unsigned WarpStandard_GMEM_WORDS=0; // Public functions __device__ void WarpStandard_LoadState(const unsigned *seed, unsigned *regs, unsigned *shmem) { unsigned offset=threadIdx.x % 32; unsigned base=threadIdx.x-offset; // setup constants regs[0]=WarpStandard_Z1[offset]; regs[1]=base + WarpStandard_Q[0][offset]; regs[2]=base + WarpStandard_Q[1][offset]; // Setup state unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1; shmem[threadIdx.x]=seed[stateOff]; } __device__ void WarpStandard_SaveState(const unsigned *regs, const unsigned *shmem, unsigned *seed) { unsigned stateOff=blockDim.x * 
blockIdx.x * 1 + threadIdx.x * 1; seed[stateOff] = shmem[threadIdx.x]; } __device__ unsigned WarpStandard_Generate(unsigned *regs, unsigned *shmem) { unsigned t0=shmem[regs[1]], t1=shmem[regs[2]]; unsigned res=(t0<<WarpStandard_Z0) ^ (t1>>regs[0]); shmem[threadIdx.x]=res; return t0+t1; };
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdint.h> // Public constants const unsigned WarpStandard_K=32; const unsigned WarpStandard_REG_COUNT=3; const unsigned WarpStandard_STATE_WORDS=32; const uint32_t WarpStandard_TEST_DATA[WarpStandard_STATE_WORDS]={ 0x8cf35fea, 0xe1dd819e, 0x4a7d0a8e, 0xe0c05911, 0xfd053b8d, 0x30643089, 0x6f6ac111, 0xc4869595, 0x9416b7be, 0xe6d329e8, 0x5af0f5bf, 0xc5c742b5, 0x7197e922, 0x71aa35b4, 0x2070b9d1, 0x2bb34804, 0x7754a517, 0xe725315e, 0x7f9dd497, 0x043b58bf, 0x83ffa33d, 0x2532905a, 0xbdfe0c8a, 0x16f68671, 0x0d14da2e, 0x847efd5f, 0x1edeec64, 0x1bebdf9b, 0xf74d4ff3, 0xd404774b, 0x8ee32599, 0xefe0c405 }; // Private constants const char *WarpStandard_name="WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]"; const char *WarpStandard_post_processing="addtaps"; const unsigned WarpStandard_N=1024; const unsigned WarpStandard_W=32; const unsigned WarpStandard_G=16; const unsigned WarpStandard_SR=0; __device__ const unsigned WarpStandard_Q[2][32]={ {29,24,5,23,14,26,11,31,9,3,1,28,0,2,22,20,18,15,27,13,10,16,8,17,25,12,19,30,7,6,4,21}, {5,14,28,24,19,13,0,17,11,20,7,10,6,15,2,9,8,23,4,30,12,25,3,21,26,27,31,18,22,16,29,1} }; const unsigned WarpStandard_Z0=2; __device__ const unsigned WarpStandard_Z1[32]={ 0,1,0,1,1,1,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,1,1,0,1}; const unsigned WarpStandard_SHMEM_WORDS=32; const unsigned WarpStandard_GMEM_WORDS=0; // Public functions __device__ void WarpStandard_LoadState(const unsigned *seed, unsigned *regs, unsigned *shmem) { unsigned offset=threadIdx.x % 32; unsigned base=threadIdx.x-offset; // setup constants regs[0]=WarpStandard_Z1[offset]; regs[1]=base + WarpStandard_Q[0][offset]; regs[2]=base + WarpStandard_Q[1][offset]; // Setup state unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1; shmem[threadIdx.x]=seed[stateOff]; } __device__ void WarpStandard_SaveState(const unsigned *regs, const unsigned *shmem, unsigned *seed) { unsigned stateOff=blockDim.x * 
blockIdx.x * 1 + threadIdx.x * 1; seed[stateOff] = shmem[threadIdx.x]; } __device__ unsigned WarpStandard_Generate(unsigned *regs, unsigned *shmem) { unsigned t0=shmem[regs[1]], t1=shmem[regs[2]]; unsigned res=(t0<<WarpStandard_Z0) ^ (t1>>regs[0]); shmem[threadIdx.x]=res; return t0+t1; };
.text .file "warpStandard.hip" .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "WarpRNG[CorrelatedU32Rng .size .L.str, 74 .type WarpStandard_name,@object # @WarpStandard_name .data .globl WarpStandard_name .p2align 3, 0x0 WarpStandard_name: .quad .L.str .size WarpStandard_name, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "addtaps" .size .L.str.1, 8 .type WarpStandard_post_processing,@object # @WarpStandard_post_processing .data .globl WarpStandard_post_processing .p2align 3, 0x0 WarpStandard_post_processing: .quad .L.str.1 .size WarpStandard_post_processing, 8 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000f6443_00000000-6_warpStandard.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4153: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4153: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z22WarpStandard_LoadStatePKjPjS1_ .type _Z22WarpStandard_LoadStatePKjPjS1_, @function _Z22WarpStandard_LoadStatePKjPjS1_: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z22WarpStandard_LoadStatePKjPjS1_, .-_Z22WarpStandard_LoadStatePKjPjS1_ .globl _Z22WarpStandard_SaveStatePKjS0_Pj .type _Z22WarpStandard_SaveStatePKjS0_Pj, @function _Z22WarpStandard_SaveStatePKjS0_Pj: .LFB2028: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2028: .size _Z22WarpStandard_SaveStatePKjS0_Pj, .-_Z22WarpStandard_SaveStatePKjS0_Pj .globl _Z21WarpStandard_GeneratePjS_ .type _Z21WarpStandard_GeneratePjS_, @function _Z21WarpStandard_GeneratePjS_: .LFB2029: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2029: .size _Z21WarpStandard_GeneratePjS_, .-_Z21WarpStandard_GeneratePjS_ .globl _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ .type _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_, @function _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_: .LFB4175: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) 
movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L13 .L9: movq 120(%rsp), %rax subq %fs:40, %rax jne .L14 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L13: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12CalcPiKernelPjjS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L9 .L14: call __stack_chk_fail@PLT .cfi_endproc .LFE4175: .size _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_, .-_Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ .globl _Z12CalcPiKernelPjjS_ .type _Z12CalcPiKernelPjjS_, @function _Z12CalcPiKernelPjjS_: .LFB4176: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4176: .size _Z12CalcPiKernelPjjS_, .-_Z12CalcPiKernelPjjS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12CalcPiKernelPjjS_" .LC1: .string "WarpStandard_Q" .LC2: .string "WarpStandard_Z1" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4178: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq 
_Z12CalcPiKernelPjjS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $256, %r9d movl $0, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _ZL14WarpStandard_Q(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $128, %r9d movl $0, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _ZL15WarpStandard_Z1(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4178: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .text._ZNSt6vectorIjSaIjEED2Ev,"axG",@progbits,_ZNSt6vectorIjSaIjEED5Ev,comdat .align 2 .weak _ZNSt6vectorIjSaIjEED2Ev .type _ZNSt6vectorIjSaIjEED2Ev, @function _ZNSt6vectorIjSaIjEED2Ev: .LFB4488: .cfi_startproc endbr64 movq (%rdi), %rax testq %rax, %rax je .L22 subq $8, %rsp .cfi_def_cfa_offset 16 movq 16(%rdi), %rsi subq %rax, %rsi movq %rax, %rdi call _ZdlPvm@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .L22: ret .cfi_endproc .LFE4488: .size _ZNSt6vectorIjSaIjEED2Ev, .-_ZNSt6vectorIjSaIjEED2Ev .weak _ZNSt6vectorIjSaIjEED1Ev .set _ZNSt6vectorIjSaIjEED1Ev,_ZNSt6vectorIjSaIjEED2Ev .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC4: .string "gridSize=%u, blockSize=%u, totalThreads=%u\n" .align 8 .LC5: .string "Error couldn't allocate state array of size %u\n" .section .rodata.str1.1 .LC6: .string "/dev/urandom" .LC7: .string "Couldn't seed RNGs.\n" .section .rodata.str1.8 .align 8 .LC8: .string "Error: couldn't allocate hits array of size %u.\n" .align 8 .LC9: .string "Error: couldn't copy hits array to 
device.\n" .align 8 .LC11: .string "totalHits=%lg, totalSamples=%lg\n" .align 8 .LC15: .string "samples=2^%lg, estimate=%.16lf, error=%lg\n" .text .globl main .type main, @function main: .LFB4149: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4149 endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $1192, %rsp .cfi_def_cfa_offset 1248 movq %fs:40, %rax movq %rax, 1176(%rsp) xorl %eax, %eax movl $-1, 36(%rsp) leaq 36(%rsp), %rdi .LEHB0: call cudaGetDevice@PLT leaq 144(%rsp), %rdi movl 36(%rsp), %esi call cudaGetDeviceProperties_v2@PLT movl 532(%rsp), %r14d movl %r14d, %eax sall $8, %eax movl %eax, %ebx movl %eax, 28(%rsp) movl %eax, %r9d movl $256, %r8d movl %r14d, %ecx leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT leal 0(,%rbx,4), %ebp movq %rbx, %rdx andl $1073741568, %edx je .L26 leaq 0(,%rdx,4), %r12 movq %r12, %rdi call _Znwm@PLT .LEHE0: movq %rax, %rbx movq %rax, 80(%rsp) leaq (%rax,%r12), %rdx movq %rdx, 96(%rsp) movl $0, (%rax) leaq 4(%rax), %rax cmpq %rdx, %rax je .L27 .L28: movl $0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L28 jmp .L27 .L70: testl %eax, %eax jne .L59 movl $0, %esi leaq .LC6(%rip), %rdi .LEHB1: call __open_2@PLT jmp .L60 .L59: movl %ebp, %ecx leaq .LC5(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L60: movl %eax, %edi movl %ebp, %ebp movq $-1, %rcx movq %r12, %rdx movq %rbx, %rsi call __read_chk@PLT cmpq %rax, %rbp jne .L61 movl 28(%rsp), %eax movq $0, 120(%rsp) movq $0, 128(%rsp) testq %rax, %rax je .L31 leaq 0(,%rax,4), %r12 movq %r12, %rdi call _Znwm@PLT jmp .L62 .L61: leaq .LC7(%rip), %rdx movl $2, 
%esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT .LEHE1: movl $1, %edi call exit@PLT .L62: movq %rax, %rbp movq %rax, 112(%rsp) addq %rax, %r12 movq %r12, 128(%rsp) .L32: movl $0, (%rax) addq $4, %rax cmpq %r12, %rax jne .L32 .L47: movq %r12, 120(%rsp) movq $0, 48(%rsp) movl %r14d, %ebx sall $10, %ebx movl %ebx, %r15d leaq 48(%rsp), %rdi movq %r15, %rsi .LEHB2: call cudaMalloc@PLT testl %eax, %eax jne .L63 movl $1, %ecx movq %r15, %rdx movq %rbp, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT jmp .L64 .L63: movl %ebx, %ecx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L64: testl %eax, %eax jne .L65 movl $8, %r13d movq $0x000000000, (%rsp) movq $0x000000000, 8(%rsp) movl $65536, %ebx jmp .L34 .L65: leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L68: testl %eax, %eax jne .L39 movq 48(%rsp), %rdx movl %ebx, %esi movq 40(%rsp), %rdi call _Z35__device_stub__Z12CalcPiKernelPjjS_PjjS_ .L39: movl $2, %ecx movq %r15, %rdx movq 48(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movsd (%rsp), %xmm5 addsd 16(%rsp), %xmm5 movsd %xmm5, (%rsp) cmpq %rbp, %r12 je .L50 movq %rbp, %rax movq %r12, %rcx pxor %xmm0, %xmm0 .L43: movl (%rax), %edx pxor %xmm1, %xmm1 cvtsi2sdq %rdx, %xmm1 addsd %xmm1, %xmm0 addq $4, %rax cmpq %rax, %rcx jne .L43 .L40: addsd 8(%rsp), %xmm0 movapd %xmm0, %xmm3 movsd %xmm0, 8(%rsp) mulsd .LC10(%rip), %xmm0 movsd (%rsp), %xmm4 divsd %xmm4, %xmm0 movsd %xmm0, 16(%rsp) movapd %xmm4, %xmm1 movapd %xmm3, %xmm0 leaq .LC11(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $2, %eax call __fprintf_chk@PLT jmp .L66 .L50: pxor %xmm0, %xmm0 jmp .L40 .L66: movsd (%rsp), %xmm0 call log@PLT movsd 16(%rsp), %xmm1 movapd %xmm1, %xmm2 subsd .LC12(%rip), %xmm2 andpd .LC13(%rip), %xmm2 divsd .LC14(%rip), %xmm0 leaq .LC15(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $3, %eax call 
__fprintf_chk@PLT subl $1, %r13d je .L67 .L34: addl %ebx, %ebx movl 28(%rsp), %eax pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 movl %ebx, %eax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 mulsd %xmm0, %xmm1 movsd %xmm1, 16(%rsp) movl $256, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl %r14d, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $0, %r9d movl $1024, %r8d movq 68(%rsp), %rdx movl $1, %ecx movq 56(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT .LEHE2: jmp .L68 .L67: leaq 112(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev leaq 80(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev movq 1176(%rsp), %rax subq %fs:40, %rax jne .L69 movl $0, %eax addq $1192, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L52: .cfi_restore_state endbr64 movq %rax, %rbx leaq 112(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev .L45: leaq 80(%rsp), %rdi call _ZNSt6vectorIjSaIjEED1Ev movq 1176(%rsp), %rax subq %fs:40, %rax je .L46 call __stack_chk_fail@PLT .L51: endbr64 movq %rax, %rbx jmp .L45 .L46: movq %rbx, %rdi .LEHB3: call _Unwind_Resume@PLT .LEHE3: .L31: movq $0, 112(%rsp) movq $0, 128(%rsp) movl $0, %ebp movl $0, %r12d jmp .L47 .L26: movq $0, 80(%rsp) movq $0, 96(%rsp) movl $0, %ebx movl $0, %edx .L27: movq %rdx, 88(%rsp) movq $0, 40(%rsp) movl %ebp, %r12d leaq 40(%rsp), %rdi movq %r12, %rsi .LEHB4: call cudaMalloc@PLT .LEHE4: jmp .L70 .L69: call __stack_chk_fail@PLT .cfi_endproc .LFE4149: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA4149: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE4149-.LLSDACSB4149 .LLSDACSB4149: .uleb128 .LEHB0-.LFB4149 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB4149 .uleb128 .LEHE1-.LEHB1 .uleb128 .L51-.LFB4149 .uleb128 0 .uleb128 .LEHB2-.LFB4149 .uleb128 .LEHE2-.LEHB2 .uleb128 .L52-.LFB4149 .uleb128 0 
.uleb128 .LEHB3-.LFB4149 .uleb128 .LEHE3-.LEHB3 .uleb128 0 .uleb128 0 .uleb128 .LEHB4-.LFB4149 .uleb128 .LEHE4-.LEHB4 .uleb128 .L51-.LFB4149 .uleb128 0 .LLSDACSE4149: .text .size main, .-main .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata .align 32 .type _ZL15WarpStandard_Z1, @object .size _ZL15WarpStandard_Z1, 128 _ZL15WarpStandard_Z1: .long 0 .long 1 .long 0 .long 1 .long 1 .long 1 .long 0 .long 0 .long 1 .long 0 .long 0 .long 1 .long 0 .long 0 .long 1 .long 0 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 0 .long 1 .long 1 .long 1 .long 0 .long 1 .align 32 .type _ZL14WarpStandard_Q, @object .size _ZL14WarpStandard_Q, 256 _ZL14WarpStandard_Q: .long 29 .long 24 .long 5 .long 23 .long 14 .long 26 .long 11 .long 31 .long 9 .long 3 .long 1 .long 28 .long 0 .long 2 .long 22 .long 20 .long 18 .long 15 .long 27 .long 13 .long 10 .long 16 .long 8 .long 17 .long 25 .long 12 .long 19 .long 30 .long 7 .long 6 .long 4 .long 21 .long 5 .long 14 .long 28 .long 24 .long 19 .long 13 .long 0 .long 17 .long 11 .long 20 .long 7 .long 10 .long 6 .long 15 .long 2 .long 9 .long 8 .long 23 .long 4 .long 30 .long 12 .long 25 .long 3 .long 21 .long 26 .long 27 .long 31 .long 18 .long 22 .long 16 .long 29 .long 1 .globl WarpStandard_post_processing .section .rodata.str1.1 .LC16: .string "addtaps" .section .data.rel.local,"aw" .align 8 .type WarpStandard_post_processing, @object .size WarpStandard_post_processing, 8 WarpStandard_post_processing: .quad .LC16 .globl WarpStandard_name .section .rodata.str1.8 .align 8 .LC17: .string "WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]" .section .data.rel.local .align 8 .type WarpStandard_name, @object .size WarpStandard_name, 8 WarpStandard_name: .quad .LC17 .section 
.rodata.cst8,"aM",@progbits,8 .align 8 .LC10: .long 0 .long 1074790400 .align 8 .LC12: .long 1413754136 .long 1074340347 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC13: .long -1 .long 2147483647 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC14: .long -17155601 .long 1072049730 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "warpStandard.hip" .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "WarpRNG[CorrelatedU32Rng .size .L.str, 74 .type WarpStandard_name,@object # @WarpStandard_name .data .globl WarpStandard_name .p2align 3, 0x0 WarpStandard_name: .quad .L.str .size WarpStandard_name, 8 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "addtaps" .size .L.str.1, 8 .type WarpStandard_post_processing,@object # @WarpStandard_post_processing .data .globl WarpStandard_post_processing .p2align 3, 0x0 WarpStandard_post_processing: .quad .L.str.1 .size WarpStandard_post_processing, 8 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// Mandelbrot-style iteration kernel.
//
// One thread per sample: reads its complex parameter c = (re[idx], im[idx]),
// iterates z <- z^2 + c a fixed 999 times (i = 1..999), and counts how many
// iterations leave |z|^2 < 4. The count is written to out[idx] as a float.
//
// NOTE(review): there is no `idx < n` bounds guard — the launch configuration
// must exactly cover the length of `out`/`re`/`im`; confirm at the call site.
// NOTE(review): unlike a classic escape-time Mandelbrot loop, this does NOT
// break on escape; it keeps iterating and increments `count` on every
// iteration where the magnitude check passes. Preserved as-is.
__global__ void Mandelbrot(float* out, const double* re, const double* im) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Current iterate z = (z_real, z_imag), starting at the origin.
    double z_real = 0;
    double z_imag = 0;

    // Per-thread constant c = (c_real, c_imag).
    double c_real = re[idx];
    double c_imag = im[idx];

    double count = 0;
    for (int i = 1; i < 1000; i++) {
        // z^2 = (zr^2 - zi^2) + (2*zr*zi)i, then add c.
        double next_real = z_real * z_real - z_imag * z_imag;
        double next_imag = 2 * z_real * z_imag;
        z_real = next_real + c_real;
        z_imag = next_imag + c_imag;

        // Tally iterations whose squared magnitude stays below 4.
        if (z_real * z_real + z_imag * z_imag < 4) {
            count++;
        }
    }

    out[idx] = count;
}
code for sm_80 Function : _Z10MandelbrotPfPKdS1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fc800078e0203 */ /*0060*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc800078e0207 */ /*0070*/ IMAD.WIDE R2, R0, R7, c[0x0][0x170] ; /* 0x00005c0000027625 */ /* 0x000fe400078e0207 */ /*0080*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1b00 */ /*00a0*/ IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e7224 */ /* 0x000fe200078e00ff */ /*00b0*/ DADD R6, RZ, R4 ; /* 0x00000000ff067229 */ /* 0x004e080000000004 */ /*00c0*/ DFMA R8, RZ, RZ, R2 ; /* 0x000000ffff08722b */ /* 0x008fc80000000002 */ /*00d0*/ DMUL R10, R6, R6 ; /* 0x00000006060a7228 */ /* 0x001e080000000000 */ /*00e0*/ DADD R6, R6, R6 ; /* 0x0000000006067229 */ /* 0x000fc80000000006 */ /*00f0*/ DFMA R12, -R8, R8, R10 ; /* 0x00000008080c722b */ /* 0x001e08000000010a */ /*0100*/ DFMA R10, R8, R8, R10 ; /* 0x00000008080a722b */ /* 0x000fc8000000000a */ /*0110*/ DADD R12, R4, R12 ; /* 0x00000000040c7229 */ /* 0x001e08000000000c */ /*0120*/ DFMA R6, R8, R6, R2 ; /* 0x000000060806722b */ /* 0x000fc80000000002 */ /*0130*/ DMUL R8, R12, R12 ; /* 0x0000000c0c087228 */ /* 0x001e080000000000 */ /*0140*/ DSETP.GEU.AND P1, PT, R10, 4, PT ; /* 0x401000000a00742a */ /* 0x000e480003f2e000 */ /*0150*/ DFMA R10, -R6.reuse, R6.reuse, R8.reuse ; /* 
0x00000006060a722b */ /* 0x1c1e240000000108 */ /*0160*/ FSEL R15, RZ, 1.875, P1 ; /* 0x3ff00000ff0f7808 */ /* 0x002fe40000800000 */ /*0170*/ DFMA R8, R6, R6, R8 ; /* 0x000000060608722b */ /* 0x000e480000000008 */ /*0180*/ DADD R12, R12, R12 ; /* 0x000000000c0c7229 */ /* 0x000e88000000000c */ /*0190*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x001e08000000000a */ /*01a0*/ DSETP.GEU.AND P0, PT, R8, 4, PT ; /* 0x401000000800742a */ /* 0x002fc80003f0e000 */ /*01b0*/ DFMA R6, R6, R12, R2 ; /* 0x0000000c0606722b */ /* 0x004fc80000000002 */ /*01c0*/ DADD R8, R14, 1 ; /* 0x3ff000000e087429 */ /* 0x000e480000000000 */ /*01d0*/ DMUL R12, R10, R10 ; /* 0x0000000a0a0c7228 */ /* 0x001e080000000000 */ /*01e0*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000ea4000000000a */ /*01f0*/ FSEL R19, R9, R15, !P0 ; /* 0x0000000f09137208 */ /* 0x002fe40004000000 */ /*0200*/ DFMA R16, R6, R6, R12 ; /* 0x000000060610722b */ /* 0x001fe2000000000c */ /*0210*/ FSEL R18, R8, RZ, !P0 ; /* 0x000000ff08127208 */ /* 0x000fc60004000000 */ /*0220*/ DFMA R12, -R6, R6, R12 ; /* 0x00000006060c722b */ /* 0x000e08000000010c */ /*0230*/ DFMA R6, R6, R10, R2 ; /* 0x0000000a0606722b */ /* 0x004e480000000002 */ /*0240*/ DADD R12, R4, R12 ; /* 0x00000000040c7229 */ /* 0x001e08000000000c */ /*0250*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x002e480000000000 */ /*0260*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*0270*/ DSETP.GEU.AND P1, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x000e880003f2e000 */ /*0280*/ DMUL R10, R12, R12 ; /* 0x0000000c0c0a7228 */ /* 0x001e080000000000 */ /*0290*/ DFMA R14, R12, R12, -R14 ; /* 0x0000000c0c0e722b */ /* 0x002e48000000080e */ /*02a0*/ DADD R16, R12, R12 ; /* 0x000000000c107229 */ /* 0x0005e4000000000c */ /*02b0*/ FSEL R12, R8, R18, !P1 ; /* 0x00000012080c7208 */ /* 0x004fe40004800000 */ /*02c0*/ DFMA R10, R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x001e22000000000a */ /*02d0*/ FSEL R13, R9, R19, !P1 ; /* 0x00000013090d7208 */ 
/* 0x000fc60004800000 */ /*02e0*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x002e48000000000e */ /*02f0*/ DSETP.GEU.AND P0, PT, R10, 4, PT ; /* 0x401000000a00742a */ /* 0x001fc80003f0e000 */ /*0300*/ DADD R8, R12, 1 ; /* 0x3ff000000c087429 */ /* 0x000fc80000000000 */ /*0310*/ DFMA R6, R6, R16, R2 ; /* 0x000000100606722b */ /* 0x000fc80000000002 */ /*0320*/ DMUL R10, R14, R14 ; /* 0x0000000e0e0a7228 */ /* 0x002e080000000000 */ /*0330*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000e48000000000e */ /*0340*/ DFMA R16, R6, R6, R10 ; /* 0x000000060610722b */ /* 0x001fc8000000000a */ /*0350*/ DFMA R10, -R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x000e08000000010a */ /*0360*/ DFMA R6, R6, R14, R2 ; /* 0x0000000e0606722b */ /* 0x0023e40000000002 */ /*0370*/ FSEL R14, R8, R12, !P0 ; /* 0x0000000c080e7208 */ /* 0x002fe40004000000 */ /*0380*/ FSEL R15, R9, R13, !P0 ; /* 0x0000000d090f7208 */ /* 0x000fe20004000000 */ /*0390*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x001e08000000000a */ /*03a0*/ DSETP.GEU.AND P0, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x000fc80003f0e000 */ /*03b0*/ DADD R8, R14, 1 ; /* 0x3ff000000e087429 */ /* 0x000e480000000000 */ /*03c0*/ DMUL R12, R10, R10 ; /* 0x0000000a0a0c7228 */ /* 0x001e080000000000 */ /*03d0*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000ea4000000000a */ /*03e0*/ FSEL R18, R8, R14, !P0 ; /* 0x0000000e08127208 */ /* 0x002fe40004000000 */ /*03f0*/ FSEL R19, R9, R15, !P0 ; /* 0x0000000f09137208 */ /* 0x000fe20004000000 */ /*0400*/ DFMA R20, R6, R6, R12 ; /* 0x000000060614722b */ /* 0x001fc8000000000c */ /*0410*/ DFMA R12, -R6, R6, R12 ; /* 0x00000006060c722b */ /* 0x000e08000000010c */ /*0420*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*0430*/ DFMA R6, R6, R10, R2 ; /* 0x0000000a0606722b */ /* 0x004e480000000002 */ /*0440*/ DADD R16, R4, R12 ; /* 0x0000000004107229 */ /* 0x001fc8000000000c */ /*0450*/ DSETP.GEU.AND P0, PT, R20, 4, PT ; /* 0x401000001400742a */ /* 
0x000e080003f0e000 */ /*0460*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x002fe40000000000 */ /*0470*/ FSEL R18, R8, R18, !P0 ; /* 0x0000001208127208 */ /* 0x001fe40004000000 */ /*0480*/ DMUL R10, R16, R16 ; /* 0x00000010100a7228 */ /* 0x000e220000000000 */ /*0490*/ FSEL R19, R9, R19, !P0 ; /* 0x0000001309137208 */ /* 0x000fca0004000000 */ /*04a0*/ DADD R12, R10, R14 ; /* 0x000000000a0c7229 */ /* 0x001e08000000000e */ /*04b0*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*04c0*/ DSETP.GEU.AND P0, PT, R12, 4, PT ; /* 0x401000000c00742a */ /* 0x0010640003f0e000 */ /*04d0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x4 ; /* 0x00000004ff0c7424 */ /* 0x001fc800078e00ff */ /*04e0*/ FSEL R8, R8, R18, !P0 ; /* 0x0000001208087208 */ /* 0x002fe40004000000 */ /*04f0*/ FSEL R9, R9, R19, !P0 ; /* 0x0000001309097208 */ /* 0x000fe40004000000 */ /*0500*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */ /* 0x000e220000000010 */ /*0510*/ IADD3 R12, R12, 0x10, RZ ; /* 0x000000100c0c7810 */ /* 0x000fc60007ffe0ff */ /*0520*/ DADD R10, -R14, R10 ; /* 0x000000000e0a7229 */ /* 0x000e48000000010a */ /*0530*/ DFMA R16, R16, R6, R2 ; /* 0x000000061010722b */ /* 0x001e080000000002 */ /*0540*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x002fc8000000000a */ /*0550*/ DMUL R6, R16, R16 ; /* 0x0000001010067228 */ /* 0x001e0c0000000000 */ /*0560*/ DFMA R14, R10, R10, -R6 ; /* 0x0000000a0a0e722b */ /* 0x001e080000000806 */ /*0570*/ DMUL R6, R10, R10 ; /* 0x0000000a0a067228 */ /* 0x000e480000000000 */ /*0580*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000e88000000000a */ /*0590*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x001e08000000000e */ /*05a0*/ DFMA R6, R16, R16, R6 ; /* 0x000000101006722b */ /* 0x002e480000000006 */ /*05b0*/ DFMA R10, R16, R10, R2 ; /* 0x0000000a100a722b */ /* 0x004fc80000000002 */ /*05c0*/ DMUL R16, R14, R14 ; /* 0x0000000e0e107228 */ /* 0x001e080000000000 */ /*05d0*/ DSETP.GEU.AND P1, PT, R6, 4, PT ; /* 0x401000000600742a */ /* 
0x002fc80003f2e000 */ /*05e0*/ DFMA R6, -R10, R10, R16 ; /* 0x0000000a0a06722b */ /* 0x001e080000000110 */ /*05f0*/ DFMA R16, R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x000e480000000010 */ /*0600*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000e88000000000e */ /*0610*/ DADD R6, R4, R6 ; /* 0x0000000004067229 */ /* 0x001e080000000006 */ /*0620*/ DSETP.GEU.AND P0, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x002fc80003f0e000 */ /*0630*/ DFMA R14, R10, R14, R2 ; /* 0x0000000e0a0e722b */ /* 0x004fc80000000002 */ /*0640*/ DMUL R16, R6, R6 ; /* 0x0000000606107228 */ /* 0x001e080000000000 */ /*0650*/ DADD R10, R8, 1 ; /* 0x3ff00000080a7429 */ /* 0x000e480000000000 */ /*0660*/ DFMA R18, R14, R14, R16 ; /* 0x0000000e0e12722b */ /* 0x001fc80000000010 */ /*0670*/ DFMA R16, -R14, R14, R16 ; /* 0x0000000e0e10722b */ /* 0x000e240000000110 */ /*0680*/ FSEL R20, R10, R8, !P1 ; /* 0x000000080a147208 */ /* 0x002fe40004800000 */ /*0690*/ DADD R6, R6, R6 ; /* 0x0000000006067229 */ /* 0x000e620000000006 */ /*06a0*/ FSEL R21, R11, R9, !P1 ; /* 0x000000090b157208 */ /* 0x000fc60004800000 */ /*06b0*/ DADD R16, R4, R16 ; /* 0x0000000004107229 */ /* 0x001e080000000010 */ /*06c0*/ DFMA R6, R14, R6, R2 ; /* 0x000000060e06722b */ /* 0x002fc80000000002 */ /*06d0*/ DMUL R10, R16, R16 ; /* 0x00000010100a7228 */ /* 0x001fc80000000000 */ /*06e0*/ DADD R8, R20, 1 ; /* 0x3ff0000014087429 */ /* 0x000e080000000000 */ /*06f0*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */ /* 0x000e480000000010 */ /*0700*/ DSETP.GEU.AND P1, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000fe40003f2e000 */ /*0710*/ FSEL R8, R8, R20, !P0 ; /* 0x0000001408087208 */ /* 0x001fe40004000000 */ /*0720*/ DFMA R18, R6, R6, R10 ; /* 0x000000060612722b */ /* 0x000e22000000000a */ /*0730*/ FSEL R9, R9, R21, !P0 ; /* 0x0000001509097208 */ /* 0x000fc60004000000 */ /*0740*/ DFMA R14, -R6, R6, R10 ; /* 0x00000006060e722b */ /* 0x000e88000000010a */ /*0750*/ DFMA R10, R6, R16, R2 ; /* 0x00000010060a722b */ /* 
0x002e480000000002 */ /*0760*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x001fc80003f0e000 */ /*0770*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x004fc8000000000e */ /*0780*/ DMUL R18, R10, R10 ; /* 0x0000000a0a127228 */ /* 0x002e080000000000 */ /*0790*/ DADD R6, R8, 1 ; /* 0x3ff0000008067429 */ /* 0x000e480000000000 */ /*07a0*/ DFMA R18, R14, R14, -R18 ; /* 0x0000000e0e12722b */ /* 0x001e080000000812 */ /*07b0*/ DMUL R16, R14.reuse, R14 ; /* 0x0000000e0e107228 */ /* 0x040ea40000000000 */ /*07c0*/ FSEL R6, R6, R8, !P1 ; /* 0x0000000806067208 */ /* 0x002fe40004800000 */ /*07d0*/ DADD R20, R14, R14 ; /* 0x000000000e147229 */ /* 0x000e62000000000e */ /*07e0*/ FSEL R7, R7, R9, !P1 ; /* 0x0000000907077208 */ /* 0x000fc60004800000 */ /*07f0*/ DADD R18, R4, R18 ; /* 0x0000000004127229 */ /* 0x001e080000000012 */ /*0800*/ DFMA R16, R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x004e880000000010 */ /*0810*/ DFMA R10, R10, R20, R2 ; /* 0x000000140a0a722b */ /* 0x002fc80000000002 */ /*0820*/ DMUL R14, R18, R18 ; /* 0x00000012120e7228 */ /* 0x001e080000000000 */ /*0830*/ DADD R8, R6, 1 ; /* 0x3ff0000006087429 */ /* 0x000e480000000000 */ /*0840*/ DSETP.GEU.AND P1, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x004fc80003f2e000 */ /*0850*/ DFMA R16, R10.reuse, R10.reuse, R14.reuse ; /* 0x0000000a0a10722b */ /* 0x1c1fe4000000000e */ /*0860*/ FSEL R6, R8, R6, !P0 ; /* 0x0000000608067208 */ /* 0x002fe40004000000 */ /*0870*/ DFMA R14, -R10, R10, R14 ; /* 0x0000000a0a0e722b */ /* 0x000e22000000010e */ /*0880*/ FSEL R7, R9, R7, !P0 ; /* 0x0000000709077208 */ /* 0x000fc60004000000 */ /*0890*/ DADD R18, R18, R18 ; /* 0x0000000012127229 */ /* 0x000e480000000012 */ /*08a0*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x001e08000000000e */ /*08b0*/ DSETP.GEU.AND P0, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x000fc80003f0e000 */ /*08c0*/ DFMA R10, R10, R18, R2 ; /* 0x000000120a0a722b */ /* 0x002fc80000000002 */ /*08d0*/ DMUL R16, R14, R14 ; /* 
0x0000000e0e107228 */ /* 0x001e080000000000 */ /*08e0*/ DADD R8, R6, 1 ; /* 0x3ff0000006087429 */ /* 0x000fc80000000000 */ /*08f0*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000e48000000000e */ /*0900*/ DFMA R18, R10, R10, R16 ; /* 0x0000000a0a12722b */ /* 0x001fc80000000010 */ /*0910*/ DFMA R16, -R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x000e080000000110 */ /*0920*/ DFMA R10, R10, R14, R2 ; /* 0x0000000e0a0a722b */ /* 0x0023e40000000002 */ /*0930*/ FSEL R14, R8, R6, !P1 ; /* 0x00000006080e7208 */ /* 0x002fe40004800000 */ /*0940*/ FSEL R15, R9, R7, !P1 ; /* 0x00000007090f7208 */ /* 0x000fe20004800000 */ /*0950*/ DADD R16, R4, R16 ; /* 0x0000000004107229 */ /* 0x001e080000000010 */ /*0960*/ DSETP.GEU.AND P1, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000fc80003f2e000 */ /*0970*/ DADD R6, R14, 1 ; /* 0x3ff000000e067429 */ /* 0x000e480000000000 */ /*0980*/ DMUL R8, R16, R16 ; /* 0x0000001010087228 */ /* 0x001e080000000000 */ /*0990*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */ /* 0x000ea40000000010 */ /*09a0*/ FSEL R6, R6, R14, !P0 ; /* 0x0000000e06067208 */ /* 0x002fe40004000000 */ /*09b0*/ DFMA R18, R10, R10, R8 ; /* 0x0000000a0a12722b */ /* 0x001e220000000008 */ /*09c0*/ FSEL R7, R7, R15, !P0 ; /* 0x0000000f07077208 */ /* 0x000fc60004000000 */ /*09d0*/ DFMA R8, -R10, R10, R8 ; /* 0x0000000a0a08722b */ /* 0x000e480000000108 */ /*09e0*/ DFMA R10, R10, R16, R2 ; /* 0x000000100a0a722b */ /* 0x004e880000000002 */ /*09f0*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x001fc80003f0e000 */ /*0a00*/ DADD R14, R6, 1 ; /* 0x3ff00000060e7429 */ /* 0x000e080000000000 */ /*0a10*/ DADD R8, R4, R8 ; /* 0x0000000004087229 */ /* 0x002e480000000008 */ /*0a20*/ DMUL R18, R10, R10 ; /* 0x0000000a0a127228 */ /* 0x004ea40000000000 */ /*0a30*/ FSEL R6, R14, R6, !P1 ; /* 0x000000060e067208 */ /* 0x001fe40004800000 */ /*0a40*/ FSEL R7, R15, R7, !P1 ; /* 0x000000070f077208 */ /* 0x000fe20004800000 */ /*0a50*/ DMUL R16, R8, R8 ; /* 0x0000000808107228 */ 
/* 0x002fc80000000000 */ /*0a60*/ DFMA R18, R8, R8, -R18 ; /* 0x000000080812722b */ /* 0x004e080000000812 */ /*0a70*/ DADD R20, R8, R8 ; /* 0x0000000008147229 */ /* 0x000e480000000008 */ /*0a80*/ DADD R18, R4, R18 ; /* 0x0000000004127229 */ /* 0x001fc80000000012 */ /*0a90*/ DADD R14, R6, 1 ; /* 0x3ff00000060e7429 */ /* 0x000e080000000000 */ /*0aa0*/ DFMA R8, R10, R10, R16 ; /* 0x0000000a0a08722b */ /* 0x000fc80000000010 */ /*0ab0*/ DFMA R10, R10, R20, R2 ; /* 0x000000140a0a722b */ /* 0x002fe40000000002 */ /*0ac0*/ FSEL R14, R14, R6, !P0 ; /* 0x000000060e0e7208 */ /* 0x001fe40004000000 */ /*0ad0*/ DMUL R16, R18, R18 ; /* 0x0000001212107228 */ /* 0x000e220000000000 */ /*0ae0*/ FSEL R15, R15, R7, !P0 ; /* 0x000000070f0f7208 */ /* 0x000fc60004000000 */ /*0af0*/ DADD R20, R18, R18 ; /* 0x0000000012147229 */ /* 0x000e480000000012 */ /*0b00*/ DFMA R18, R10, R10, R16 ; /* 0x0000000a0a12722b */ /* 0x001fc80000000010 */ /*0b10*/ DFMA R16, -R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x000e080000000110 */ /*0b20*/ DFMA R10, R10, R20, R2 ; /* 0x000000140a0a722b */ /* 0x002fc80000000002 */ /*0b30*/ DADD R20, R4, R16 ; /* 0x0000000004147229 */ /* 0x001e080000000010 */ /*0b40*/ DADD R6, R14, 1 ; /* 0x3ff000000e067429 */ /* 0x000fc80000000000 */ /*0b50*/ DSETP.GEU.AND P0, PT, R8, 4, PT ; /* 0x401000000800742a */ /* 0x000e480003f0e000 */ /*0b60*/ DMUL R16, R20, R20 ; /* 0x0000001414107228 */ /* 0x001e240000000000 */ /*0b70*/ FSEL R14, R6, R14, !P0 ; /* 0x0000000e060e7208 */ /* 0x002fe40004000000 */ /*0b80*/ FSEL R15, R7, R15, !P0 ; /* 0x0000000f070f7208 */ /* 0x000fe20004000000 */ /*0b90*/ DSETP.GEU.AND P1, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000fc80003f2e000 */ /*0ba0*/ DFMA R8, -R10, R10, R16 ; /* 0x0000000a0a08722b */ /* 0x001e080000000110 */ /*0bb0*/ DADD R20, R20, R20 ; /* 0x0000000014147229 */ /* 0x000e480000000014 */ /*0bc0*/ DADD R18, R14, 1 ; /* 0x3ff000000e127429 */ /* 0x000fc80000000000 */ /*0bd0*/ DADD R22, R4, R8 ; /* 0x0000000004167229 */ /* 
0x001e080000000008 */ /*0be0*/ DFMA R6, R10, R20, R2 ; /* 0x000000140a06722b */ /* 0x002fc80000000002 */ /*0bf0*/ DMUL R20, R22, R22 ; /* 0x0000001616147228 */ /* 0x001e080000000000 */ /*0c00*/ DFMA R8, R10, R10, R16 ; /* 0x0000000a0a08722b */ /* 0x0003e40000000010 */ /*0c10*/ FSEL R10, R18, R14, !P1 ; /* 0x0000000e120a7208 */ /* 0x002fe40004800000 */ /*0c20*/ DADD R16, R22, R22 ; /* 0x0000000016107229 */ /* 0x000e620000000016 */ /*0c30*/ FSEL R11, R19, R15, !P1 ; /* 0x0000000f130b7208 */ /* 0x000fc60004800000 */ /*0c40*/ DFMA R22, R6, R6, R20 ; /* 0x000000060616722b */ /* 0x001fc80000000014 */ /*0c50*/ DFMA R20, -R6, R6, R20 ; /* 0x000000060614722b */ /* 0x000e080000000114 */ /*0c60*/ DFMA R6, R16, R6, R2 ; /* 0x000000061006722b */ /* 0x002fc80000000002 */ /*0c70*/ DADD R16, R10, 1 ; /* 0x3ff000000a107429 */ /* 0x000fc80000000000 */ /*0c80*/ DSETP.GEU.AND P1, PT, R8, 4, PT ; /* 0x401000000800742a */ /* 0x000e480003f2e000 */ /*0c90*/ DADD R20, R4, R20 ; /* 0x0000000004147229 */ /* 0x001e240000000014 */ /*0ca0*/ FSEL R8, R16, R10, !P1 ; /* 0x0000000a10087208 */ /* 0x002fe40004800000 */ /*0cb0*/ FSEL R9, R17, R11, !P1 ; /* 0x0000000b11097208 */ /* 0x000fe20004800000 */ /*0cc0*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x000e480000000000 */ /*0cd0*/ DSETP.GEU.AND P0, PT, R22, 4, PT ; /* 0x401000001600742a */ /* 0x000fc80003f0e000 */ /*0ce0*/ DADD R16, R8, 1 ; /* 0x3ff0000008107429 */ /* 0x000e880000000000 */ /*0cf0*/ DMUL R10, R20, R20 ; /* 0x00000014140a7228 */ /* 0x001e080000000000 */ /*0d00*/ DFMA R14, R20, R20, -R14 ; /* 0x00000014140e722b */ /* 0x002e64000000080e */ /*0d10*/ FSEL R16, R16, R8, !P0 ; /* 0x0000000810107208 */ /* 0x004fe40004000000 */ /*0d20*/ FSEL R17, R17, R9, !P0 ; /* 0x0000000911117208 */ /* 0x000fe20004000000 */ /*0d30*/ DFMA R10, R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x001e08000000000a */ /*0d40*/ DADD R20, R20, R20 ; /* 0x0000000014147229 */ /* 0x000e880000000014 */ /*0d50*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 
0x002fc8000000000e */ /*0d60*/ DADD R8, R16, 1 ; /* 0x3ff0000010087429 */ /* 0x000fc80000000000 */ /*0d70*/ DSETP.GEU.AND P1, PT, R10, 4, PT ; /* 0x401000000a00742a */ /* 0x001e080003f2e000 */ /*0d80*/ DFMA R6, R6, R20, R2 ; /* 0x000000140606722b */ /* 0x004fe40000000002 */ /*0d90*/ FSEL R16, R8, R16, !P1 ; /* 0x0000001008107208 */ /* 0x001fe40004800000 */ /*0da0*/ DMUL R10, R14.reuse, R14 ; /* 0x0000000e0e0a7228 */ /* 0x040e220000000000 */ /*0db0*/ FSEL R17, R9, R17, !P1 ; /* 0x0000001109117208 */ /* 0x000fe40004800000 */ /*0dc0*/ ISETP.NE.AND P1, PT, R12, 0x3e4, PT ; /* 0x000003e40c00780c */ /* 0x000fe20003f25270 */ /*0dd0*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000fc8000000000e */ /*0de0*/ DFMA R18, R6, R6, R10 ; /* 0x000000060612722b */ /* 0x001fc8000000000a */ /*0df0*/ DFMA R10, -R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x000e08000000010a */ /*0e00*/ DADD R8, R16, 1 ; /* 0x3ff0000010087429 */ /* 0x000fc80000000000 */ /*0e10*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x001e08000000000a */ /*0e20*/ DFMA R6, R6, R14, R2 ; /* 0x0000000e0606722b */ /* 0x000fc80000000002 */ /*0e30*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000e480003f0e000 */ /*0e40*/ DMUL R14, R10.reuse, R10 ; /* 0x0000000a0a0e7228 */ /* 0x041e240000000000 */ /*0e50*/ FSEL R20, R8, R16, !P0 ; /* 0x0000001008147208 */ /* 0x002fe40004000000 */ /*0e60*/ FSEL R21, R9, R17, !P0 ; /* 0x0000001109157208 */ /* 0x000fe20004000000 */ /*0e70*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000e48000000000a */ /*0e80*/ DFMA R18, R6, R6, R14 ; /* 0x000000060612722b */ /* 0x001fc8000000000e */ /*0e90*/ DFMA R14, -R6, R6, R14 ; /* 0x00000006060e722b */ /* 0x000e08000000010e */ /*0ea0*/ DADD R8, R20, 1 ; /* 0x3ff0000014087429 */ /* 0x000fc80000000000 */ /*0eb0*/ DFMA R6, R6, R10, R2 ; /* 0x0000000a0606722b */ /* 0x002e480000000002 */ /*0ec0*/ DADD R16, R4, R14 ; /* 0x0000000004107229 */ /* 0x001fc8000000000e */ /*0ed0*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 
0x401000001200742a */ /* 0x000e080003f0e000 */ /*0ee0*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x002fe40000000000 */ /*0ef0*/ FSEL R18, R8, R20, !P0 ; /* 0x0000001408127208 */ /* 0x001fe40004000000 */ /*0f00*/ DMUL R10, R16, R16 ; /* 0x00000010100a7228 */ /* 0x000e220000000000 */ /*0f10*/ FSEL R19, R9, R21, !P0 ; /* 0x0000001509137208 */ /* 0x000fca0004000000 */ /*0f20*/ DADD R20, R10, R14 ; /* 0x000000000a147229 */ /* 0x001e08000000000e */ /*0f30*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*0f40*/ DSETP.GEU.AND P0, PT, R20, 4, PT ; /* 0x401000001400742a */ /* 0x001e0c0003f0e000 */ /*0f50*/ FSEL R8, R8, R18, !P0 ; /* 0x0000001208087208 */ /* 0x001fe40004000000 */ /*0f60*/ FSEL R9, R9, R19, !P0 ; /* 0x0000001309097208 */ /* 0x000fe20004000000 */ /*0f70*/ @P1 BRA 0x500 ; /* 0xfffff58000001947 */ /* 0x000fea000383ffff */ /*0f80*/ F2F.F32.F64 R9, R8 ; /* 0x0000000800097310 */ /* 0x000e220000301000 */ /*0f90*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc800078e00ff */ /*0fa0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fca00078e0203 */ /*0fb0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x001fe2000c101904 */ /*0fc0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0fd0*/ BRA 0xfd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0fe0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ff0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1000*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1010*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1020*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
__global__ void Mandelbrot(float* out,const double* re,const double* im){ int idx = blockIdx.x*blockDim.x + threadIdx.x; double zr=0; double zi=0; double cr=re[idx]; double ci=im[idx]; double count=0; for(int i=1;i<1000;i++){ double zrt=zr*zr-zi*zi; double zit=2*zr*zi; zr=zrt+cr; zi=zit+ci; if(zr*zr+zi*zi<4)count++; } out[idx]=count; }
.file "tmpxft_0009c681_00000000-6_Mandelbrot_kern.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_ .type _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_, @function _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10MandelbrotPfPKdS1_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_, .-_Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_ .globl _Z10MandelbrotPfPKdS1_ .type _Z10MandelbrotPfPKdS1_, @function _Z10MandelbrotPfPKdS1_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_ addq $8, 
%rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10MandelbrotPfPKdS1_, .-_Z10MandelbrotPfPKdS1_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z10MandelbrotPfPKdS1_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10MandelbrotPfPKdS1_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
__global__ void Mandelbrot(float* out,const double* re,const double* im){ int idx = blockIdx.x*blockDim.x + threadIdx.x; double zr=0; double zi=0; double cr=re[idx]; double ci=im[idx]; double count=0; for(int i=1;i<1000;i++){ double zrt=zr*zr-zi*zi; double zit=2*zr*zi; zr=zrt+cr; zi=zit+ci; if(zr*zr+zi*zi<4)count++; } out[idx]=count; }
#include <hip/hip_runtime.h> __global__ void Mandelbrot(float* out,const double* re,const double* im){ int idx = blockIdx.x*blockDim.x + threadIdx.x; double zr=0; double zi=0; double cr=re[idx]; double ci=im[idx]; double count=0; for(int i=1;i<1000;i++){ double zrt=zr*zr-zi*zi; double zit=2*zr*zi; zr=zrt+cr; zi=zit+ci; if(zr*zr+zi*zi<4)count++; } out[idx]=count; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> __global__ void Mandelbrot(float* out,const double* re,const double* im){ int idx = blockIdx.x*blockDim.x + threadIdx.x; double zr=0; double zi=0; double cr=re[idx]; double ci=im[idx]; double count=0; for(int i=1;i<1000;i++){ double zrt=zr*zr-zi*zi; double zit=2*zr*zi; zr=zrt+cr; zi=zit+ci; if(zr*zr+zi*zi<4)count++; } out[idx]=count; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10MandelbrotPfPKdS1_ .globl _Z10MandelbrotPfPKdS1_ .p2align 8 .type _Z10MandelbrotPfPKdS1_,@function _Z10MandelbrotPfPKdS1_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x8 v_mov_b32_e32 v9, 0 v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, 0 v_mov_b32_e32 v12, 0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_movk_i32 s2, 0x3e7 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[1:2] v_add_co_u32 v5, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v7, vcc_lo, s6, v3 v_add_co_ci_u32_e32 v8, vcc_lo, s7, v4, vcc_lo global_load_b64 v[3:4], v[5:6], off global_load_b64 v[5:6], v[7:8], off v_mov_b32_e32 v7, 0 v_mov_b32_e32 v8, 0 .p2align 6 .LBB0_1: v_mul_f64 v[13:14], v[11:12], v[11:12] v_add_f64 v[15:16], v[9:10], v[9:10] s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) s_cmp_eq_u32 s2, 0 v_fma_f64 v[9:10], v[9:10], v[9:10], -v[13:14] s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_fma_f64 v[11:12], v[11:12], v[15:16], v[5:6] v_add_f64 v[15:16], v[7:8], 1.0 v_add_f64 v[9:10], v[3:4], v[9:10] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f64 v[13:14], v[11:12], v[11:12] v_fma_f64 v[13:14], v[9:10], v[9:10], v[13:14] s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_f64_e32 vcc_lo, 4.0, v[13:14] v_dual_cndmask_b32 v8, v8, v16 :: v_dual_cndmask_b32 v7, v7, v15 s_cbranch_scc0 .LBB0_1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_cvt_f32_f64_e32 v3, v[7:8] s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) 
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v3, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10MandelbrotPfPKdS1_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 17 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10MandelbrotPfPKdS1_, .Lfunc_end0-_Z10MandelbrotPfPKdS1_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: 
global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10MandelbrotPfPKdS1_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10MandelbrotPfPKdS1_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 17 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> __global__ void Mandelbrot(float* out,const double* re,const double* im){ int idx = blockIdx.x*blockDim.x + threadIdx.x; double zr=0; double zi=0; double cr=re[idx]; double ci=im[idx]; double count=0; for(int i=1;i<1000;i++){ double zrt=zr*zr-zi*zi; double zit=2*zr*zi; zr=zrt+cr; zi=zit+ci; if(zr*zr+zi*zi<4)count++; } out[idx]=count; }
.text .file "Mandelbrot_kern.hip" .globl _Z25__device_stub__MandelbrotPfPKdS1_ # -- Begin function _Z25__device_stub__MandelbrotPfPKdS1_ .p2align 4, 0x90 .type _Z25__device_stub__MandelbrotPfPKdS1_,@function _Z25__device_stub__MandelbrotPfPKdS1_: # @_Z25__device_stub__MandelbrotPfPKdS1_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z10MandelbrotPfPKdS1_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z25__device_stub__MandelbrotPfPKdS1_, .Lfunc_end0-_Z25__device_stub__MandelbrotPfPKdS1_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10MandelbrotPfPKdS1_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc 
# %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10MandelbrotPfPKdS1_,@object # @_Z10MandelbrotPfPKdS1_ .section .rodata,"a",@progbits .globl _Z10MandelbrotPfPKdS1_ .p2align 3, 0x0 _Z10MandelbrotPfPKdS1_: .quad _Z25__device_stub__MandelbrotPfPKdS1_ .size _Z10MandelbrotPfPKdS1_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10MandelbrotPfPKdS1_" .size .L__unnamed_1, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__MandelbrotPfPKdS1_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10MandelbrotPfPKdS1_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z10MandelbrotPfPKdS1_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fc800078e0203 */ /*0060*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc800078e0207 */ /*0070*/ IMAD.WIDE R2, R0, R7, c[0x0][0x170] ; /* 0x00005c0000027625 */ /* 0x000fe400078e0207 */ /*0080*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1b00 */ /*00a0*/ IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e7224 */ /* 0x000fe200078e00ff */ /*00b0*/ DADD R6, RZ, R4 ; /* 0x00000000ff067229 */ /* 0x004e080000000004 */ /*00c0*/ DFMA R8, RZ, RZ, R2 ; /* 0x000000ffff08722b */ /* 0x008fc80000000002 */ /*00d0*/ DMUL R10, R6, R6 ; /* 0x00000006060a7228 */ /* 0x001e080000000000 */ /*00e0*/ DADD R6, R6, R6 ; /* 0x0000000006067229 */ /* 0x000fc80000000006 */ /*00f0*/ DFMA R12, -R8, R8, R10 ; /* 0x00000008080c722b */ /* 0x001e08000000010a */ /*0100*/ DFMA R10, R8, R8, R10 ; /* 0x00000008080a722b */ /* 0x000fc8000000000a */ /*0110*/ DADD R12, R4, R12 ; /* 0x00000000040c7229 */ /* 0x001e08000000000c */ /*0120*/ DFMA R6, R8, R6, R2 ; /* 0x000000060806722b */ /* 0x000fc80000000002 */ /*0130*/ DMUL R8, R12, R12 ; /* 0x0000000c0c087228 */ /* 0x001e080000000000 */ /*0140*/ DSETP.GEU.AND P1, PT, R10, 4, PT ; /* 0x401000000a00742a */ /* 0x000e480003f2e000 */ /*0150*/ DFMA R10, -R6.reuse, R6.reuse, R8.reuse ; /* 
0x00000006060a722b */ /* 0x1c1e240000000108 */ /*0160*/ FSEL R15, RZ, 1.875, P1 ; /* 0x3ff00000ff0f7808 */ /* 0x002fe40000800000 */ /*0170*/ DFMA R8, R6, R6, R8 ; /* 0x000000060608722b */ /* 0x000e480000000008 */ /*0180*/ DADD R12, R12, R12 ; /* 0x000000000c0c7229 */ /* 0x000e88000000000c */ /*0190*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x001e08000000000a */ /*01a0*/ DSETP.GEU.AND P0, PT, R8, 4, PT ; /* 0x401000000800742a */ /* 0x002fc80003f0e000 */ /*01b0*/ DFMA R6, R6, R12, R2 ; /* 0x0000000c0606722b */ /* 0x004fc80000000002 */ /*01c0*/ DADD R8, R14, 1 ; /* 0x3ff000000e087429 */ /* 0x000e480000000000 */ /*01d0*/ DMUL R12, R10, R10 ; /* 0x0000000a0a0c7228 */ /* 0x001e080000000000 */ /*01e0*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000ea4000000000a */ /*01f0*/ FSEL R19, R9, R15, !P0 ; /* 0x0000000f09137208 */ /* 0x002fe40004000000 */ /*0200*/ DFMA R16, R6, R6, R12 ; /* 0x000000060610722b */ /* 0x001fe2000000000c */ /*0210*/ FSEL R18, R8, RZ, !P0 ; /* 0x000000ff08127208 */ /* 0x000fc60004000000 */ /*0220*/ DFMA R12, -R6, R6, R12 ; /* 0x00000006060c722b */ /* 0x000e08000000010c */ /*0230*/ DFMA R6, R6, R10, R2 ; /* 0x0000000a0606722b */ /* 0x004e480000000002 */ /*0240*/ DADD R12, R4, R12 ; /* 0x00000000040c7229 */ /* 0x001e08000000000c */ /*0250*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x002e480000000000 */ /*0260*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*0270*/ DSETP.GEU.AND P1, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x000e880003f2e000 */ /*0280*/ DMUL R10, R12, R12 ; /* 0x0000000c0c0a7228 */ /* 0x001e080000000000 */ /*0290*/ DFMA R14, R12, R12, -R14 ; /* 0x0000000c0c0e722b */ /* 0x002e48000000080e */ /*02a0*/ DADD R16, R12, R12 ; /* 0x000000000c107229 */ /* 0x0005e4000000000c */ /*02b0*/ FSEL R12, R8, R18, !P1 ; /* 0x00000012080c7208 */ /* 0x004fe40004800000 */ /*02c0*/ DFMA R10, R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x001e22000000000a */ /*02d0*/ FSEL R13, R9, R19, !P1 ; /* 0x00000013090d7208 */ 
/* 0x000fc60004800000 */ /*02e0*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x002e48000000000e */ /*02f0*/ DSETP.GEU.AND P0, PT, R10, 4, PT ; /* 0x401000000a00742a */ /* 0x001fc80003f0e000 */ /*0300*/ DADD R8, R12, 1 ; /* 0x3ff000000c087429 */ /* 0x000fc80000000000 */ /*0310*/ DFMA R6, R6, R16, R2 ; /* 0x000000100606722b */ /* 0x000fc80000000002 */ /*0320*/ DMUL R10, R14, R14 ; /* 0x0000000e0e0a7228 */ /* 0x002e080000000000 */ /*0330*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000e48000000000e */ /*0340*/ DFMA R16, R6, R6, R10 ; /* 0x000000060610722b */ /* 0x001fc8000000000a */ /*0350*/ DFMA R10, -R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x000e08000000010a */ /*0360*/ DFMA R6, R6, R14, R2 ; /* 0x0000000e0606722b */ /* 0x0023e40000000002 */ /*0370*/ FSEL R14, R8, R12, !P0 ; /* 0x0000000c080e7208 */ /* 0x002fe40004000000 */ /*0380*/ FSEL R15, R9, R13, !P0 ; /* 0x0000000d090f7208 */ /* 0x000fe20004000000 */ /*0390*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x001e08000000000a */ /*03a0*/ DSETP.GEU.AND P0, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x000fc80003f0e000 */ /*03b0*/ DADD R8, R14, 1 ; /* 0x3ff000000e087429 */ /* 0x000e480000000000 */ /*03c0*/ DMUL R12, R10, R10 ; /* 0x0000000a0a0c7228 */ /* 0x001e080000000000 */ /*03d0*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000ea4000000000a */ /*03e0*/ FSEL R18, R8, R14, !P0 ; /* 0x0000000e08127208 */ /* 0x002fe40004000000 */ /*03f0*/ FSEL R19, R9, R15, !P0 ; /* 0x0000000f09137208 */ /* 0x000fe20004000000 */ /*0400*/ DFMA R20, R6, R6, R12 ; /* 0x000000060614722b */ /* 0x001fc8000000000c */ /*0410*/ DFMA R12, -R6, R6, R12 ; /* 0x00000006060c722b */ /* 0x000e08000000010c */ /*0420*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*0430*/ DFMA R6, R6, R10, R2 ; /* 0x0000000a0606722b */ /* 0x004e480000000002 */ /*0440*/ DADD R16, R4, R12 ; /* 0x0000000004107229 */ /* 0x001fc8000000000c */ /*0450*/ DSETP.GEU.AND P0, PT, R20, 4, PT ; /* 0x401000001400742a */ /* 
0x000e080003f0e000 */ /*0460*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x002fe40000000000 */ /*0470*/ FSEL R18, R8, R18, !P0 ; /* 0x0000001208127208 */ /* 0x001fe40004000000 */ /*0480*/ DMUL R10, R16, R16 ; /* 0x00000010100a7228 */ /* 0x000e220000000000 */ /*0490*/ FSEL R19, R9, R19, !P0 ; /* 0x0000001309137208 */ /* 0x000fca0004000000 */ /*04a0*/ DADD R12, R10, R14 ; /* 0x000000000a0c7229 */ /* 0x001e08000000000e */ /*04b0*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*04c0*/ DSETP.GEU.AND P0, PT, R12, 4, PT ; /* 0x401000000c00742a */ /* 0x0010640003f0e000 */ /*04d0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x4 ; /* 0x00000004ff0c7424 */ /* 0x001fc800078e00ff */ /*04e0*/ FSEL R8, R8, R18, !P0 ; /* 0x0000001208087208 */ /* 0x002fe40004000000 */ /*04f0*/ FSEL R9, R9, R19, !P0 ; /* 0x0000001309097208 */ /* 0x000fe40004000000 */ /*0500*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */ /* 0x000e220000000010 */ /*0510*/ IADD3 R12, R12, 0x10, RZ ; /* 0x000000100c0c7810 */ /* 0x000fc60007ffe0ff */ /*0520*/ DADD R10, -R14, R10 ; /* 0x000000000e0a7229 */ /* 0x000e48000000010a */ /*0530*/ DFMA R16, R16, R6, R2 ; /* 0x000000061010722b */ /* 0x001e080000000002 */ /*0540*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x002fc8000000000a */ /*0550*/ DMUL R6, R16, R16 ; /* 0x0000001010067228 */ /* 0x001e0c0000000000 */ /*0560*/ DFMA R14, R10, R10, -R6 ; /* 0x0000000a0a0e722b */ /* 0x001e080000000806 */ /*0570*/ DMUL R6, R10, R10 ; /* 0x0000000a0a067228 */ /* 0x000e480000000000 */ /*0580*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000e88000000000a */ /*0590*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x001e08000000000e */ /*05a0*/ DFMA R6, R16, R16, R6 ; /* 0x000000101006722b */ /* 0x002e480000000006 */ /*05b0*/ DFMA R10, R16, R10, R2 ; /* 0x0000000a100a722b */ /* 0x004fc80000000002 */ /*05c0*/ DMUL R16, R14, R14 ; /* 0x0000000e0e107228 */ /* 0x001e080000000000 */ /*05d0*/ DSETP.GEU.AND P1, PT, R6, 4, PT ; /* 0x401000000600742a */ /* 
0x002fc80003f2e000 */ /*05e0*/ DFMA R6, -R10, R10, R16 ; /* 0x0000000a0a06722b */ /* 0x001e080000000110 */ /*05f0*/ DFMA R16, R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x000e480000000010 */ /*0600*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000e88000000000e */ /*0610*/ DADD R6, R4, R6 ; /* 0x0000000004067229 */ /* 0x001e080000000006 */ /*0620*/ DSETP.GEU.AND P0, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x002fc80003f0e000 */ /*0630*/ DFMA R14, R10, R14, R2 ; /* 0x0000000e0a0e722b */ /* 0x004fc80000000002 */ /*0640*/ DMUL R16, R6, R6 ; /* 0x0000000606107228 */ /* 0x001e080000000000 */ /*0650*/ DADD R10, R8, 1 ; /* 0x3ff00000080a7429 */ /* 0x000e480000000000 */ /*0660*/ DFMA R18, R14, R14, R16 ; /* 0x0000000e0e12722b */ /* 0x001fc80000000010 */ /*0670*/ DFMA R16, -R14, R14, R16 ; /* 0x0000000e0e10722b */ /* 0x000e240000000110 */ /*0680*/ FSEL R20, R10, R8, !P1 ; /* 0x000000080a147208 */ /* 0x002fe40004800000 */ /*0690*/ DADD R6, R6, R6 ; /* 0x0000000006067229 */ /* 0x000e620000000006 */ /*06a0*/ FSEL R21, R11, R9, !P1 ; /* 0x000000090b157208 */ /* 0x000fc60004800000 */ /*06b0*/ DADD R16, R4, R16 ; /* 0x0000000004107229 */ /* 0x001e080000000010 */ /*06c0*/ DFMA R6, R14, R6, R2 ; /* 0x000000060e06722b */ /* 0x002fc80000000002 */ /*06d0*/ DMUL R10, R16, R16 ; /* 0x00000010100a7228 */ /* 0x001fc80000000000 */ /*06e0*/ DADD R8, R20, 1 ; /* 0x3ff0000014087429 */ /* 0x000e080000000000 */ /*06f0*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */ /* 0x000e480000000010 */ /*0700*/ DSETP.GEU.AND P1, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000fe40003f2e000 */ /*0710*/ FSEL R8, R8, R20, !P0 ; /* 0x0000001408087208 */ /* 0x001fe40004000000 */ /*0720*/ DFMA R18, R6, R6, R10 ; /* 0x000000060612722b */ /* 0x000e22000000000a */ /*0730*/ FSEL R9, R9, R21, !P0 ; /* 0x0000001509097208 */ /* 0x000fc60004000000 */ /*0740*/ DFMA R14, -R6, R6, R10 ; /* 0x00000006060e722b */ /* 0x000e88000000010a */ /*0750*/ DFMA R10, R6, R16, R2 ; /* 0x00000010060a722b */ /* 
0x002e480000000002 */ /*0760*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x001fc80003f0e000 */ /*0770*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x004fc8000000000e */ /*0780*/ DMUL R18, R10, R10 ; /* 0x0000000a0a127228 */ /* 0x002e080000000000 */ /*0790*/ DADD R6, R8, 1 ; /* 0x3ff0000008067429 */ /* 0x000e480000000000 */ /*07a0*/ DFMA R18, R14, R14, -R18 ; /* 0x0000000e0e12722b */ /* 0x001e080000000812 */ /*07b0*/ DMUL R16, R14.reuse, R14 ; /* 0x0000000e0e107228 */ /* 0x040ea40000000000 */ /*07c0*/ FSEL R6, R6, R8, !P1 ; /* 0x0000000806067208 */ /* 0x002fe40004800000 */ /*07d0*/ DADD R20, R14, R14 ; /* 0x000000000e147229 */ /* 0x000e62000000000e */ /*07e0*/ FSEL R7, R7, R9, !P1 ; /* 0x0000000907077208 */ /* 0x000fc60004800000 */ /*07f0*/ DADD R18, R4, R18 ; /* 0x0000000004127229 */ /* 0x001e080000000012 */ /*0800*/ DFMA R16, R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x004e880000000010 */ /*0810*/ DFMA R10, R10, R20, R2 ; /* 0x000000140a0a722b */ /* 0x002fc80000000002 */ /*0820*/ DMUL R14, R18, R18 ; /* 0x00000012120e7228 */ /* 0x001e080000000000 */ /*0830*/ DADD R8, R6, 1 ; /* 0x3ff0000006087429 */ /* 0x000e480000000000 */ /*0840*/ DSETP.GEU.AND P1, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x004fc80003f2e000 */ /*0850*/ DFMA R16, R10.reuse, R10.reuse, R14.reuse ; /* 0x0000000a0a10722b */ /* 0x1c1fe4000000000e */ /*0860*/ FSEL R6, R8, R6, !P0 ; /* 0x0000000608067208 */ /* 0x002fe40004000000 */ /*0870*/ DFMA R14, -R10, R10, R14 ; /* 0x0000000a0a0e722b */ /* 0x000e22000000010e */ /*0880*/ FSEL R7, R9, R7, !P0 ; /* 0x0000000709077208 */ /* 0x000fc60004000000 */ /*0890*/ DADD R18, R18, R18 ; /* 0x0000000012127229 */ /* 0x000e480000000012 */ /*08a0*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 0x001e08000000000e */ /*08b0*/ DSETP.GEU.AND P0, PT, R16, 4, PT ; /* 0x401000001000742a */ /* 0x000fc80003f0e000 */ /*08c0*/ DFMA R10, R10, R18, R2 ; /* 0x000000120a0a722b */ /* 0x002fc80000000002 */ /*08d0*/ DMUL R16, R14, R14 ; /* 
0x0000000e0e107228 */ /* 0x001e080000000000 */ /*08e0*/ DADD R8, R6, 1 ; /* 0x3ff0000006087429 */ /* 0x000fc80000000000 */ /*08f0*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000e48000000000e */ /*0900*/ DFMA R18, R10, R10, R16 ; /* 0x0000000a0a12722b */ /* 0x001fc80000000010 */ /*0910*/ DFMA R16, -R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x000e080000000110 */ /*0920*/ DFMA R10, R10, R14, R2 ; /* 0x0000000e0a0a722b */ /* 0x0023e40000000002 */ /*0930*/ FSEL R14, R8, R6, !P1 ; /* 0x00000006080e7208 */ /* 0x002fe40004800000 */ /*0940*/ FSEL R15, R9, R7, !P1 ; /* 0x00000007090f7208 */ /* 0x000fe20004800000 */ /*0950*/ DADD R16, R4, R16 ; /* 0x0000000004107229 */ /* 0x001e080000000010 */ /*0960*/ DSETP.GEU.AND P1, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000fc80003f2e000 */ /*0970*/ DADD R6, R14, 1 ; /* 0x3ff000000e067429 */ /* 0x000e480000000000 */ /*0980*/ DMUL R8, R16, R16 ; /* 0x0000001010087228 */ /* 0x001e080000000000 */ /*0990*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */ /* 0x000ea40000000010 */ /*09a0*/ FSEL R6, R6, R14, !P0 ; /* 0x0000000e06067208 */ /* 0x002fe40004000000 */ /*09b0*/ DFMA R18, R10, R10, R8 ; /* 0x0000000a0a12722b */ /* 0x001e220000000008 */ /*09c0*/ FSEL R7, R7, R15, !P0 ; /* 0x0000000f07077208 */ /* 0x000fc60004000000 */ /*09d0*/ DFMA R8, -R10, R10, R8 ; /* 0x0000000a0a08722b */ /* 0x000e480000000108 */ /*09e0*/ DFMA R10, R10, R16, R2 ; /* 0x000000100a0a722b */ /* 0x004e880000000002 */ /*09f0*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x001fc80003f0e000 */ /*0a00*/ DADD R14, R6, 1 ; /* 0x3ff00000060e7429 */ /* 0x000e080000000000 */ /*0a10*/ DADD R8, R4, R8 ; /* 0x0000000004087229 */ /* 0x002e480000000008 */ /*0a20*/ DMUL R18, R10, R10 ; /* 0x0000000a0a127228 */ /* 0x004ea40000000000 */ /*0a30*/ FSEL R6, R14, R6, !P1 ; /* 0x000000060e067208 */ /* 0x001fe40004800000 */ /*0a40*/ FSEL R7, R15, R7, !P1 ; /* 0x000000070f077208 */ /* 0x000fe20004800000 */ /*0a50*/ DMUL R16, R8, R8 ; /* 0x0000000808107228 */ 
/* 0x002fc80000000000 */ /*0a60*/ DFMA R18, R8, R8, -R18 ; /* 0x000000080812722b */ /* 0x004e080000000812 */ /*0a70*/ DADD R20, R8, R8 ; /* 0x0000000008147229 */ /* 0x000e480000000008 */ /*0a80*/ DADD R18, R4, R18 ; /* 0x0000000004127229 */ /* 0x001fc80000000012 */ /*0a90*/ DADD R14, R6, 1 ; /* 0x3ff00000060e7429 */ /* 0x000e080000000000 */ /*0aa0*/ DFMA R8, R10, R10, R16 ; /* 0x0000000a0a08722b */ /* 0x000fc80000000010 */ /*0ab0*/ DFMA R10, R10, R20, R2 ; /* 0x000000140a0a722b */ /* 0x002fe40000000002 */ /*0ac0*/ FSEL R14, R14, R6, !P0 ; /* 0x000000060e0e7208 */ /* 0x001fe40004000000 */ /*0ad0*/ DMUL R16, R18, R18 ; /* 0x0000001212107228 */ /* 0x000e220000000000 */ /*0ae0*/ FSEL R15, R15, R7, !P0 ; /* 0x000000070f0f7208 */ /* 0x000fc60004000000 */ /*0af0*/ DADD R20, R18, R18 ; /* 0x0000000012147229 */ /* 0x000e480000000012 */ /*0b00*/ DFMA R18, R10, R10, R16 ; /* 0x0000000a0a12722b */ /* 0x001fc80000000010 */ /*0b10*/ DFMA R16, -R10, R10, R16 ; /* 0x0000000a0a10722b */ /* 0x000e080000000110 */ /*0b20*/ DFMA R10, R10, R20, R2 ; /* 0x000000140a0a722b */ /* 0x002fc80000000002 */ /*0b30*/ DADD R20, R4, R16 ; /* 0x0000000004147229 */ /* 0x001e080000000010 */ /*0b40*/ DADD R6, R14, 1 ; /* 0x3ff000000e067429 */ /* 0x000fc80000000000 */ /*0b50*/ DSETP.GEU.AND P0, PT, R8, 4, PT ; /* 0x401000000800742a */ /* 0x000e480003f0e000 */ /*0b60*/ DMUL R16, R20, R20 ; /* 0x0000001414107228 */ /* 0x001e240000000000 */ /*0b70*/ FSEL R14, R6, R14, !P0 ; /* 0x0000000e060e7208 */ /* 0x002fe40004000000 */ /*0b80*/ FSEL R15, R7, R15, !P0 ; /* 0x0000000f070f7208 */ /* 0x000fe20004000000 */ /*0b90*/ DSETP.GEU.AND P1, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000fc80003f2e000 */ /*0ba0*/ DFMA R8, -R10, R10, R16 ; /* 0x0000000a0a08722b */ /* 0x001e080000000110 */ /*0bb0*/ DADD R20, R20, R20 ; /* 0x0000000014147229 */ /* 0x000e480000000014 */ /*0bc0*/ DADD R18, R14, 1 ; /* 0x3ff000000e127429 */ /* 0x000fc80000000000 */ /*0bd0*/ DADD R22, R4, R8 ; /* 0x0000000004167229 */ /* 
0x001e080000000008 */ /*0be0*/ DFMA R6, R10, R20, R2 ; /* 0x000000140a06722b */ /* 0x002fc80000000002 */ /*0bf0*/ DMUL R20, R22, R22 ; /* 0x0000001616147228 */ /* 0x001e080000000000 */ /*0c00*/ DFMA R8, R10, R10, R16 ; /* 0x0000000a0a08722b */ /* 0x0003e40000000010 */ /*0c10*/ FSEL R10, R18, R14, !P1 ; /* 0x0000000e120a7208 */ /* 0x002fe40004800000 */ /*0c20*/ DADD R16, R22, R22 ; /* 0x0000000016107229 */ /* 0x000e620000000016 */ /*0c30*/ FSEL R11, R19, R15, !P1 ; /* 0x0000000f130b7208 */ /* 0x000fc60004800000 */ /*0c40*/ DFMA R22, R6, R6, R20 ; /* 0x000000060616722b */ /* 0x001fc80000000014 */ /*0c50*/ DFMA R20, -R6, R6, R20 ; /* 0x000000060614722b */ /* 0x000e080000000114 */ /*0c60*/ DFMA R6, R16, R6, R2 ; /* 0x000000061006722b */ /* 0x002fc80000000002 */ /*0c70*/ DADD R16, R10, 1 ; /* 0x3ff000000a107429 */ /* 0x000fc80000000000 */ /*0c80*/ DSETP.GEU.AND P1, PT, R8, 4, PT ; /* 0x401000000800742a */ /* 0x000e480003f2e000 */ /*0c90*/ DADD R20, R4, R20 ; /* 0x0000000004147229 */ /* 0x001e240000000014 */ /*0ca0*/ FSEL R8, R16, R10, !P1 ; /* 0x0000000a10087208 */ /* 0x002fe40004800000 */ /*0cb0*/ FSEL R9, R17, R11, !P1 ; /* 0x0000000b11097208 */ /* 0x000fe20004800000 */ /*0cc0*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x000e480000000000 */ /*0cd0*/ DSETP.GEU.AND P0, PT, R22, 4, PT ; /* 0x401000001600742a */ /* 0x000fc80003f0e000 */ /*0ce0*/ DADD R16, R8, 1 ; /* 0x3ff0000008107429 */ /* 0x000e880000000000 */ /*0cf0*/ DMUL R10, R20, R20 ; /* 0x00000014140a7228 */ /* 0x001e080000000000 */ /*0d00*/ DFMA R14, R20, R20, -R14 ; /* 0x00000014140e722b */ /* 0x002e64000000080e */ /*0d10*/ FSEL R16, R16, R8, !P0 ; /* 0x0000000810107208 */ /* 0x004fe40004000000 */ /*0d20*/ FSEL R17, R17, R9, !P0 ; /* 0x0000000911117208 */ /* 0x000fe20004000000 */ /*0d30*/ DFMA R10, R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x001e08000000000a */ /*0d40*/ DADD R20, R20, R20 ; /* 0x0000000014147229 */ /* 0x000e880000000014 */ /*0d50*/ DADD R14, R4, R14 ; /* 0x00000000040e7229 */ /* 
0x002fc8000000000e */ /*0d60*/ DADD R8, R16, 1 ; /* 0x3ff0000010087429 */ /* 0x000fc80000000000 */ /*0d70*/ DSETP.GEU.AND P1, PT, R10, 4, PT ; /* 0x401000000a00742a */ /* 0x001e080003f2e000 */ /*0d80*/ DFMA R6, R6, R20, R2 ; /* 0x000000140606722b */ /* 0x004fe40000000002 */ /*0d90*/ FSEL R16, R8, R16, !P1 ; /* 0x0000001008107208 */ /* 0x001fe40004800000 */ /*0da0*/ DMUL R10, R14.reuse, R14 ; /* 0x0000000e0e0a7228 */ /* 0x040e220000000000 */ /*0db0*/ FSEL R17, R9, R17, !P1 ; /* 0x0000001109117208 */ /* 0x000fe40004800000 */ /*0dc0*/ ISETP.NE.AND P1, PT, R12, 0x3e4, PT ; /* 0x000003e40c00780c */ /* 0x000fe20003f25270 */ /*0dd0*/ DADD R14, R14, R14 ; /* 0x000000000e0e7229 */ /* 0x000fc8000000000e */ /*0de0*/ DFMA R18, R6, R6, R10 ; /* 0x000000060612722b */ /* 0x001fc8000000000a */ /*0df0*/ DFMA R10, -R6, R6, R10 ; /* 0x00000006060a722b */ /* 0x000e08000000010a */ /*0e00*/ DADD R8, R16, 1 ; /* 0x3ff0000010087429 */ /* 0x000fc80000000000 */ /*0e10*/ DADD R10, R4, R10 ; /* 0x00000000040a7229 */ /* 0x001e08000000000a */ /*0e20*/ DFMA R6, R6, R14, R2 ; /* 0x0000000e0606722b */ /* 0x000fc80000000002 */ /*0e30*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 0x401000001200742a */ /* 0x000e480003f0e000 */ /*0e40*/ DMUL R14, R10.reuse, R10 ; /* 0x0000000a0a0e7228 */ /* 0x041e240000000000 */ /*0e50*/ FSEL R20, R8, R16, !P0 ; /* 0x0000001008147208 */ /* 0x002fe40004000000 */ /*0e60*/ FSEL R21, R9, R17, !P0 ; /* 0x0000001109157208 */ /* 0x000fe20004000000 */ /*0e70*/ DADD R10, R10, R10 ; /* 0x000000000a0a7229 */ /* 0x000e48000000000a */ /*0e80*/ DFMA R18, R6, R6, R14 ; /* 0x000000060612722b */ /* 0x001fc8000000000e */ /*0e90*/ DFMA R14, -R6, R6, R14 ; /* 0x00000006060e722b */ /* 0x000e08000000010e */ /*0ea0*/ DADD R8, R20, 1 ; /* 0x3ff0000014087429 */ /* 0x000fc80000000000 */ /*0eb0*/ DFMA R6, R6, R10, R2 ; /* 0x0000000a0606722b */ /* 0x002e480000000002 */ /*0ec0*/ DADD R16, R4, R14 ; /* 0x0000000004107229 */ /* 0x001fc8000000000e */ /*0ed0*/ DSETP.GEU.AND P0, PT, R18, 4, PT ; /* 
0x401000001200742a */ /* 0x000e080003f0e000 */ /*0ee0*/ DMUL R14, R6, R6 ; /* 0x00000006060e7228 */ /* 0x002fe40000000000 */ /*0ef0*/ FSEL R18, R8, R20, !P0 ; /* 0x0000001408127208 */ /* 0x001fe40004000000 */ /*0f00*/ DMUL R10, R16, R16 ; /* 0x00000010100a7228 */ /* 0x000e220000000000 */ /*0f10*/ FSEL R19, R9, R21, !P0 ; /* 0x0000001509137208 */ /* 0x000fca0004000000 */ /*0f20*/ DADD R20, R10, R14 ; /* 0x000000000a147229 */ /* 0x001e08000000000e */ /*0f30*/ DADD R8, R18, 1 ; /* 0x3ff0000012087429 */ /* 0x000fc80000000000 */ /*0f40*/ DSETP.GEU.AND P0, PT, R20, 4, PT ; /* 0x401000001400742a */ /* 0x001e0c0003f0e000 */ /*0f50*/ FSEL R8, R8, R18, !P0 ; /* 0x0000001208087208 */ /* 0x001fe40004000000 */ /*0f60*/ FSEL R9, R9, R19, !P0 ; /* 0x0000001309097208 */ /* 0x000fe20004000000 */ /*0f70*/ @P1 BRA 0x500 ; /* 0xfffff58000001947 */ /* 0x000fea000383ffff */ /*0f80*/ F2F.F32.F64 R9, R8 ; /* 0x0000000800097310 */ /* 0x000e220000301000 */ /*0f90*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc800078e00ff */ /*0fa0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fca00078e0203 */ /*0fb0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x001fe2000c101904 */ /*0fc0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0fd0*/ BRA 0xfd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0fe0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ff0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1000*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1010*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1020*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*1070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10MandelbrotPfPKdS1_ .globl _Z10MandelbrotPfPKdS1_ .p2align 8 .type _Z10MandelbrotPfPKdS1_,@function _Z10MandelbrotPfPKdS1_: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x8 v_mov_b32_e32 v9, 0 v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, 0 v_mov_b32_e32 v12, 0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_movk_i32 s2, 0x3e7 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 3, v[1:2] v_add_co_u32 v5, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo v_add_co_u32 v7, vcc_lo, s6, v3 v_add_co_ci_u32_e32 v8, vcc_lo, s7, v4, vcc_lo global_load_b64 v[3:4], v[5:6], off global_load_b64 v[5:6], v[7:8], off v_mov_b32_e32 v7, 0 v_mov_b32_e32 v8, 0 .p2align 6 .LBB0_1: v_mul_f64 v[13:14], v[11:12], v[11:12] v_add_f64 v[15:16], v[9:10], v[9:10] s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) s_cmp_eq_u32 s2, 0 v_fma_f64 v[9:10], v[9:10], v[9:10], -v[13:14] s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_fma_f64 v[11:12], v[11:12], v[15:16], v[5:6] v_add_f64 v[15:16], v[7:8], 1.0 v_add_f64 v[9:10], v[3:4], v[9:10] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f64 v[13:14], v[11:12], v[11:12] v_fma_f64 v[13:14], v[9:10], v[9:10], v[13:14] s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_f64_e32 vcc_lo, 4.0, v[13:14] v_dual_cndmask_b32 v8, v8, v16 :: v_dual_cndmask_b32 v7, v7, v15 s_cbranch_scc0 .LBB0_1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_cvt_f32_f64_e32 v3, v[7:8] s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) 
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v3, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10MandelbrotPfPKdS1_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 17 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10MandelbrotPfPKdS1_, .Lfunc_end0-_Z10MandelbrotPfPKdS1_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: 
global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10MandelbrotPfPKdS1_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10MandelbrotPfPKdS1_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 17 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0009c681_00000000-6_Mandelbrot_kern.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_ .type _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_, @function _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10MandelbrotPfPKdS1_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_, .-_Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_ .globl _Z10MandelbrotPfPKdS1_ .type _Z10MandelbrotPfPKdS1_, @function _Z10MandelbrotPfPKdS1_: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z10MandelbrotPfPKdS1_PfPKdS1_ addq $8, 
%rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10MandelbrotPfPKdS1_, .-_Z10MandelbrotPfPKdS1_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z10MandelbrotPfPKdS1_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10MandelbrotPfPKdS1_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "Mandelbrot_kern.hip" .globl _Z25__device_stub__MandelbrotPfPKdS1_ # -- Begin function _Z25__device_stub__MandelbrotPfPKdS1_ .p2align 4, 0x90 .type _Z25__device_stub__MandelbrotPfPKdS1_,@function _Z25__device_stub__MandelbrotPfPKdS1_: # @_Z25__device_stub__MandelbrotPfPKdS1_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z10MandelbrotPfPKdS1_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z25__device_stub__MandelbrotPfPKdS1_, .Lfunc_end0-_Z25__device_stub__MandelbrotPfPKdS1_ .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10MandelbrotPfPKdS1_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc 
# %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10MandelbrotPfPKdS1_,@object # @_Z10MandelbrotPfPKdS1_ .section .rodata,"a",@progbits .globl _Z10MandelbrotPfPKdS1_ .p2align 3, 0x0 _Z10MandelbrotPfPKdS1_: .quad _Z25__device_stub__MandelbrotPfPKdS1_ .size _Z10MandelbrotPfPKdS1_, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10MandelbrotPfPKdS1_" .size .L__unnamed_1, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__MandelbrotPfPKdS1_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10MandelbrotPfPKdS1_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// Created by luozhiwang (luozw1994@outlook.com) // Date: 2020/1/2 #include <cuda.h> #include <vector> #include <random> // 此章节主要是关于稀疏矩阵计算,对应不同类型的稀疏矩阵有不同的存储格式。 // 主要是介绍为主,没什么代码。此处就是Dense-Matrix转CSR,ELL,COO格式 class Matrix{ public: int row; int column; int num; std::vector<std::vector<float>> data; Matrix(const std::vector<std::vector<float>> &data){ this->row = data.size(); this->column = data[0].size(); for (int r = 0; r < data.size(); ++r){ std::vector<float> tmp; for (int c = 0; c < data[0].size(); ++c){ tmp.push_back(data[r][c]); } this->data.push_back(tmp); } } void show(){ printf(" =================== Origin Matrix ===================>\n"); for (int r = 0; r < this->row; ++r){ for(int c = 0; c < this->column; ++c){ printf("%.3f ", data[r][c]); } printf("\n"); } printf("\n"); } }; class CSR{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_ptr; std::vector<float> data; CSR(const Matrix &matrix){ this->column = matrix.data[0].size(); this->row = matrix.data.size(); int count = 0; row_ptr.push_back(0); for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ ++count; data.push_back(tmp); col_idx.push_back(c); } } row_ptr.push_back(count); } } void show(){ printf(" =================== CSR ===================>\n"); printf("CSR data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCSR col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCSR row_ptr ===> "); for (int i = 0; i < row_ptr.size(); ++i){ printf("%d ", row_ptr[i]); } printf("\n\n"); } }; class COO{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_idx; std::vector<float> data; COO(const Matrix &matrix){ this->column = matrix.column; this->row = matrix.row; for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ data.push_back(tmp); 
col_idx.push_back(c); row_idx.push_back(r); } } } } void show(){ printf(" =================== COO ===================>\n"); printf("COO data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCOO col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCOO row_ptr ===> "); for (int i = 0; i < row_idx.size(); ++i){ printf("%d ", row_idx[i]); } printf("\n\n"); } }; class ELL{ public: std::vector<std::vector<float>> data; std::vector<std::vector<int>> col_idx; ELL(const Matrix &matrix){ int max_len = 0; for (int r = 0; r < matrix.row; ++r){ std::vector<int> tmp_col; std::vector<float> tmp_data; for (int c = 0; c < matrix.column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ printf("%d ", c); tmp_col.push_back(c); tmp_data.push_back(tmp); } } if(max_len < tmp_data.size()){ max_len = tmp_data.size(); } data.push_back(tmp_data); col_idx.push_back(tmp_col); } for (int r = 0; r < data.size(); ++r){ for (int c = data[r].size(); c < max_len; ++c){ data[r].push_back(0); col_idx[r].push_back(0); } } } void show(){ printf(" =================== ELL ===================>\n"); for (int r = 0; r < data.size(); ++r){ for (int c = 0; c < data[0].size(); ++c){ printf("%.3f ", data[r][c]); } printf(" "); for (int c = 0; c < col_idx[0].size(); ++c){ printf("%d ", col_idx[r][c]); // printf("%d ", c); } printf("\n"); } printf("\n"); } }; const int ROW = 10; const int COL = 10; int main(int args, char **argv){ // 构建稀疏矩阵 std::default_random_engine e; std::uniform_real_distribution<float > probability(0, 1); std::uniform_real_distribution<float > number(0, 10); std::vector<std::vector<float>> data; for (int i=0; i<ROW; ++i){ std::vector<float> tmp; for (int j = 0; j < COL; ++j){ if(probability(e) < 0.1){ tmp.push_back(number(e)); }else{ tmp.push_back(0); } } data.push_back(tmp); } Matrix matrix{data}; matrix.show(); CSR csr{matrix}; csr.show(); COO coo{matrix}; coo.show(); ELL ell(matrix); ell.show(); }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// Created by luozhiwang (luozw1994@outlook.com) // Date: 2020/1/2 #include <cuda.h> #include <vector> #include <random> // 此章节主要是关于稀疏矩阵计算,对应不同类型的稀疏矩阵有不同的存储格式。 // 主要是介绍为主,没什么代码。此处就是Dense-Matrix转CSR,ELL,COO格式 class Matrix{ public: int row; int column; int num; std::vector<std::vector<float>> data; Matrix(const std::vector<std::vector<float>> &data){ this->row = data.size(); this->column = data[0].size(); for (int r = 0; r < data.size(); ++r){ std::vector<float> tmp; for (int c = 0; c < data[0].size(); ++c){ tmp.push_back(data[r][c]); } this->data.push_back(tmp); } } void show(){ printf(" =================== Origin Matrix ===================>\n"); for (int r = 0; r < this->row; ++r){ for(int c = 0; c < this->column; ++c){ printf("%.3f ", data[r][c]); } printf("\n"); } printf("\n"); } }; class CSR{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_ptr; std::vector<float> data; CSR(const Matrix &matrix){ this->column = matrix.data[0].size(); this->row = matrix.data.size(); int count = 0; row_ptr.push_back(0); for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ ++count; data.push_back(tmp); col_idx.push_back(c); } } row_ptr.push_back(count); } } void show(){ printf(" =================== CSR ===================>\n"); printf("CSR data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCSR col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCSR row_ptr ===> "); for (int i = 0; i < row_ptr.size(); ++i){ printf("%d ", row_ptr[i]); } printf("\n\n"); } }; class COO{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_idx; std::vector<float> data; COO(const Matrix &matrix){ this->column = matrix.column; this->row = matrix.row; for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ data.push_back(tmp); 
col_idx.push_back(c); row_idx.push_back(r); } } } } void show(){ printf(" =================== COO ===================>\n"); printf("COO data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCOO col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCOO row_ptr ===> "); for (int i = 0; i < row_idx.size(); ++i){ printf("%d ", row_idx[i]); } printf("\n\n"); } }; class ELL{ public: std::vector<std::vector<float>> data; std::vector<std::vector<int>> col_idx; ELL(const Matrix &matrix){ int max_len = 0; for (int r = 0; r < matrix.row; ++r){ std::vector<int> tmp_col; std::vector<float> tmp_data; for (int c = 0; c < matrix.column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ printf("%d ", c); tmp_col.push_back(c); tmp_data.push_back(tmp); } } if(max_len < tmp_data.size()){ max_len = tmp_data.size(); } data.push_back(tmp_data); col_idx.push_back(tmp_col); } for (int r = 0; r < data.size(); ++r){ for (int c = data[r].size(); c < max_len; ++c){ data[r].push_back(0); col_idx[r].push_back(0); } } } void show(){ printf(" =================== ELL ===================>\n"); for (int r = 0; r < data.size(); ++r){ for (int c = 0; c < data[0].size(); ++c){ printf("%.3f ", data[r][c]); } printf(" "); for (int c = 0; c < col_idx[0].size(); ++c){ printf("%d ", col_idx[r][c]); // printf("%d ", c); } printf("\n"); } printf("\n"); } }; const int ROW = 10; const int COL = 10; int main(int args, char **argv){ // 构建稀疏矩阵 std::default_random_engine e; std::uniform_real_distribution<float > probability(0, 1); std::uniform_real_distribution<float > number(0, 10); std::vector<std::vector<float>> data; for (int i=0; i<ROW; ++i){ std::vector<float> tmp; for (int j = 0; j < COL; ++j){ if(probability(e) < 0.1){ tmp.push_back(number(e)); }else{ tmp.push_back(0); } } data.push_back(tmp); } Matrix matrix{data}; matrix.show(); CSR csr{matrix}; csr.show(); COO coo{matrix}; coo.show(); ELL ell(matrix); ell.show(); }
// Created by luozhiwang (luozw1994@outlook.com) // Date: 2020/1/2 #include <hip/hip_runtime.h> #include <vector> #include <random> // 此章节主要是关于稀疏矩阵计算,对应不同类型的稀疏矩阵有不同的存储格式。 // 主要是介绍为主,没什么代码。此处就是Dense-Matrix转CSR,ELL,COO格式 class Matrix{ public: int row; int column; int num; std::vector<std::vector<float>> data; Matrix(const std::vector<std::vector<float>> &data){ this->row = data.size(); this->column = data[0].size(); for (int r = 0; r < data.size(); ++r){ std::vector<float> tmp; for (int c = 0; c < data[0].size(); ++c){ tmp.push_back(data[r][c]); } this->data.push_back(tmp); } } void show(){ printf(" =================== Origin Matrix ===================>\n"); for (int r = 0; r < this->row; ++r){ for(int c = 0; c < this->column; ++c){ printf("%.3f ", data[r][c]); } printf("\n"); } printf("\n"); } }; class CSR{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_ptr; std::vector<float> data; CSR(const Matrix &matrix){ this->column = matrix.data[0].size(); this->row = matrix.data.size(); int count = 0; row_ptr.push_back(0); for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ ++count; data.push_back(tmp); col_idx.push_back(c); } } row_ptr.push_back(count); } } void show(){ printf(" =================== CSR ===================>\n"); printf("CSR data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCSR col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCSR row_ptr ===> "); for (int i = 0; i < row_ptr.size(); ++i){ printf("%d ", row_ptr[i]); } printf("\n\n"); } }; class COO{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_idx; std::vector<float> data; COO(const Matrix &matrix){ this->column = matrix.column; this->row = matrix.row; for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ data.push_back(tmp); 
col_idx.push_back(c); row_idx.push_back(r); } } } } void show(){ printf(" =================== COO ===================>\n"); printf("COO data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCOO col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCOO row_ptr ===> "); for (int i = 0; i < row_idx.size(); ++i){ printf("%d ", row_idx[i]); } printf("\n\n"); } }; class ELL{ public: std::vector<std::vector<float>> data; std::vector<std::vector<int>> col_idx; ELL(const Matrix &matrix){ int max_len = 0; for (int r = 0; r < matrix.row; ++r){ std::vector<int> tmp_col; std::vector<float> tmp_data; for (int c = 0; c < matrix.column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ printf("%d ", c); tmp_col.push_back(c); tmp_data.push_back(tmp); } } if(max_len < tmp_data.size()){ max_len = tmp_data.size(); } data.push_back(tmp_data); col_idx.push_back(tmp_col); } for (int r = 0; r < data.size(); ++r){ for (int c = data[r].size(); c < max_len; ++c){ data[r].push_back(0); col_idx[r].push_back(0); } } } void show(){ printf(" =================== ELL ===================>\n"); for (int r = 0; r < data.size(); ++r){ for (int c = 0; c < data[0].size(); ++c){ printf("%.3f ", data[r][c]); } printf(" "); for (int c = 0; c < col_idx[0].size(); ++c){ printf("%d ", col_idx[r][c]); // printf("%d ", c); } printf("\n"); } printf("\n"); } }; const int ROW = 10; const int COL = 10; int main(int args, char **argv){ // 构建稀疏矩阵 std::default_random_engine e; std::uniform_real_distribution<float > probability(0, 1); std::uniform_real_distribution<float > number(0, 10); std::vector<std::vector<float>> data; for (int i=0; i<ROW; ++i){ std::vector<float> tmp; for (int j = 0; j < COL; ++j){ if(probability(e) < 0.1){ tmp.push_back(number(e)); }else{ tmp.push_back(0); } } data.push_back(tmp); } Matrix matrix{data}; matrix.show(); CSR csr{matrix}; csr.show(); COO coo{matrix}; coo.show(); ELL ell(matrix); ell.show(); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
// Created by luozhiwang (luozw1994@outlook.com) // Date: 2020/1/2 #include <hip/hip_runtime.h> #include <vector> #include <random> // 此章节主要是关于稀疏矩阵计算,对应不同类型的稀疏矩阵有不同的存储格式。 // 主要是介绍为主,没什么代码。此处就是Dense-Matrix转CSR,ELL,COO格式 class Matrix{ public: int row; int column; int num; std::vector<std::vector<float>> data; Matrix(const std::vector<std::vector<float>> &data){ this->row = data.size(); this->column = data[0].size(); for (int r = 0; r < data.size(); ++r){ std::vector<float> tmp; for (int c = 0; c < data[0].size(); ++c){ tmp.push_back(data[r][c]); } this->data.push_back(tmp); } } void show(){ printf(" =================== Origin Matrix ===================>\n"); for (int r = 0; r < this->row; ++r){ for(int c = 0; c < this->column; ++c){ printf("%.3f ", data[r][c]); } printf("\n"); } printf("\n"); } }; class CSR{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_ptr; std::vector<float> data; CSR(const Matrix &matrix){ this->column = matrix.data[0].size(); this->row = matrix.data.size(); int count = 0; row_ptr.push_back(0); for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ ++count; data.push_back(tmp); col_idx.push_back(c); } } row_ptr.push_back(count); } } void show(){ printf(" =================== CSR ===================>\n"); printf("CSR data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCSR col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCSR row_ptr ===> "); for (int i = 0; i < row_ptr.size(); ++i){ printf("%d ", row_ptr[i]); } printf("\n\n"); } }; class COO{ public: int column; int row; std::vector<int> col_idx; std::vector<int> row_idx; std::vector<float> data; COO(const Matrix &matrix){ this->column = matrix.column; this->row = matrix.row; for (int r = 0; r < this->row; ++r){ for (int c = 0; c < this->column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ data.push_back(tmp); 
col_idx.push_back(c); row_idx.push_back(r); } } } } void show(){ printf(" =================== COO ===================>\n"); printf("COO data ===> "); for (int i = 0; i < data.size(); ++i){ printf("%.3f ", data[i]); } printf("\nCOO col_idx ===> "); for (int i = 0; i < col_idx.size(); ++i){ printf("%d ", col_idx[i]); } printf("\nCOO row_ptr ===> "); for (int i = 0; i < row_idx.size(); ++i){ printf("%d ", row_idx[i]); } printf("\n\n"); } }; class ELL{ public: std::vector<std::vector<float>> data; std::vector<std::vector<int>> col_idx; ELL(const Matrix &matrix){ int max_len = 0; for (int r = 0; r < matrix.row; ++r){ std::vector<int> tmp_col; std::vector<float> tmp_data; for (int c = 0; c < matrix.column; ++c){ float tmp = matrix.data[r][c]; if (tmp != 0){ printf("%d ", c); tmp_col.push_back(c); tmp_data.push_back(tmp); } } if(max_len < tmp_data.size()){ max_len = tmp_data.size(); } data.push_back(tmp_data); col_idx.push_back(tmp_col); } for (int r = 0; r < data.size(); ++r){ for (int c = data[r].size(); c < max_len; ++c){ data[r].push_back(0); col_idx[r].push_back(0); } } } void show(){ printf(" =================== ELL ===================>\n"); for (int r = 0; r < data.size(); ++r){ for (int c = 0; c < data[0].size(); ++c){ printf("%.3f ", data[r][c]); } printf(" "); for (int c = 0; c < col_idx[0].size(); ++c){ printf("%d ", col_idx[r][c]); // printf("%d ", c); } printf("\n"); } printf("\n"); } }; const int ROW = 10; const int COL = 10; int main(int args, char **argv){ // 构建稀疏矩阵 std::default_random_engine e; std::uniform_real_distribution<float > probability(0, 1); std::uniform_real_distribution<float > number(0, 10); std::vector<std::vector<float>> data; for (int i=0; i<ROW; ++i){ std::vector<float> tmp; for (int j = 0; j < COL; ++j){ if(probability(e) < 0.1){ tmp.push_back(number(e)); }else{ tmp.push_back(0); } } data.push_back(tmp); } Matrix matrix{data}; matrix.show(); CSR csr{matrix}; csr.show(); COO coo{matrix}; coo.show(); ELL ell(matrix); ell.show(); }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void g_One_backpropagation( float* _curDelta, float* _w, float* _nextDelta, int rows, int cols, int channels) { int row = blockIdx.x; int channel = blockIdx.y; int skip = channel * rows * cols + row * cols; float* curDelta = _curDelta + skip; float* nextDelta= _nextDelta+ skip; float* w = _w + channel * cols; for(int i = 0; i < cols; i += blockDim.x){ int id = i + threadIdx.x; if(id < cols){ nextDelta[id] = curDelta[id] * w[id]; } } }
code for sm_80 Function : _Z21g_One_backpropagationPfS_S_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff007624 */ /* 0x000fca00078e00ff */ /*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */ /* 0x000fda0003f06270 */ /*0030*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0040*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002600 */ /*0050*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */ /* 0x000fe200078e00ff */ /*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0070*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0080*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */ /* 0x000e620000002100 */ /*0090*/ IMAD R2, R0.reuse, c[0x0][0x178], R3 ; /* 0x00005e0000027a24 */ /* 0x041fe400078e0203 */ /*00a0*/ IMAD R0, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000007a24 */ /* 0x000fc400078e02ff */ /*00b0*/ IMAD R8, R2, c[0x0][0x17c], RZ ; /* 0x00005f0002087a24 */ /* 0x002fe400078e02ff */ /*00c0*/ IMAD.IADD R5, R10, 0x1, R9 ; /* 0x000000010a057824 */ /* 0x000fe200078e0209 */ /*00d0*/ IADD3 R9, R9, c[0x0][0x0], RZ ; /* 0x0000000009097a10 */ /* 0x000fe20007ffe0ff */ /*00e0*/ BSSY B0, 0x240 ; /* 0x0000015000007945 */ /* 0x000fe60003800000 */ /*00f0*/ ISETP.GE.AND P1, PT, R5, c[0x0][0x17c], PT ; /* 0x00005f0005007a0c */ /* 0x000fe40003f26270 */ /*0100*/ ISETP.GE.AND P0, PT, R9, c[0x0][0x17c], PT ; /* 0x00005f0009007a0c */ /* 0x000fd60003f06270 */ /*0110*/ @P1 BRA 0x230 ; /* 0x0000011000001947 */ /* 0x001fea0003800000 */ /*0120*/ IADD3 R11, P1, R8, R5.reuse, RZ ; /* 0x00000005080b7210 */ /* 0x080fe40007f3e0ff */ /*0130*/ SHF.R.S32.HI R3, RZ, 0x1f, R5 ; /* 0x0000001fff037819 */ /* 0x000fe40000011405 */ /*0140*/ IADD3 R5, P2, R0, R5, RZ ; /* 
0x0000000500057210 */ /* 0x000fe20007f5e0ff */ /*0150*/ IMAD.SHL.U32 R7, R11, 0x4, RZ ; /* 0x000000040b077824 */ /* 0x000fe200078e00ff */ /*0160*/ LEA.HI.X.SX32 R4, R8, R3.reuse, 0x1, P1 ; /* 0x0000000308047211 */ /* 0x080fe400008f0eff */ /*0170*/ LEA.HI.X.SX32 R6, R0, R3, 0x1, P2 ; /* 0x0000000300067211 */ /* 0x000fe400010f0eff */ /*0180*/ LEA R2, P2, R5, c[0x0][0x168], 0x2 ; /* 0x00005a0005027a11 */ /* 0x000fc400078410ff */ /*0190*/ SHF.L.U64.HI R11, R11, 0x2, R4 ; /* 0x000000020b0b7819 */ /* 0x000fe40000010204 */ /*01a0*/ IADD3 R4, P1, R7, c[0x0][0x160], RZ ; /* 0x0000580007047a10 */ /* 0x000fe40007f3e0ff */ /*01b0*/ LEA.HI.X R3, R5, c[0x0][0x16c], R6, 0x2, P2 ; /* 0x00005b0005037a11 */ /* 0x000fe400010f1406 */ /*01c0*/ IADD3.X R5, R11, c[0x0][0x164], RZ, P1, !PT ; /* 0x000059000b057a10 */ /* 0x000fc60000ffe4ff */ /*01d0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*01e0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*01f0*/ IADD3 R6, P1, R7, c[0x0][0x170], RZ ; /* 0x00005c0007067a10 */ /* 0x000fc80007f3e0ff */ /*0200*/ IADD3.X R7, R11, c[0x0][0x174], RZ, P1, !PT ; /* 0x00005d000b077a10 */ /* 0x000fe20000ffe4ff */ /*0210*/ FMUL R11, R2, R5 ; /* 0x00000005020b7220 */ /* 0x004fca0000400000 */ /*0220*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e6000c101904 */ /*0230*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0240*/ @!P0 BRA 0xc0 ; /* 0xfffffe7000008947 */ /* 0x000fea000383ffff */ /*0250*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0260*/ BRA 0x260; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void g_One_backpropagation( float* _curDelta, float* _w, float* _nextDelta, int rows, int cols, int channels) { int row = blockIdx.x; int channel = blockIdx.y; int skip = channel * rows * cols + row * cols; float* curDelta = _curDelta + skip; float* nextDelta= _nextDelta+ skip; float* w = _w + channel * cols; for(int i = 0; i < cols; i += blockDim.x){ int id = i + threadIdx.x; if(id < cols){ nextDelta[id] = curDelta[id] * w[id]; } } }
.file "tmpxft_000022f3_00000000-6_g_One_backpropagation.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii .type _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii, @function _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z21g_One_backpropagationPfS_S_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii, 
.-_Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii .globl _Z21g_One_backpropagationPfS_S_iii .type _Z21g_One_backpropagationPfS_S_iii, @function _Z21g_One_backpropagationPfS_S_iii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z21g_One_backpropagationPfS_S_iii, .-_Z21g_One_backpropagationPfS_S_iii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z21g_One_backpropagationPfS_S_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z21g_One_backpropagationPfS_S_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f 
- 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void g_One_backpropagation( float* _curDelta, float* _w, float* _nextDelta, int rows, int cols, int channels) { int row = blockIdx.x; int channel = blockIdx.y; int skip = channel * rows * cols + row * cols; float* curDelta = _curDelta + skip; float* nextDelta= _nextDelta+ skip; float* w = _w + channel * cols; for(int i = 0; i < cols; i += blockDim.x){ int id = i + threadIdx.x; if(id < cols){ nextDelta[id] = curDelta[id] * w[id]; } } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void g_One_backpropagation( float* _curDelta, float* _w, float* _nextDelta, int rows, int cols, int channels) { int row = blockIdx.x; int channel = blockIdx.y; int skip = channel * rows * cols + row * cols; float* curDelta = _curDelta + skip; float* nextDelta= _nextDelta+ skip; float* w = _w + channel * cols; for(int i = 0; i < cols; i += blockDim.x){ int id = i + threadIdx.x; if(id < cols){ nextDelta[id] = curDelta[id] * w[id]; } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void g_One_backpropagation( float* _curDelta, float* _w, float* _nextDelta, int rows, int cols, int channels) { int row = blockIdx.x; int channel = blockIdx.y; int skip = channel * rows * cols + row * cols; float* curDelta = _curDelta + skip; float* nextDelta= _nextDelta+ skip; float* w = _w + channel * cols; for(int i = 0; i < cols; i += blockDim.x){ int id = i + threadIdx.x; if(id < cols){ nextDelta[id] = curDelta[id] * w[id]; } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z21g_One_backpropagationPfS_S_iii .globl _Z21g_One_backpropagationPfS_S_iii .p2align 8 .type _Z21g_One_backpropagationPfS_S_iii,@function _Z21g_One_backpropagationPfS_S_iii: s_load_b32 s2, s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_5 s_clause 0x3 s_load_b32 s3, s[0:1], 0x18 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[8:9], s[0:1], 0x10 s_load_b32 s16, s[0:1], 0x34 s_mul_i32 s12, s15, s2 s_waitcnt lgkmcnt(0) s_mul_i32 s0, s15, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_add_i32 s0, s0, s14 s_mul_i32 s0, s0, s2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_ashr_i32 s1, s0, 31 s_lshl_b64 s[10:11], s[0:1], 2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) s_add_u32 s0, s4, s10 s_addc_u32 s1, s5, s11 s_add_u32 s3, s8, s10 s_addc_u32 s4, s9, s11 s_ashr_i32 s13, s12, 31 s_lshl_b64 s[8:9], s[12:13], 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s5, s6, s8 s_addc_u32 s6, s7, s9 s_and_b32 s7, s16, 0xffff s_mov_b32 s8, 0 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_3 .p2align 6 .LBB0_2: s_or_b32 exec_lo, exec_lo, s9 s_add_i32 s8, s8, s7 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_ge_i32 s8, s2 s_cbranch_scc1 .LBB0_5 .LBB0_3: v_add_nc_u32_e32 v1, s8, v0 s_mov_b32 s9, exec_lo s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e64 s2, v1 s_cbranch_execz .LBB0_2 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] v_add_co_u32 v3, vcc_lo, s0, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s1, v2, vcc_lo v_add_co_u32 v5, vcc_lo, s5, v1 v_add_co_ci_u32_e32 v6, vcc_lo, s6, v2, vcc_lo v_add_co_u32 v1, vcc_lo, s3, v1 global_load_b32 v3, v[3:4], off global_load_b32 v4, v[5:6], off v_add_co_ci_u32_e32 v2, vcc_lo, s4, v2, vcc_lo s_waitcnt vmcnt(0) v_mul_f32_e32 v3, v3, v4 global_store_b32 v[1:2], v3, off 
s_branch .LBB0_2 .LBB0_5: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z21g_One_backpropagationPfS_S_iii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 17 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z21g_One_backpropagationPfS_S_iii, .Lfunc_end0-_Z21g_One_backpropagationPfS_S_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - 
.address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z21g_One_backpropagationPfS_S_iii .private_segment_fixed_size: 0 .sgpr_count: 19 .sgpr_spill_count: 0 .symbol: _Z21g_One_backpropagationPfS_S_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void g_One_backpropagation( float* _curDelta, float* _w, float* _nextDelta, int rows, int cols, int channels) { int row = blockIdx.x; int channel = blockIdx.y; int skip = channel * rows * cols + row * cols; float* curDelta = _curDelta + skip; float* nextDelta= _nextDelta+ skip; float* w = _w + channel * cols; for(int i = 0; i < cols; i += blockDim.x){ int id = i + threadIdx.x; if(id < cols){ nextDelta[id] = curDelta[id] * w[id]; } } }
.text .file "g_One_backpropagation.hip" .globl _Z36__device_stub__g_One_backpropagationPfS_S_iii # -- Begin function _Z36__device_stub__g_One_backpropagationPfS_S_iii .p2align 4, 0x90 .type _Z36__device_stub__g_One_backpropagationPfS_S_iii,@function _Z36__device_stub__g_One_backpropagationPfS_S_iii: # @_Z36__device_stub__g_One_backpropagationPfS_S_iii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z21g_One_backpropagationPfS_S_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z36__device_stub__g_One_backpropagationPfS_S_iii, .Lfunc_end0-_Z36__device_stub__g_One_backpropagationPfS_S_iii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z21g_One_backpropagationPfS_S_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi 
addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z21g_One_backpropagationPfS_S_iii,@object # @_Z21g_One_backpropagationPfS_S_iii .section .rodata,"a",@progbits .globl _Z21g_One_backpropagationPfS_S_iii .p2align 3, 0x0 _Z21g_One_backpropagationPfS_S_iii: .quad _Z36__device_stub__g_One_backpropagationPfS_S_iii .size _Z21g_One_backpropagationPfS_S_iii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z21g_One_backpropagationPfS_S_iii" .size .L__unnamed_1, 35 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z36__device_stub__g_One_backpropagationPfS_S_iii .addrsig_sym __hip_module_ctor 
.addrsig_sym __hip_module_dtor .addrsig_sym _Z21g_One_backpropagationPfS_S_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z21g_One_backpropagationPfS_S_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff007624 */ /* 0x000fca00078e00ff */ /*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */ /* 0x000fda0003f06270 */ /*0030*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0040*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002600 */ /*0050*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */ /* 0x000fe200078e00ff */ /*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0070*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0080*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */ /* 0x000e620000002100 */ /*0090*/ IMAD R2, R0.reuse, c[0x0][0x178], R3 ; /* 0x00005e0000027a24 */ /* 0x041fe400078e0203 */ /*00a0*/ IMAD R0, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000007a24 */ /* 0x000fc400078e02ff */ /*00b0*/ IMAD R8, R2, c[0x0][0x17c], RZ ; /* 0x00005f0002087a24 */ /* 0x002fe400078e02ff */ /*00c0*/ IMAD.IADD R5, R10, 0x1, R9 ; /* 0x000000010a057824 */ /* 0x000fe200078e0209 */ /*00d0*/ IADD3 R9, R9, c[0x0][0x0], RZ ; /* 0x0000000009097a10 */ /* 0x000fe20007ffe0ff */ /*00e0*/ BSSY B0, 0x240 ; /* 0x0000015000007945 */ /* 0x000fe60003800000 */ /*00f0*/ ISETP.GE.AND P1, PT, R5, c[0x0][0x17c], PT ; /* 0x00005f0005007a0c */ /* 0x000fe40003f26270 */ /*0100*/ ISETP.GE.AND P0, PT, R9, c[0x0][0x17c], PT ; /* 0x00005f0009007a0c */ /* 0x000fd60003f06270 */ /*0110*/ @P1 BRA 0x230 ; /* 0x0000011000001947 */ /* 0x001fea0003800000 */ /*0120*/ IADD3 R11, P1, R8, R5.reuse, RZ ; /* 0x00000005080b7210 */ /* 0x080fe40007f3e0ff */ /*0130*/ SHF.R.S32.HI R3, RZ, 0x1f, R5 ; /* 0x0000001fff037819 */ /* 0x000fe40000011405 */ /*0140*/ IADD3 R5, P2, R0, R5, RZ ; /* 
0x0000000500057210 */ /* 0x000fe20007f5e0ff */ /*0150*/ IMAD.SHL.U32 R7, R11, 0x4, RZ ; /* 0x000000040b077824 */ /* 0x000fe200078e00ff */ /*0160*/ LEA.HI.X.SX32 R4, R8, R3.reuse, 0x1, P1 ; /* 0x0000000308047211 */ /* 0x080fe400008f0eff */ /*0170*/ LEA.HI.X.SX32 R6, R0, R3, 0x1, P2 ; /* 0x0000000300067211 */ /* 0x000fe400010f0eff */ /*0180*/ LEA R2, P2, R5, c[0x0][0x168], 0x2 ; /* 0x00005a0005027a11 */ /* 0x000fc400078410ff */ /*0190*/ SHF.L.U64.HI R11, R11, 0x2, R4 ; /* 0x000000020b0b7819 */ /* 0x000fe40000010204 */ /*01a0*/ IADD3 R4, P1, R7, c[0x0][0x160], RZ ; /* 0x0000580007047a10 */ /* 0x000fe40007f3e0ff */ /*01b0*/ LEA.HI.X R3, R5, c[0x0][0x16c], R6, 0x2, P2 ; /* 0x00005b0005037a11 */ /* 0x000fe400010f1406 */ /*01c0*/ IADD3.X R5, R11, c[0x0][0x164], RZ, P1, !PT ; /* 0x000059000b057a10 */ /* 0x000fc60000ffe4ff */ /*01d0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*01e0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*01f0*/ IADD3 R6, P1, R7, c[0x0][0x170], RZ ; /* 0x00005c0007067a10 */ /* 0x000fc80007f3e0ff */ /*0200*/ IADD3.X R7, R11, c[0x0][0x174], RZ, P1, !PT ; /* 0x00005d000b077a10 */ /* 0x000fe20000ffe4ff */ /*0210*/ FMUL R11, R2, R5 ; /* 0x00000005020b7220 */ /* 0x004fca0000400000 */ /*0220*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e6000c101904 */ /*0230*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0240*/ @!P0 BRA 0xc0 ; /* 0xfffffe7000008947 */ /* 0x000fea000383ffff */ /*0250*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0260*/ BRA 0x260; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z21g_One_backpropagationPfS_S_iii .globl _Z21g_One_backpropagationPfS_S_iii .p2align 8 .type _Z21g_One_backpropagationPfS_S_iii,@function _Z21g_One_backpropagationPfS_S_iii: s_load_b32 s2, s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_5 s_clause 0x3 s_load_b32 s3, s[0:1], 0x18 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[8:9], s[0:1], 0x10 s_load_b32 s16, s[0:1], 0x34 s_mul_i32 s12, s15, s2 s_waitcnt lgkmcnt(0) s_mul_i32 s0, s15, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_add_i32 s0, s0, s14 s_mul_i32 s0, s0, s2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_ashr_i32 s1, s0, 31 s_lshl_b64 s[10:11], s[0:1], 2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) s_add_u32 s0, s4, s10 s_addc_u32 s1, s5, s11 s_add_u32 s3, s8, s10 s_addc_u32 s4, s9, s11 s_ashr_i32 s13, s12, 31 s_lshl_b64 s[8:9], s[12:13], 2 s_delay_alu instid0(SALU_CYCLE_1) s_add_u32 s5, s6, s8 s_addc_u32 s6, s7, s9 s_and_b32 s7, s16, 0xffff s_mov_b32 s8, 0 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_3 .p2align 6 .LBB0_2: s_or_b32 exec_lo, exec_lo, s9 s_add_i32 s8, s8, s7 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_ge_i32 s8, s2 s_cbranch_scc1 .LBB0_5 .LBB0_3: v_add_nc_u32_e32 v1, s8, v0 s_mov_b32 s9, exec_lo s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e64 s2, v1 s_cbranch_execz .LBB0_2 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] v_add_co_u32 v3, vcc_lo, s0, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s1, v2, vcc_lo v_add_co_u32 v5, vcc_lo, s5, v1 v_add_co_ci_u32_e32 v6, vcc_lo, s6, v2, vcc_lo v_add_co_u32 v1, vcc_lo, s3, v1 global_load_b32 v3, v[3:4], off global_load_b32 v4, v[5:6], off v_add_co_ci_u32_e32 v2, vcc_lo, s4, v2, vcc_lo s_waitcnt vmcnt(0) v_mul_f32_e32 v3, v3, v4 global_store_b32 v[1:2], v3, off 
s_branch .LBB0_2 .LBB0_5: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z21g_One_backpropagationPfS_S_iii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 17 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z21g_One_backpropagationPfS_S_iii, .Lfunc_end0-_Z21g_One_backpropagationPfS_S_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - 
.address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z21g_One_backpropagationPfS_S_iii .private_segment_fixed_size: 0 .sgpr_count: 19 .sgpr_spill_count: 0 .symbol: _Z21g_One_backpropagationPfS_S_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000022f3_00000000-6_g_One_backpropagation.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii .type _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii, @function _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z21g_One_backpropagationPfS_S_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii, 
.-_Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii .globl _Z21g_One_backpropagationPfS_S_iii .type _Z21g_One_backpropagationPfS_S_iii, @function _Z21g_One_backpropagationPfS_S_iii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z48__device_stub__Z21g_One_backpropagationPfS_S_iiiPfS_S_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z21g_One_backpropagationPfS_S_iii, .-_Z21g_One_backpropagationPfS_S_iii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z21g_One_backpropagationPfS_S_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z21g_One_backpropagationPfS_S_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f 
- 2f 2: .long 0x3 3: .align 8 4:
.text .file "g_One_backpropagation.hip" .globl _Z36__device_stub__g_One_backpropagationPfS_S_iii # -- Begin function _Z36__device_stub__g_One_backpropagationPfS_S_iii .p2align 4, 0x90 .type _Z36__device_stub__g_One_backpropagationPfS_S_iii,@function _Z36__device_stub__g_One_backpropagationPfS_S_iii: # @_Z36__device_stub__g_One_backpropagationPfS_S_iii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z21g_One_backpropagationPfS_S_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z36__device_stub__g_One_backpropagationPfS_S_iii, .Lfunc_end0-_Z36__device_stub__g_One_backpropagationPfS_S_iii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z21g_One_backpropagationPfS_S_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi 
addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z21g_One_backpropagationPfS_S_iii,@object # @_Z21g_One_backpropagationPfS_S_iii .section .rodata,"a",@progbits .globl _Z21g_One_backpropagationPfS_S_iii .p2align 3, 0x0 _Z21g_One_backpropagationPfS_S_iii: .quad _Z36__device_stub__g_One_backpropagationPfS_S_iii .size _Z21g_One_backpropagationPfS_S_iii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z21g_One_backpropagationPfS_S_iii" .size .L__unnamed_1, 35 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z36__device_stub__g_One_backpropagationPfS_S_iii .addrsig_sym __hip_module_ctor 
.addrsig_sym __hip_module_dtor .addrsig_sym _Z21g_One_backpropagationPfS_S_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* * Example of using reducing (tree) type algorithms to parallelize finding the sum of * a set of numbers. On a GF 8600 GT the two parallel algorithms (sumControl = 0 or 1) * are about 35 times faster than the serial algorithm also running on the GPU but using * global memory (sumControl=2), for an array of 512 floats. This is both because * the parallel algorithms scale as ln N while the serial algorithm scales as N, and because the * parallel algorithms use the shared memory while the serial one uses the (generally slower) * global memory in these tests. If we do the serial algorithm on the same computer but use * shared memory (set sumControl=3), the parallel algorithms are only 3.5 times faster. Thus a * factor of 10 in the speed up is because of using the shared memory in this example. Note, * however, that shared memory can only be shared among the threads within a single block. * */ #include <stdio.h> #define BLOCKSIZE 512 // Define some GPU timing utilities. These are invoked from the host program. Usage: // START_GPU; // kernelFunction <<< numBlocks, threadsPerBlock >>> (args) // STOP_GPU; // PRINT_GPU // in the host code. This estimates the time for the kernel kernelFunction to run on the GPU. // For a more extensive discusion, see Section 5.1.2 of the CUDA Best Practices Guide at // http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Best_Practices_Guide.pdf float timeGPU; cudaEvent_t start, stop; #define START_GPU cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); #define STOP_GPU cudaEventRecord(stop, 0); cudaEventSynchronize(stop);\ cudaEventElapsedTime(&timeGPU, start, stop);\ cudaEventDestroy(start);cudaEventDestroy(stop); #define PRINT_GPU printf("\n\nTime to compute on GPU: %f ms \n", timeGPU); // Define a utility to check for CUDA errors. Place it immediately after a CUDA kernel // call in the host code. 
The initial cudaDeviceSynchronize() command ensures that the device // has completed all preceding requested tasks. #define CUDA_ERROR_CHECK cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError();\ if(error != cudaSuccess){printf("***CUDA error: %s\n", cudaGetErrorString(error)); exit(-1);}\ else{printf("\nNo CUDA errors detected\n" );} // Device code. Sums the elements of the array Array and puts the result in Sum __global__ void SumKernel(float* Array, float* Sum, int arraySize) { __device__ float reductionSum(int, float *); *Sum = reductionSum(arraySize, Array); } /* Function to do parallel reduction sum. This should scale as ln N. The parallel butterfly algorithm taken from the literature works generally. My homegrown parallel version works as written for even number of entries in the array, so this algorithm can be used for an odd number by padding the array with an added zero entry. Note that this version assumes that all summations are within one block, so a max of 512 threads on 1.1 devices (presently blocksize is set to 256). One option for larger sums is to break the array up onto multiple blocks, use this algorithm on each block to get a block sum, and then sum the block sums. 
*/ __device__ float reductionSum(int length, float *array) { float sum = 0.0f; // = 0 or 1 for parallel with shared memory, 2 for serial with global, 3 for serial with shared int sumControl = 0; // Copy the array to be summed into shared memory and initialize __shared__ float sarray[BLOCKSIZE]; int i = threadIdx.x; sarray[i] = 0.0f; if(i<length) sarray[i] = array[i]; __syncthreads(); if(sumControl == 0) { // Parallel butterfly sum // see http://cs.anu.edu.au/files/systems/GPUWksp/PDFs/02_CUDAParallelProgrammingModel.pdf for(int bit=BLOCKSIZE/2; bit>0; bit /= 2) { if(i<length) { float t=sarray[i] + sarray[i^bit]; __syncthreads(); sarray[i] = t; __syncthreads(); } } // The array entries sarray[i] with i<length/2 now all contain the sum sum = sarray[0]; } else if(sumControl == 1) { // Another home-made parallel version of a reduction sum. As written, this requires an even // number of entries in the array to be summed, so pad with a zero to handle odd number // (or rewrite to deal with odd number). 
int L=length; int steps = 0; int inc = 1; float t = 0; while(L > 2 ) { steps ++; if(i < (length-inc)) t = sarray[i] + sarray[i+inc]; __syncthreads(); sarray[i] = t; __syncthreads(); inc *= 2; L /= 2; } sum = sarray[0] + sarray[inc]; // This contains the sum } else if(sumControl == 2) { // Serial version of sum accessing global (not shared) memory sum = 0.0f; for(int i=0; i<length; i++) { sum += array[i]; } } else { // Serial version of sum accessing shared memory sum = 0.0f; for(int i=0; i<length; i++) { sum += sarray[i]; } } return sum; } // Host code int main(void) { int arraySize = 512; float* Array; Array = (float*) malloc(sizeof(float) * arraySize); // Fill array with some numbers for(int i=0; i<arraySize; i++) { Array[i] = 2.0f * (float) i; } // Set up device pointers float *devPtrArray; float Sum; float* devPtrSum; cudaMalloc((void**)&devPtrSum, sizeof(float)); // Allocate device memory cudaMalloc((void**)&devPtrArray, arraySize * sizeof(float)); // Copy array to device cudaMemcpy(devPtrArray, Array, arraySize * sizeof(float), cudaMemcpyHostToDevice); // Launch the kernel. START_GPU; // Start timer for device code SumKernel<<<1, 512>>>(devPtrArray, devPtrSum, arraySize); STOP_GPU; // Stop timer for device code PRINT_GPU; // Print timing for device code CUDA_ERROR_CHECK // Copy the sum back from the GPU to the host cudaMemcpy(&Sum, devPtrSum, sizeof(float), cudaMemcpyDeviceToHost); printf("\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n", Sum); // Free the memory allocated on the device cudaFree(devPtrSum); cudaFree(devPtrArray); // Free the memory allocated on the CPU free(Array); return 0; }
code for sm_80 Function : _Z9SumKernelPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0030*/ BSSY B0, 0xd0 ; /* 0x0000009000007945 */ /* 0x000fe40003800000 */ /*0040*/ STS [R0.X4], RZ ; /* 0x000000ff00007388 */ /* 0x0011e20000004800 */ /*0050*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x040fe40003f06270 */ /*0060*/ SHF.L.U32 R2, R0, 0x2, RZ ; /* 0x0000000200027819 */ /* 0x000fd600000006ff */ /*0070*/ @P0 BRA 0xc0 ; /* 0x0000004000000947 */ /* 0x000fea0003800000 */ /*0080*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x001fc800078e00ff */ /*0090*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */ /* 0x000fcc00078e0205 */ /*00a0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ STS [R0.X4], R5 ; /* 0x0000000500007388 */ /* 0x0041e40000004800 */ /*00c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x001fea0003800000 */ /*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00e0*/ @P0 BRA 0x4f0 ; /* 0x0000040000000947 */ /* 0x000fea0003800000 */ /*00f0*/ LOP3.LUT R4, R2.reuse, 0x400, RZ, 0x3c, !PT ; /* 0x0000040002047812 */ /* 0x040fe200078e3cff */ /*0100*/ LDS R3, [R0.X4] ; /* 0x0000000000037984 */ /* 0x000fe20000004800 */ /*0110*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0120*/ LOP3.LUT R5, R2, 0x200, RZ, 0x3c, !PT ; /* 0x0000020002057812 */ /* 0x000fc600078e3cff */ /*0130*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e240000000800 */ /*0140*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */ /* 0x001fe40000000000 */ /*0150*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 
*/ /*0160*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0001e80000004800 */ /*0170*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0180*/ LOP3.LUT R3, R2, 0x80, RZ, 0x3c, !PT ; /* 0x0000008002037812 */ /* 0x001fca00078e3cff */ /*0190*/ LDS R5, [R5] ; /* 0x0000000005057984 */ /* 0x000fe80000000800 */ /*01a0*/ LDS R6, [R0.X4] ; /* 0x0000000000067984 */ /* 0x000e240000004800 */ /*01b0*/ FADD R7, R5, R6 ; /* 0x0000000605077221 */ /* 0x001fe40000000000 */ /*01c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*01d0*/ LOP3.LUT R6, R2, 0x100, RZ, 0x3c, !PT ; /* 0x0000010002067812 */ /* 0x000fca00078e3cff */ /*01e0*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */ /* 0x000fe80000004800 */ /*01f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0200*/ LDS R6, [R6] ; /* 0x0000000006067984 */ /* 0x000fe80000000800 */ /*0210*/ LDS R9, [R0.X4] ; /* 0x0000000000097984 */ /* 0x000e240000004800 */ /*0220*/ FADD R9, R6, R9 ; /* 0x0000000906097221 */ /* 0x001fc40000000000 */ /*0230*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0240*/ LOP3.LUT R6, R2, 0x20, RZ, 0x3c, !PT ; /* 0x0000002002067812 */ /* 0x000fca00078e3cff */ /*0250*/ STS [R0.X4], R9 ; /* 0x0000000900007388 */ /* 0x000fe80000004800 */ /*0260*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0270*/ LDS R3, [R3] ; /* 0x0000000003037984 */ /* 0x000fe80000000800 */ /*0280*/ LDS R4, [R0.X4] ; /* 0x0000000000047984 */ /* 0x000e240000004800 */ /*0290*/ FADD R5, R3, R4 ; /* 0x0000000403057221 */ /* 0x001fc40000000000 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*02b0*/ LOP3.LUT R4, R2, 0x40, RZ, 0x3c, !PT ; /* 0x0000004002047812 */ /* 0x000fca00078e3cff */ /*02c0*/ STS [R0.X4], R5 ; /* 0x0000000500007388 */ /* 0x0001e80000004800 */ /*02d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 
0x000fe20000010000 */ /*02e0*/ LOP3.LUT R5, R2, 0x10, RZ, 0x3c, !PT ; /* 0x0000001002057812 */ /* 0x001fca00078e3cff */ /*02f0*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000fe80000000800 */ /*0300*/ LDS R7, [R0.X4] ; /* 0x0000000000077984 */ /* 0x000e240000004800 */ /*0310*/ FADD R7, R4, R7 ; /* 0x0000000704077221 */ /* 0x001fe40000000000 */ /*0320*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0330*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */ /* 0x000fe80000004800 */ /*0340*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0350*/ LDS R6, [R6] ; /* 0x0000000006067984 */ /* 0x000fe80000000800 */ /*0360*/ LDS R3, [R0.X4] ; /* 0x0000000000037984 */ /* 0x000e240000004800 */ /*0370*/ FADD R3, R6, R3 ; /* 0x0000000306037221 */ /* 0x001fc40000000000 */ /*0380*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0390*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x000fe80000004800 */ /*03a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*03b0*/ LDS R5, [R5] ; /* 0x0000000005057984 */ /* 0x000fe80000000800 */ /*03c0*/ LDS R4, [R0.X4] ; /* 0x0000000000047984 */ /* 0x000e240000004800 */ /*03d0*/ FADD R7, R5, R4 ; /* 0x0000000405077221 */ /* 0x001fc40000000000 */ /*03e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*03f0*/ LOP3.LUT R4, R2.reuse, 0x8, RZ, 0x3c, !PT ; /* 0x0000000802047812 */ /* 0x040fe400078e3cff */ /*0400*/ LOP3.LUT R2, R2, 0x4, RZ, 0x3c, !PT ; /* 0x0000000402027812 */ /* 0x000fc600078e3cff */ /*0410*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */ /* 0x000fe80000004800 */ /*0420*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0430*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000fe80000000800 */ /*0440*/ LDS R9, [R0.X4] ; /* 0x0000000000097984 */ /* 0x000e240000004800 */ /*0450*/ FADD R9, R4, R9 ; /* 0x0000000904097221 */ /* 0x001fc40000000000 */ 
/*0460*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0470*/ STS [R0.X4], R9 ; /* 0x0000000900007388 */ /* 0x000fe80000004800 */ /*0480*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0490*/ LDS R2, [R2] ; /* 0x0000000002027984 */ /* 0x000fe80000000800 */ /*04a0*/ LDS R3, [R0.X4] ; /* 0x0000000000037984 */ /* 0x000e240000004800 */ /*04b0*/ FADD R3, R2, R3 ; /* 0x0000000302037221 */ /* 0x001fc40000000000 */ /*04c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*04d0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0001e80000004800 */ /*04e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*04f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */ /* 0x000e620000000800 */ /*0500*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */ /* 0x000fe20000000f00 */ /*0510*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */ /* 0x001fca00078e00ff */ /*0520*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x002fe2000c101904 */ /*0530*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0540*/ BRA 0x540; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* * Example of using reducing (tree) type algorithms to parallelize finding the sum of * a set of numbers. On a GF 8600 GT the two parallel algorithms (sumControl = 0 or 1) * are about 35 times faster than the serial algorithm also running on the GPU but using * global memory (sumControl=2), for an array of 512 floats. This is both because * the parallel algorithms scale as ln N while the serial algorithm scales as N, and because the * parallel algorithms use the shared memory while the serial one uses the (generally slower) * global memory in these tests. If we do the serial algorithm on the same computer but use * shared memory (set sumControl=3), the parallel algorithms are only 3.5 times faster. Thus a * factor of 10 in the speed up is because of using the shared memory in this example. Note, * however, that shared memory can only be shared among the threads within a single block. * */ #include <stdio.h> #define BLOCKSIZE 512 // Define some GPU timing utilities. These are invoked from the host program. Usage: // START_GPU; // kernelFunction <<< numBlocks, threadsPerBlock >>> (args) // STOP_GPU; // PRINT_GPU // in the host code. This estimates the time for the kernel kernelFunction to run on the GPU. // For a more extensive discusion, see Section 5.1.2 of the CUDA Best Practices Guide at // http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Best_Practices_Guide.pdf float timeGPU; cudaEvent_t start, stop; #define START_GPU cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); #define STOP_GPU cudaEventRecord(stop, 0); cudaEventSynchronize(stop);\ cudaEventElapsedTime(&timeGPU, start, stop);\ cudaEventDestroy(start);cudaEventDestroy(stop); #define PRINT_GPU printf("\n\nTime to compute on GPU: %f ms \n", timeGPU); // Define a utility to check for CUDA errors. Place it immediately after a CUDA kernel // call in the host code. 
The initial cudaDeviceSynchronize() command ensures that the device // has completed all preceding requested tasks. #define CUDA_ERROR_CHECK cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError();\ if(error != cudaSuccess){printf("***CUDA error: %s\n", cudaGetErrorString(error)); exit(-1);}\ else{printf("\nNo CUDA errors detected\n" );} // Device code. Sums the elements of the array Array and puts the result in Sum __global__ void SumKernel(float* Array, float* Sum, int arraySize) { __device__ float reductionSum(int, float *); *Sum = reductionSum(arraySize, Array); } /* Function to do parallel reduction sum. This should scale as ln N. The parallel butterfly algorithm taken from the literature works generally. My homegrown parallel version works as written for even number of entries in the array, so this algorithm can be used for an odd number by padding the array with an added zero entry. Note that this version assumes that all summations are within one block, so a max of 512 threads on 1.1 devices (presently blocksize is set to 256). One option for larger sums is to break the array up onto multiple blocks, use this algorithm on each block to get a block sum, and then sum the block sums. 
*/ __device__ float reductionSum(int length, float *array) { float sum = 0.0f; // = 0 or 1 for parallel with shared memory, 2 for serial with global, 3 for serial with shared int sumControl = 0; // Copy the array to be summed into shared memory and initialize __shared__ float sarray[BLOCKSIZE]; int i = threadIdx.x; sarray[i] = 0.0f; if(i<length) sarray[i] = array[i]; __syncthreads(); if(sumControl == 0) { // Parallel butterfly sum // see http://cs.anu.edu.au/files/systems/GPUWksp/PDFs/02_CUDAParallelProgrammingModel.pdf for(int bit=BLOCKSIZE/2; bit>0; bit /= 2) { if(i<length) { float t=sarray[i] + sarray[i^bit]; __syncthreads(); sarray[i] = t; __syncthreads(); } } // The array entries sarray[i] with i<length/2 now all contain the sum sum = sarray[0]; } else if(sumControl == 1) { // Another home-made parallel version of a reduction sum. As written, this requires an even // number of entries in the array to be summed, so pad with a zero to handle odd number // (or rewrite to deal with odd number). 
int L=length; int steps = 0; int inc = 1; float t = 0; while(L > 2 ) { steps ++; if(i < (length-inc)) t = sarray[i] + sarray[i+inc]; __syncthreads(); sarray[i] = t; __syncthreads(); inc *= 2; L /= 2; } sum = sarray[0] + sarray[inc]; // This contains the sum } else if(sumControl == 2) { // Serial version of sum accessing global (not shared) memory sum = 0.0f; for(int i=0; i<length; i++) { sum += array[i]; } } else { // Serial version of sum accessing shared memory sum = 0.0f; for(int i=0; i<length; i++) { sum += sarray[i]; } } return sum; } // Host code int main(void) { int arraySize = 512; float* Array; Array = (float*) malloc(sizeof(float) * arraySize); // Fill array with some numbers for(int i=0; i<arraySize; i++) { Array[i] = 2.0f * (float) i; } // Set up device pointers float *devPtrArray; float Sum; float* devPtrSum; cudaMalloc((void**)&devPtrSum, sizeof(float)); // Allocate device memory cudaMalloc((void**)&devPtrArray, arraySize * sizeof(float)); // Copy array to device cudaMemcpy(devPtrArray, Array, arraySize * sizeof(float), cudaMemcpyHostToDevice); // Launch the kernel. START_GPU; // Start timer for device code SumKernel<<<1, 512>>>(devPtrArray, devPtrSum, arraySize); STOP_GPU; // Stop timer for device code PRINT_GPU; // Print timing for device code CUDA_ERROR_CHECK // Copy the sum back from the GPU to the host cudaMemcpy(&Sum, devPtrSum, sizeof(float), cudaMemcpyDeviceToHost); printf("\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n", Sum); // Free the memory allocated on the device cudaFree(devPtrSum); cudaFree(devPtrArray); // Free the memory allocated on the CPU free(Array); return 0; }
.file "tmpxft_0005905e_00000000-6_reducedSum.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z12reductionSumiPf .type _Z12reductionSumiPf, @function _Z12reductionSumiPf: .LFB2057: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2057: .size _Z12reductionSumiPf, .-_Z12reductionSumiPf .globl _Z31__device_stub__Z9SumKernelPfS_iPfS_i .type _Z31__device_stub__Z9SumKernelPfS_iPfS_i, @function _Z31__device_stub__Z9SumKernelPfS_iPfS_i: .LFB2083: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 120(%rsp), %rax subq %fs:40, %rax jne .L10 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9SumKernelPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size 
_Z31__device_stub__Z9SumKernelPfS_iPfS_i, .-_Z31__device_stub__Z9SumKernelPfS_iPfS_i .globl _Z9SumKernelPfS_i .type _Z9SumKernelPfS_i, @function _Z9SumKernelPfS_i: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z9SumKernelPfS_iPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z9SumKernelPfS_i, .-_Z9SumKernelPfS_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "\n\nTime to compute on GPU: %f ms \n" .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "***CUDA error: %s\n" .LC2: .string "\nNo CUDA errors detected\n" .section .rodata.str1.8 .align 8 .LC3: .string "\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $48, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $2048, %edi call malloc@PLT movq %rax, %rbx movl $0, %eax .L14: pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 addss %xmm0, %xmm0 movss %xmm0, (%rbx,%rax,4) addq $1, %rax cmpq $512, %rax jne .L14 leaq 8(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movq %rsp, %rdi movl $2048, %esi call cudaMalloc@PLT movl $1, %ecx movl $2048, %edx movq %rbx, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT leaq start(%rip), %rdi call cudaEventCreate@PLT leaq stop(%rip), %rdi call cudaEventCreate@PLT movl $0, %esi movq start(%rip), %rdi call cudaEventRecord@PLT movl $512, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L20 .L15: movl $0, %esi movq stop(%rip), %rdi call cudaEventRecord@PLT movq stop(%rip), %rdi call cudaEventSynchronize@PLT movq stop(%rip), %rdx movq start(%rip), %rsi leaq timeGPU(%rip), %rdi call cudaEventElapsedTime@PLT movq start(%rip), %rdi call 
cudaEventDestroy@PLT movq stop(%rip), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd timeGPU(%rip), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaDeviceSynchronize@PLT call cudaGetLastError@PLT testl %eax, %eax jne .L21 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 28(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT pxor %xmm0, %xmm0 cvtss2sd 28(%rsp), %xmm0 leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq (%rsp), %rdi call cudaFree@PLT movq %rbx, %rdi call free@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L22 movl $0, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state movl $512, %edx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z31__device_stub__Z9SumKernelPfS_iPfS_i jmp .L15 .L21: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %edi call exit@PLT .L22: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z9SumKernelPfS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z9SumKernelPfS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl stop .bss .align 8 .type stop, @object .size stop, 8 stop: .zero 8 .globl start .align 8 .type start, @object .size start, 8 start: .zero 8 .globl timeGPU .align 4 .type timeGPU, @object .size timeGPU, 4 timeGPU: .zero 4 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* * Example of using reducing (tree) type algorithms to parallelize finding the sum of * a set of numbers. On a GF 8600 GT the two parallel algorithms (sumControl = 0 or 1) * are about 35 times faster than the serial algorithm also running on the GPU but using * global memory (sumControl=2), for an array of 512 floats. This is both because * the parallel algorithms scale as ln N while the serial algorithm scales as N, and because the * parallel algorithms use the shared memory while the serial one uses the (generally slower) * global memory in these tests. If we do the serial algorithm on the same computer but use * shared memory (set sumControl=3), the parallel algorithms are only 3.5 times faster. Thus a * factor of 10 in the speed up is because of using the shared memory in this example. Note, * however, that shared memory can only be shared among the threads within a single block. * */ #include <stdio.h> #define BLOCKSIZE 512 // Define some GPU timing utilities. These are invoked from the host program. Usage: // START_GPU; // kernelFunction <<< numBlocks, threadsPerBlock >>> (args) // STOP_GPU; // PRINT_GPU // in the host code. This estimates the time for the kernel kernelFunction to run on the GPU. // For a more extensive discusion, see Section 5.1.2 of the CUDA Best Practices Guide at // http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Best_Practices_Guide.pdf float timeGPU; cudaEvent_t start, stop; #define START_GPU cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); #define STOP_GPU cudaEventRecord(stop, 0); cudaEventSynchronize(stop);\ cudaEventElapsedTime(&timeGPU, start, stop);\ cudaEventDestroy(start);cudaEventDestroy(stop); #define PRINT_GPU printf("\n\nTime to compute on GPU: %f ms \n", timeGPU); // Define a utility to check for CUDA errors. Place it immediately after a CUDA kernel // call in the host code. 
The initial cudaDeviceSynchronize() command ensures that the device // has completed all preceding requested tasks. #define CUDA_ERROR_CHECK cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError();\ if(error != cudaSuccess){printf("***CUDA error: %s\n", cudaGetErrorString(error)); exit(-1);}\ else{printf("\nNo CUDA errors detected\n" );} // Device code. Sums the elements of the array Array and puts the result in Sum __global__ void SumKernel(float* Array, float* Sum, int arraySize) { __device__ float reductionSum(int, float *); *Sum = reductionSum(arraySize, Array); } /* Function to do parallel reduction sum. This should scale as ln N. The parallel butterfly algorithm taken from the literature works generally. My homegrown parallel version works as written for even number of entries in the array, so this algorithm can be used for an odd number by padding the array with an added zero entry. Note that this version assumes that all summations are within one block, so a max of 512 threads on 1.1 devices (presently blocksize is set to 256). One option for larger sums is to break the array up onto multiple blocks, use this algorithm on each block to get a block sum, and then sum the block sums. 
*/ __device__ float reductionSum(int length, float *array) { float sum = 0.0f; // = 0 or 1 for parallel with shared memory, 2 for serial with global, 3 for serial with shared int sumControl = 0; // Copy the array to be summed into shared memory and initialize __shared__ float sarray[BLOCKSIZE]; int i = threadIdx.x; sarray[i] = 0.0f; if(i<length) sarray[i] = array[i]; __syncthreads(); if(sumControl == 0) { // Parallel butterfly sum // see http://cs.anu.edu.au/files/systems/GPUWksp/PDFs/02_CUDAParallelProgrammingModel.pdf for(int bit=BLOCKSIZE/2; bit>0; bit /= 2) { if(i<length) { float t=sarray[i] + sarray[i^bit]; __syncthreads(); sarray[i] = t; __syncthreads(); } } // The array entries sarray[i] with i<length/2 now all contain the sum sum = sarray[0]; } else if(sumControl == 1) { // Another home-made parallel version of a reduction sum. As written, this requires an even // number of entries in the array to be summed, so pad with a zero to handle odd number // (or rewrite to deal with odd number). 
int L=length; int steps = 0; int inc = 1; float t = 0; while(L > 2 ) { steps ++; if(i < (length-inc)) t = sarray[i] + sarray[i+inc]; __syncthreads(); sarray[i] = t; __syncthreads(); inc *= 2; L /= 2; } sum = sarray[0] + sarray[inc]; // This contains the sum } else if(sumControl == 2) { // Serial version of sum accessing global (not shared) memory sum = 0.0f; for(int i=0; i<length; i++) { sum += array[i]; } } else { // Serial version of sum accessing shared memory sum = 0.0f; for(int i=0; i<length; i++) { sum += sarray[i]; } } return sum; } // Host code int main(void) { int arraySize = 512; float* Array; Array = (float*) malloc(sizeof(float) * arraySize); // Fill array with some numbers for(int i=0; i<arraySize; i++) { Array[i] = 2.0f * (float) i; } // Set up device pointers float *devPtrArray; float Sum; float* devPtrSum; cudaMalloc((void**)&devPtrSum, sizeof(float)); // Allocate device memory cudaMalloc((void**)&devPtrArray, arraySize * sizeof(float)); // Copy array to device cudaMemcpy(devPtrArray, Array, arraySize * sizeof(float), cudaMemcpyHostToDevice); // Launch the kernel. START_GPU; // Start timer for device code SumKernel<<<1, 512>>>(devPtrArray, devPtrSum, arraySize); STOP_GPU; // Stop timer for device code PRINT_GPU; // Print timing for device code CUDA_ERROR_CHECK // Copy the sum back from the GPU to the host cudaMemcpy(&Sum, devPtrSum, sizeof(float), cudaMemcpyDeviceToHost); printf("\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n", Sum); // Free the memory allocated on the device cudaFree(devPtrSum); cudaFree(devPtrArray); // Free the memory allocated on the CPU free(Array); return 0; }
/* * Example of using reducing (tree) type algorithms to parallelize finding the sum of * a set of numbers. On a GF 8600 GT the two parallel algorithms (sumControl = 0 or 1) * are about 35 times faster than the serial algorithm also running on the GPU but using * global memory (sumControl=2), for an array of 512 floats. This is both because * the parallel algorithms scale as ln N while the serial algorithm scales as N, and because the * parallel algorithms use the shared memory while the serial one uses the (generally slower) * global memory in these tests. If we do the serial algorithm on the same computer but use * shared memory (set sumControl=3), the parallel algorithms are only 3.5 times faster. Thus a * factor of 10 in the speed up is because of using the shared memory in this example. Note, * however, that shared memory can only be shared among the threads within a single block. * */ #include <hip/hip_runtime.h> #include <stdio.h> #define BLOCKSIZE 512 // Define some GPU timing utilities. These are invoked from the host program. Usage: // START_GPU; // kernelFunction <<< numBlocks, threadsPerBlock >>> (args) // STOP_GPU; // PRINT_GPU // in the host code. This estimates the time for the kernel kernelFunction to run on the GPU. // For a more extensive discusion, see Section 5.1.2 of the CUDA Best Practices Guide at // http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Best_Practices_Guide.pdf float timeGPU; hipEvent_t start, stop; #define START_GPU hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #define STOP_GPU hipEventRecord(stop, 0); hipEventSynchronize(stop);\ hipEventElapsedTime(&timeGPU, start, stop);\ hipEventDestroy(start);hipEventDestroy(stop); #define PRINT_GPU printf("\n\nTime to compute on GPU: %f ms \n", timeGPU); // Define a utility to check for CUDA errors. Place it immediately after a CUDA kernel // call in the host code. 
The initial cudaDeviceSynchronize() command ensures that the device // has completed all preceding requested tasks. #define CUDA_ERROR_CHECK hipDeviceSynchronize(); hipError_t error = hipGetLastError();\ if(error != hipSuccess){printf("***CUDA error: %s\n", hipGetErrorString(error)); exit(-1);}\ else{printf("\nNo CUDA errors detected\n" );} // Device code. Sums the elements of the array Array and puts the result in Sum __global__ void SumKernel(float* Array, float* Sum, int arraySize) { __device__ float reductionSum(int, float *); *Sum = reductionSum(arraySize, Array); } /* Function to do parallel reduction sum. This should scale as ln N. The parallel butterfly algorithm taken from the literature works generally. My homegrown parallel version works as written for even number of entries in the array, so this algorithm can be used for an odd number by padding the array with an added zero entry. Note that this version assumes that all summations are within one block, so a max of 512 threads on 1.1 devices (presently blocksize is set to 256). One option for larger sums is to break the array up onto multiple blocks, use this algorithm on each block to get a block sum, and then sum the block sums. 
*/ __device__ float reductionSum(int length, float *array) { float sum = 0.0f; // = 0 or 1 for parallel with shared memory, 2 for serial with global, 3 for serial with shared int sumControl = 0; // Copy the array to be summed into shared memory and initialize __shared__ float sarray[BLOCKSIZE]; int i = threadIdx.x; sarray[i] = 0.0f; if(i<length) sarray[i] = array[i]; __syncthreads(); if(sumControl == 0) { // Parallel butterfly sum // see http://cs.anu.edu.au/files/systems/GPUWksp/PDFs/02_CUDAParallelProgrammingModel.pdf for(int bit=BLOCKSIZE/2; bit>0; bit /= 2) { if(i<length) { float t=sarray[i] + sarray[i^bit]; __syncthreads(); sarray[i] = t; __syncthreads(); } } // The array entries sarray[i] with i<length/2 now all contain the sum sum = sarray[0]; } else if(sumControl == 1) { // Another home-made parallel version of a reduction sum. As written, this requires an even // number of entries in the array to be summed, so pad with a zero to handle odd number // (or rewrite to deal with odd number). 
int L=length; int steps = 0; int inc = 1; float t = 0; while(L > 2 ) { steps ++; if(i < (length-inc)) t = sarray[i] + sarray[i+inc]; __syncthreads(); sarray[i] = t; __syncthreads(); inc *= 2; L /= 2; } sum = sarray[0] + sarray[inc]; // This contains the sum } else if(sumControl == 2) { // Serial version of sum accessing global (not shared) memory sum = 0.0f; for(int i=0; i<length; i++) { sum += array[i]; } } else { // Serial version of sum accessing shared memory sum = 0.0f; for(int i=0; i<length; i++) { sum += sarray[i]; } } return sum; } // Host code int main(void) { int arraySize = 512; float* Array; Array = (float*) malloc(sizeof(float) * arraySize); // Fill array with some numbers for(int i=0; i<arraySize; i++) { Array[i] = 2.0f * (float) i; } // Set up device pointers float *devPtrArray; float Sum; float* devPtrSum; hipMalloc((void**)&devPtrSum, sizeof(float)); // Allocate device memory hipMalloc((void**)&devPtrArray, arraySize * sizeof(float)); // Copy array to device hipMemcpy(devPtrArray, Array, arraySize * sizeof(float), hipMemcpyHostToDevice); // Launch the kernel. START_GPU; // Start timer for device code SumKernel<<<1, 512>>>(devPtrArray, devPtrSum, arraySize); STOP_GPU; // Stop timer for device code PRINT_GPU; // Print timing for device code CUDA_ERROR_CHECK // Copy the sum back from the GPU to the host hipMemcpy(&Sum, devPtrSum, sizeof(float), hipMemcpyDeviceToHost); printf("\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n", Sum); // Free the memory allocated on the device hipFree(devPtrSum); hipFree(devPtrArray); // Free the memory allocated on the CPU free(Array); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* * Example of using reducing (tree) type algorithms to parallelize finding the sum of * a set of numbers. On a GF 8600 GT the two parallel algorithms (sumControl = 0 or 1) * are about 35 times faster than the serial algorithm also running on the GPU but using * global memory (sumControl=2), for an array of 512 floats. This is both because * the parallel algorithms scale as ln N while the serial algorithm scales as N, and because the * parallel algorithms use the shared memory while the serial one uses the (generally slower) * global memory in these tests. If we do the serial algorithm on the same computer but use * shared memory (set sumControl=3), the parallel algorithms are only 3.5 times faster. Thus a * factor of 10 in the speed up is because of using the shared memory in this example. Note, * however, that shared memory can only be shared among the threads within a single block. * */ #include <hip/hip_runtime.h> #include <stdio.h> #define BLOCKSIZE 512 // Define some GPU timing utilities. These are invoked from the host program. Usage: // START_GPU; // kernelFunction <<< numBlocks, threadsPerBlock >>> (args) // STOP_GPU; // PRINT_GPU // in the host code. This estimates the time for the kernel kernelFunction to run on the GPU. // For a more extensive discusion, see Section 5.1.2 of the CUDA Best Practices Guide at // http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Best_Practices_Guide.pdf float timeGPU; hipEvent_t start, stop; #define START_GPU hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #define STOP_GPU hipEventRecord(stop, 0); hipEventSynchronize(stop);\ hipEventElapsedTime(&timeGPU, start, stop);\ hipEventDestroy(start);hipEventDestroy(stop); #define PRINT_GPU printf("\n\nTime to compute on GPU: %f ms \n", timeGPU); // Define a utility to check for CUDA errors. Place it immediately after a CUDA kernel // call in the host code. 
The initial cudaDeviceSynchronize() command ensures that the device // has completed all preceding requested tasks. #define CUDA_ERROR_CHECK hipDeviceSynchronize(); hipError_t error = hipGetLastError();\ if(error != hipSuccess){printf("***CUDA error: %s\n", hipGetErrorString(error)); exit(-1);}\ else{printf("\nNo CUDA errors detected\n" );} // Device code. Sums the elements of the array Array and puts the result in Sum __global__ void SumKernel(float* Array, float* Sum, int arraySize) { __device__ float reductionSum(int, float *); *Sum = reductionSum(arraySize, Array); } /* Function to do parallel reduction sum. This should scale as ln N. The parallel butterfly algorithm taken from the literature works generally. My homegrown parallel version works as written for even number of entries in the array, so this algorithm can be used for an odd number by padding the array with an added zero entry. Note that this version assumes that all summations are within one block, so a max of 512 threads on 1.1 devices (presently blocksize is set to 256). One option for larger sums is to break the array up onto multiple blocks, use this algorithm on each block to get a block sum, and then sum the block sums. 
*/ __device__ float reductionSum(int length, float *array) { float sum = 0.0f; // = 0 or 1 for parallel with shared memory, 2 for serial with global, 3 for serial with shared int sumControl = 0; // Copy the array to be summed into shared memory and initialize __shared__ float sarray[BLOCKSIZE]; int i = threadIdx.x; sarray[i] = 0.0f; if(i<length) sarray[i] = array[i]; __syncthreads(); if(sumControl == 0) { // Parallel butterfly sum // see http://cs.anu.edu.au/files/systems/GPUWksp/PDFs/02_CUDAParallelProgrammingModel.pdf for(int bit=BLOCKSIZE/2; bit>0; bit /= 2) { if(i<length) { float t=sarray[i] + sarray[i^bit]; __syncthreads(); sarray[i] = t; __syncthreads(); } } // The array entries sarray[i] with i<length/2 now all contain the sum sum = sarray[0]; } else if(sumControl == 1) { // Another home-made parallel version of a reduction sum. As written, this requires an even // number of entries in the array to be summed, so pad with a zero to handle odd number // (or rewrite to deal with odd number). 
int L=length; int steps = 0; int inc = 1; float t = 0; while(L > 2 ) { steps ++; if(i < (length-inc)) t = sarray[i] + sarray[i+inc]; __syncthreads(); sarray[i] = t; __syncthreads(); inc *= 2; L /= 2; } sum = sarray[0] + sarray[inc]; // This contains the sum } else if(sumControl == 2) { // Serial version of sum accessing global (not shared) memory sum = 0.0f; for(int i=0; i<length; i++) { sum += array[i]; } } else { // Serial version of sum accessing shared memory sum = 0.0f; for(int i=0; i<length; i++) { sum += sarray[i]; } } return sum; } // Host code int main(void) { int arraySize = 512; float* Array; Array = (float*) malloc(sizeof(float) * arraySize); // Fill array with some numbers for(int i=0; i<arraySize; i++) { Array[i] = 2.0f * (float) i; } // Set up device pointers float *devPtrArray; float Sum; float* devPtrSum; hipMalloc((void**)&devPtrSum, sizeof(float)); // Allocate device memory hipMalloc((void**)&devPtrArray, arraySize * sizeof(float)); // Copy array to device hipMemcpy(devPtrArray, Array, arraySize * sizeof(float), hipMemcpyHostToDevice); // Launch the kernel. START_GPU; // Start timer for device code SumKernel<<<1, 512>>>(devPtrArray, devPtrSum, arraySize); STOP_GPU; // Stop timer for device code PRINT_GPU; // Print timing for device code CUDA_ERROR_CHECK // Copy the sum back from the GPU to the host hipMemcpy(&Sum, devPtrSum, sizeof(float), hipMemcpyDeviceToHost); printf("\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n", Sum); // Free the memory allocated on the device hipFree(devPtrSum); hipFree(devPtrArray); // Free the memory allocated on the CPU free(Array); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9SumKernelPfS_i .globl _Z9SumKernelPfS_i .p2align 8 .type _Z9SumKernelPfS_i,@function _Z9SumKernelPfS_i: s_load_b32 s2, s[0:1], 0x10 v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0 ds_store_b32 v1, v2 s_waitcnt lgkmcnt(0) v_cmp_gt_i32_e32 vcc_lo, s2, v0 s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_2 s_load_b64 s[4:5], s[0:1], 0x0 s_waitcnt lgkmcnt(0) global_load_b32 v2, v1, s[4:5] s_waitcnt vmcnt(0) ds_store_b32 v1, v2 .LBB0_2: s_or_b32 exec_lo, exec_lo, s2 s_movk_i32 s2, 0x100 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_branch .LBB0_4 .p2align 6 .LBB0_3: s_or_b32 exec_lo, exec_lo, s3 s_lshr_b32 s3, s2, 1 s_cmp_gt_u32 s2, 1 s_mov_b32 s2, s3 s_cbranch_scc0 .LBB0_6 .LBB0_4: s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB0_3 v_xor_b32_e32 v2, s2, v0 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b32_e32 v2, 2, v2 ds_load_b32 v3, v1 ds_load_b32 v2, v2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_add_f32_e32 v2, v3, v2 ds_store_b32 v1, v2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_branch .LBB0_3 .LBB0_6: v_mov_b32_e32 v0, 0 s_load_b64 s[0:1], s[0:1], 0x8 ds_load_b32 v1, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9SumKernelPfS_i .amdhsa_group_segment_fixed_size 2048 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 20 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 6 
.amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9SumKernelPfS_i, .Lfunc_end0-_Z9SumKernelPfS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value .group_segment_fixed_size: 2048 .kernarg_segment_align: 8 .kernarg_segment_size: 20 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9SumKernelPfS_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z9SumKernelPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* * Example of using reducing (tree) type algorithms to parallelize finding the sum of * a set of numbers. On a GF 8600 GT the two parallel algorithms (sumControl = 0 or 1) * are about 35 times faster than the serial algorithm also running on the GPU but using * global memory (sumControl=2), for an array of 512 floats. This is both because * the parallel algorithms scale as ln N while the serial algorithm scales as N, and because the * parallel algorithms use the shared memory while the serial one uses the (generally slower) * global memory in these tests. If we do the serial algorithm on the same computer but use * shared memory (set sumControl=3), the parallel algorithms are only 3.5 times faster. Thus a * factor of 10 in the speed up is because of using the shared memory in this example. Note, * however, that shared memory can only be shared among the threads within a single block. * */ #include <hip/hip_runtime.h> #include <stdio.h> #define BLOCKSIZE 512 // Define some GPU timing utilities. These are invoked from the host program. Usage: // START_GPU; // kernelFunction <<< numBlocks, threadsPerBlock >>> (args) // STOP_GPU; // PRINT_GPU // in the host code. This estimates the time for the kernel kernelFunction to run on the GPU. // For a more extensive discusion, see Section 5.1.2 of the CUDA Best Practices Guide at // http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Best_Practices_Guide.pdf float timeGPU; hipEvent_t start, stop; #define START_GPU hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #define STOP_GPU hipEventRecord(stop, 0); hipEventSynchronize(stop);\ hipEventElapsedTime(&timeGPU, start, stop);\ hipEventDestroy(start);hipEventDestroy(stop); #define PRINT_GPU printf("\n\nTime to compute on GPU: %f ms \n", timeGPU); // Define a utility to check for CUDA errors. Place it immediately after a CUDA kernel // call in the host code. 
The initial cudaDeviceSynchronize() command ensures that the device // has completed all preceding requested tasks. #define CUDA_ERROR_CHECK hipDeviceSynchronize(); hipError_t error = hipGetLastError();\ if(error != hipSuccess){printf("***CUDA error: %s\n", hipGetErrorString(error)); exit(-1);}\ else{printf("\nNo CUDA errors detected\n" );} // Device code. Sums the elements of the array Array and puts the result in Sum __global__ void SumKernel(float* Array, float* Sum, int arraySize) { __device__ float reductionSum(int, float *); *Sum = reductionSum(arraySize, Array); } /* Function to do parallel reduction sum. This should scale as ln N. The parallel butterfly algorithm taken from the literature works generally. My homegrown parallel version works as written for even number of entries in the array, so this algorithm can be used for an odd number by padding the array with an added zero entry. Note that this version assumes that all summations are within one block, so a max of 512 threads on 1.1 devices (presently blocksize is set to 256). One option for larger sums is to break the array up onto multiple blocks, use this algorithm on each block to get a block sum, and then sum the block sums. 
*/ __device__ float reductionSum(int length, float *array) { float sum = 0.0f; // = 0 or 1 for parallel with shared memory, 2 for serial with global, 3 for serial with shared int sumControl = 0; // Copy the array to be summed into shared memory and initialize __shared__ float sarray[BLOCKSIZE]; int i = threadIdx.x; sarray[i] = 0.0f; if(i<length) sarray[i] = array[i]; __syncthreads(); if(sumControl == 0) { // Parallel butterfly sum // see http://cs.anu.edu.au/files/systems/GPUWksp/PDFs/02_CUDAParallelProgrammingModel.pdf for(int bit=BLOCKSIZE/2; bit>0; bit /= 2) { if(i<length) { float t=sarray[i] + sarray[i^bit]; __syncthreads(); sarray[i] = t; __syncthreads(); } } // The array entries sarray[i] with i<length/2 now all contain the sum sum = sarray[0]; } else if(sumControl == 1) { // Another home-made parallel version of a reduction sum. As written, this requires an even // number of entries in the array to be summed, so pad with a zero to handle odd number // (or rewrite to deal with odd number). 
int L=length; int steps = 0; int inc = 1; float t = 0; while(L > 2 ) { steps ++; if(i < (length-inc)) t = sarray[i] + sarray[i+inc]; __syncthreads(); sarray[i] = t; __syncthreads(); inc *= 2; L /= 2; } sum = sarray[0] + sarray[inc]; // This contains the sum } else if(sumControl == 2) { // Serial version of sum accessing global (not shared) memory sum = 0.0f; for(int i=0; i<length; i++) { sum += array[i]; } } else { // Serial version of sum accessing shared memory sum = 0.0f; for(int i=0; i<length; i++) { sum += sarray[i]; } } return sum; } // Host code int main(void) { int arraySize = 512; float* Array; Array = (float*) malloc(sizeof(float) * arraySize); // Fill array with some numbers for(int i=0; i<arraySize; i++) { Array[i] = 2.0f * (float) i; } // Set up device pointers float *devPtrArray; float Sum; float* devPtrSum; hipMalloc((void**)&devPtrSum, sizeof(float)); // Allocate device memory hipMalloc((void**)&devPtrArray, arraySize * sizeof(float)); // Copy array to device hipMemcpy(devPtrArray, Array, arraySize * sizeof(float), hipMemcpyHostToDevice); // Launch the kernel. START_GPU; // Start timer for device code SumKernel<<<1, 512>>>(devPtrArray, devPtrSum, arraySize); STOP_GPU; // Stop timer for device code PRINT_GPU; // Print timing for device code CUDA_ERROR_CHECK // Copy the sum back from the GPU to the host hipMemcpy(&Sum, devPtrSum, sizeof(float), hipMemcpyDeviceToHost); printf("\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n", Sum); // Free the memory allocated on the device hipFree(devPtrSum); hipFree(devPtrArray); // Free the memory allocated on the CPU free(Array); return 0; }
.text .file "reducedSum.hip" .globl _Z24__device_stub__SumKernelPfS_i # -- Begin function _Z24__device_stub__SumKernelPfS_i .p2align 4, 0x90 .type _Z24__device_stub__SumKernelPfS_i,@function _Z24__device_stub__SumKernelPfS_i: # @_Z24__device_stub__SumKernelPfS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9SumKernelPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__SumKernelPfS_i, .Lfunc_end0-_Z24__device_stub__SumKernelPfS_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $128, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -16 movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %rbx xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 addss %xmm0, %xmm0 movss %xmm0, (%rbx,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB1_1 # %bb.2: leaq 8(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 16(%rsp), %rdi movl $2048, %esi # imm = 0x800 callq hipMalloc movq 16(%rsp), %rdi movl $2048, %edx # imm = 0x800 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movl $start, %edi callq hipEventCreate movl $stop, %edi callq hipEventCreate movq start(%rip), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4294967297, %rdi # imm = 0x100000001 leaq 511(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, 
%r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movl $512, 28(%rsp) # imm = 0x200 leaq 120(%rsp), %rax movq %rax, 32(%rsp) leaq 112(%rsp), %rax movq %rax, 40(%rsp) leaq 28(%rsp), %rax movq %rax, 48(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z9SumKernelPfS_i, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: movq stop(%rip), %rdi xorl %esi, %esi callq hipEventRecord movq stop(%rip), %rdi callq hipEventSynchronize movq start(%rip), %rsi movq stop(%rip), %rdx movl $timeGPU, %edi callq hipEventElapsedTime movq start(%rip), %rdi callq hipEventDestroy movq stop(%rip), %rdi callq hipEventDestroy movss timeGPU(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf callq hipDeviceSynchronize callq hipGetLastError testl %eax, %eax jne .LBB1_6 # %bb.5: movl $.Lstr, %edi callq puts@PLT movq 8(%rsp), %rsi leaq 32(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movss 32(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.3, %edi movb $1, %al callq printf movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free xorl %eax, %eax addq $128, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB1_6: .cfi_def_cfa_offset 144 movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %edi movq %rax, %rsi xorl %eax, %eax callq printf movl $-1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor 
.cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9SumKernelPfS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type timeGPU,@object # @timeGPU .bss .globl timeGPU .p2align 2, 0x0 timeGPU: .long 0x00000000 # float 0 .size timeGPU, 4 .type start,@object # @start .globl start .p2align 3, 0x0 start: .quad 0 .size start, 8 .type stop,@object # @stop .globl stop .p2align 3, 0x0 stop: .quad 0 .size stop, 8 .type _Z9SumKernelPfS_i,@object # @_Z9SumKernelPfS_i .section .rodata,"a",@progbits .globl _Z9SumKernelPfS_i .p2align 3, 0x0 _Z9SumKernelPfS_i: .quad _Z24__device_stub__SumKernelPfS_i .size _Z9SumKernelPfS_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\n\nTime to compute on GPU: %f ms \n" .size .L.str, 34 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "***CUDA error: %s\n" .size .L.str.1, 19 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n" .size .L.str.3, 52 .type 
.L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9SumKernelPfS_i" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\nNo CUDA errors detected" .size .Lstr, 25 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__SumKernelPfS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym timeGPU .addrsig_sym start .addrsig_sym stop .addrsig_sym _Z9SumKernelPfS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9SumKernelPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0030*/ BSSY B0, 0xd0 ; /* 0x0000009000007945 */ /* 0x000fe40003800000 */ /*0040*/ STS [R0.X4], RZ ; /* 0x000000ff00007388 */ /* 0x0011e20000004800 */ /*0050*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x040fe40003f06270 */ /*0060*/ SHF.L.U32 R2, R0, 0x2, RZ ; /* 0x0000000200027819 */ /* 0x000fd600000006ff */ /*0070*/ @P0 BRA 0xc0 ; /* 0x0000004000000947 */ /* 0x000fea0003800000 */ /*0080*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x001fc800078e00ff */ /*0090*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */ /* 0x000fcc00078e0205 */ /*00a0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ STS [R0.X4], R5 ; /* 0x0000000500007388 */ /* 0x0041e40000004800 */ /*00c0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x001fea0003800000 */ /*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*00e0*/ @P0 BRA 0x4f0 ; /* 0x0000040000000947 */ /* 0x000fea0003800000 */ /*00f0*/ LOP3.LUT R4, R2.reuse, 0x400, RZ, 0x3c, !PT ; /* 0x0000040002047812 */ /* 0x040fe200078e3cff */ /*0100*/ LDS R3, [R0.X4] ; /* 0x0000000000037984 */ /* 0x000fe20000004800 */ /*0110*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */ /* 0x000fe20003800000 */ /*0120*/ LOP3.LUT R5, R2, 0x200, RZ, 0x3c, !PT ; /* 0x0000020002057812 */ /* 0x000fc600078e3cff */ /*0130*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e240000000800 */ /*0140*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */ /* 0x001fe40000000000 */ /*0150*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 
*/ /*0160*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0001e80000004800 */ /*0170*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0180*/ LOP3.LUT R3, R2, 0x80, RZ, 0x3c, !PT ; /* 0x0000008002037812 */ /* 0x001fca00078e3cff */ /*0190*/ LDS R5, [R5] ; /* 0x0000000005057984 */ /* 0x000fe80000000800 */ /*01a0*/ LDS R6, [R0.X4] ; /* 0x0000000000067984 */ /* 0x000e240000004800 */ /*01b0*/ FADD R7, R5, R6 ; /* 0x0000000605077221 */ /* 0x001fe40000000000 */ /*01c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*01d0*/ LOP3.LUT R6, R2, 0x100, RZ, 0x3c, !PT ; /* 0x0000010002067812 */ /* 0x000fca00078e3cff */ /*01e0*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */ /* 0x000fe80000004800 */ /*01f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0200*/ LDS R6, [R6] ; /* 0x0000000006067984 */ /* 0x000fe80000000800 */ /*0210*/ LDS R9, [R0.X4] ; /* 0x0000000000097984 */ /* 0x000e240000004800 */ /*0220*/ FADD R9, R6, R9 ; /* 0x0000000906097221 */ /* 0x001fc40000000000 */ /*0230*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0240*/ LOP3.LUT R6, R2, 0x20, RZ, 0x3c, !PT ; /* 0x0000002002067812 */ /* 0x000fca00078e3cff */ /*0250*/ STS [R0.X4], R9 ; /* 0x0000000900007388 */ /* 0x000fe80000004800 */ /*0260*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0270*/ LDS R3, [R3] ; /* 0x0000000003037984 */ /* 0x000fe80000000800 */ /*0280*/ LDS R4, [R0.X4] ; /* 0x0000000000047984 */ /* 0x000e240000004800 */ /*0290*/ FADD R5, R3, R4 ; /* 0x0000000403057221 */ /* 0x001fc40000000000 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*02b0*/ LOP3.LUT R4, R2, 0x40, RZ, 0x3c, !PT ; /* 0x0000004002047812 */ /* 0x000fca00078e3cff */ /*02c0*/ STS [R0.X4], R5 ; /* 0x0000000500007388 */ /* 0x0001e80000004800 */ /*02d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 
0x000fe20000010000 */ /*02e0*/ LOP3.LUT R5, R2, 0x10, RZ, 0x3c, !PT ; /* 0x0000001002057812 */ /* 0x001fca00078e3cff */ /*02f0*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000fe80000000800 */ /*0300*/ LDS R7, [R0.X4] ; /* 0x0000000000077984 */ /* 0x000e240000004800 */ /*0310*/ FADD R7, R4, R7 ; /* 0x0000000704077221 */ /* 0x001fe40000000000 */ /*0320*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0330*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */ /* 0x000fe80000004800 */ /*0340*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0350*/ LDS R6, [R6] ; /* 0x0000000006067984 */ /* 0x000fe80000000800 */ /*0360*/ LDS R3, [R0.X4] ; /* 0x0000000000037984 */ /* 0x000e240000004800 */ /*0370*/ FADD R3, R6, R3 ; /* 0x0000000306037221 */ /* 0x001fc40000000000 */ /*0380*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0390*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x000fe80000004800 */ /*03a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*03b0*/ LDS R5, [R5] ; /* 0x0000000005057984 */ /* 0x000fe80000000800 */ /*03c0*/ LDS R4, [R0.X4] ; /* 0x0000000000047984 */ /* 0x000e240000004800 */ /*03d0*/ FADD R7, R5, R4 ; /* 0x0000000405077221 */ /* 0x001fc40000000000 */ /*03e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*03f0*/ LOP3.LUT R4, R2.reuse, 0x8, RZ, 0x3c, !PT ; /* 0x0000000802047812 */ /* 0x040fe400078e3cff */ /*0400*/ LOP3.LUT R2, R2, 0x4, RZ, 0x3c, !PT ; /* 0x0000000402027812 */ /* 0x000fc600078e3cff */ /*0410*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */ /* 0x000fe80000004800 */ /*0420*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0430*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000fe80000000800 */ /*0440*/ LDS R9, [R0.X4] ; /* 0x0000000000097984 */ /* 0x000e240000004800 */ /*0450*/ FADD R9, R4, R9 ; /* 0x0000000904097221 */ /* 0x001fc40000000000 */ 
/*0460*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0470*/ STS [R0.X4], R9 ; /* 0x0000000900007388 */ /* 0x000fe80000004800 */ /*0480*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0490*/ LDS R2, [R2] ; /* 0x0000000002027984 */ /* 0x000fe80000000800 */ /*04a0*/ LDS R3, [R0.X4] ; /* 0x0000000000037984 */ /* 0x000e240000004800 */ /*04b0*/ FADD R3, R2, R3 ; /* 0x0000000302037221 */ /* 0x001fc40000000000 */ /*04c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*04d0*/ STS [R0.X4], R3 ; /* 0x0000000300007388 */ /* 0x0001e80000004800 */ /*04e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*04f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */ /* 0x000e620000000800 */ /*0500*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */ /* 0x000fe20000000f00 */ /*0510*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */ /* 0x001fca00078e00ff */ /*0520*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x002fe2000c101904 */ /*0530*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0540*/ BRA 0x540; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9SumKernelPfS_i .globl _Z9SumKernelPfS_i .p2align 8 .type _Z9SumKernelPfS_i,@function _Z9SumKernelPfS_i: s_load_b32 s2, s[0:1], 0x10 v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0 ds_store_b32 v1, v2 s_waitcnt lgkmcnt(0) v_cmp_gt_i32_e32 vcc_lo, s2, v0 s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_2 s_load_b64 s[4:5], s[0:1], 0x0 s_waitcnt lgkmcnt(0) global_load_b32 v2, v1, s[4:5] s_waitcnt vmcnt(0) ds_store_b32 v1, v2 .LBB0_2: s_or_b32 exec_lo, exec_lo, s2 s_movk_i32 s2, 0x100 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_branch .LBB0_4 .p2align 6 .LBB0_3: s_or_b32 exec_lo, exec_lo, s3 s_lshr_b32 s3, s2, 1 s_cmp_gt_u32 s2, 1 s_mov_b32 s2, s3 s_cbranch_scc0 .LBB0_6 .LBB0_4: s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB0_3 v_xor_b32_e32 v2, s2, v0 s_delay_alu instid0(VALU_DEP_1) v_lshlrev_b32_e32 v2, 2, v2 ds_load_b32 v3, v1 ds_load_b32 v2, v2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_add_f32_e32 v2, v3, v2 ds_store_b32 v1, v2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_branch .LBB0_3 .LBB0_6: v_mov_b32_e32 v0, 0 s_load_b64 s[0:1], s[0:1], 0x8 ds_load_b32 v1, v0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9SumKernelPfS_i .amdhsa_group_segment_fixed_size 2048 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 20 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 6 
.amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9SumKernelPfS_i, .Lfunc_end0-_Z9SumKernelPfS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value .group_segment_fixed_size: 2048 .kernarg_segment_align: 8 .kernarg_segment_size: 20 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9SumKernelPfS_i .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z9SumKernelPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0005905e_00000000-6_reducedSum.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z12reductionSumiPf .type _Z12reductionSumiPf, @function _Z12reductionSumiPf: .LFB2057: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2057: .size _Z12reductionSumiPf, .-_Z12reductionSumiPf .globl _Z31__device_stub__Z9SumKernelPfS_iPfS_i .type _Z31__device_stub__Z9SumKernelPfS_iPfS_i, @function _Z31__device_stub__Z9SumKernelPfS_iPfS_i: .LFB2083: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L9 .L5: movq 120(%rsp), %rax subq %fs:40, %rax jne .L10 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L9: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9SumKernelPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L5 .L10: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size 
_Z31__device_stub__Z9SumKernelPfS_iPfS_i, .-_Z31__device_stub__Z9SumKernelPfS_iPfS_i .globl _Z9SumKernelPfS_i .type _Z9SumKernelPfS_i, @function _Z9SumKernelPfS_i: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z9SumKernelPfS_iPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z9SumKernelPfS_i, .-_Z9SumKernelPfS_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "\n\nTime to compute on GPU: %f ms \n" .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "***CUDA error: %s\n" .LC2: .string "\nNo CUDA errors detected\n" .section .rodata.str1.8 .align 8 .LC3: .string "\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $48, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $2048, %edi call malloc@PLT movq %rax, %rbx movl $0, %eax .L14: pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 addss %xmm0, %xmm0 movss %xmm0, (%rbx,%rax,4) addq $1, %rax cmpq $512, %rax jne .L14 leaq 8(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movq %rsp, %rdi movl $2048, %esi call cudaMalloc@PLT movl $1, %ecx movl $2048, %edx movq %rbx, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT leaq start(%rip), %rdi call cudaEventCreate@PLT leaq stop(%rip), %rdi call cudaEventCreate@PLT movl $0, %esi movq start(%rip), %rdi call cudaEventRecord@PLT movl $512, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L20 .L15: movl $0, %esi movq stop(%rip), %rdi call cudaEventRecord@PLT movq stop(%rip), %rdi call cudaEventSynchronize@PLT movq stop(%rip), %rdx movq start(%rip), %rsi leaq timeGPU(%rip), %rdi call cudaEventElapsedTime@PLT movq start(%rip), %rdi call 
cudaEventDestroy@PLT movq stop(%rip), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd timeGPU(%rip), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT call cudaDeviceSynchronize@PLT call cudaGetLastError@PLT testl %eax, %eax jne .L21 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 28(%rsp), %rdi movl $2, %ecx movl $4, %edx movq 8(%rsp), %rsi call cudaMemcpy@PLT pxor %xmm0, %xmm0 cvtss2sd 28(%rsp), %xmm0 leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq (%rsp), %rdi call cudaFree@PLT movq %rbx, %rdi call free@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L22 movl $0, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state movl $512, %edx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z31__device_stub__Z9SumKernelPfS_iPfS_i jmp .L15 .L21: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $-1, %edi call exit@PLT .L22: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z9SumKernelPfS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z9SumKernelPfS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl stop .bss .align 8 .type stop, @object .size stop, 8 stop: .zero 8 .globl start .align 8 .type start, @object .size start, 8 start: .zero 8 .globl timeGPU .align 4 .type timeGPU, @object .size timeGPU, 4 timeGPU: .zero 4 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "reducedSum.hip" .globl _Z24__device_stub__SumKernelPfS_i # -- Begin function _Z24__device_stub__SumKernelPfS_i .p2align 4, 0x90 .type _Z24__device_stub__SumKernelPfS_i,@function _Z24__device_stub__SumKernelPfS_i: # @_Z24__device_stub__SumKernelPfS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9SumKernelPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__SumKernelPfS_i, .Lfunc_end0-_Z24__device_stub__SumKernelPfS_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $128, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -16 movl $2048, %edi # imm = 0x800 callq malloc movq %rax, %rbx xorl %eax, %eax .p2align 4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 addss %xmm0, %xmm0 movss %xmm0, (%rbx,%rax,4) incq %rax cmpq $512, %rax # imm = 0x200 jne .LBB1_1 # %bb.2: leaq 8(%rsp), %rdi movl $4, %esi callq hipMalloc leaq 16(%rsp), %rdi movl $2048, %esi # imm = 0x800 callq hipMalloc movq 16(%rsp), %rdi movl $2048, %edx # imm = 0x800 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movl $start, %edi callq hipEventCreate movl $stop, %edi callq hipEventCreate movq start(%rip), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4294967297, %rdi # imm = 0x100000001 leaq 511(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, 
%r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movl $512, 28(%rsp) # imm = 0x200 leaq 120(%rsp), %rax movq %rax, 32(%rsp) leaq 112(%rsp), %rax movq %rax, 40(%rsp) leaq 28(%rsp), %rax movq %rax, 48(%rsp) leaq 96(%rsp), %rdi leaq 80(%rsp), %rsi leaq 72(%rsp), %rdx leaq 64(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 32(%rsp), %r9 movl $_Z9SumKernelPfS_i, %edi pushq 64(%rsp) .cfi_adjust_cfa_offset 8 pushq 80(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: movq stop(%rip), %rdi xorl %esi, %esi callq hipEventRecord movq stop(%rip), %rdi callq hipEventSynchronize movq start(%rip), %rsi movq stop(%rip), %rdx movl $timeGPU, %edi callq hipEventElapsedTime movq start(%rip), %rdi callq hipEventDestroy movq stop(%rip), %rdi callq hipEventDestroy movss timeGPU(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf callq hipDeviceSynchronize callq hipGetLastError testl %eax, %eax jne .LBB1_6 # %bb.5: movl $.Lstr, %edi callq puts@PLT movq 8(%rsp), %rsi leaq 32(%rsp), %rdi movl $4, %edx movl $2, %ecx callq hipMemcpy movss 32(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.3, %edi movb $1, %al callq printf movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free xorl %eax, %eax addq $128, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB1_6: .cfi_def_cfa_offset 144 movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %edi movq %rax, %rsi xorl %eax, %eax callq printf movl $-1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor 
.cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9SumKernelPfS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type timeGPU,@object # @timeGPU .bss .globl timeGPU .p2align 2, 0x0 timeGPU: .long 0x00000000 # float 0 .size timeGPU, 4 .type start,@object # @start .globl start .p2align 3, 0x0 start: .quad 0 .size start, 8 .type stop,@object # @stop .globl stop .p2align 3, 0x0 stop: .quad 0 .size stop, 8 .type _Z9SumKernelPfS_i,@object # @_Z9SumKernelPfS_i .section .rodata,"a",@progbits .globl _Z9SumKernelPfS_i .p2align 3, 0x0 _Z9SumKernelPfS_i: .quad _Z24__device_stub__SumKernelPfS_i .size _Z9SumKernelPfS_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\n\nTime to compute on GPU: %f ms \n" .size .L.str, 34 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "***CUDA error: %s\n" .size .L.str.1, 19 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n" .size .L.str.3, 52 .type 
.L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9SumKernelPfS_i" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\nNo CUDA errors detected" .size .Lstr, 25 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__SumKernelPfS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym timeGPU .addrsig_sym start .addrsig_sym stop .addrsig_sym _Z9SumKernelPfS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" // customDllFunctions.cu ////////////////////////// // Template to write .dlls ////////////////////////// /* Include the following directories for the program to run appropriately: /////////////////////// in the VC++ directories: $(VC_IncludePath); $(WindowsSDK_IncludePath); C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc; $(CUDA_INC_PATH) C:\Program Files\National Instruments\LabVIEW 2015\cintools //////////////////////// CUDA/C/C++ directories: ./ ../../common/inc $(CudaToolkitDir)/include //////////////////////////////// Linker/General include libraries: cudart.lib //changed the target machine platform from 32 to 64 bit */ //////////////////////////////////////////////////////////////////////////////// // Complex operations, //////////////////////////////////////////////////////////////////////////////// __global__ void ScaleArray(float *d_a, float alpha, int arraySize) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; float temp; for (int i = threadID; i < arraySize; i += numThreads) { temp = d_a[i]; d_a[i] = alpha*temp; } }
code for sm_80 Function : _Z10ScaleArrayPffi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */ /* 0x001fca00078e0200 */ /*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x16c], PT ; /* 0x00005b0003007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */ /* 0x000fe20000000f00 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ BSSY B0, 0x2f0 ; /* 0x0000026000007945 */ /* 0x000fe60003800000 */ /*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */ /* 0x000fc800078e02ff */ /*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */ /* 0x000e220000209000 */ /*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0a00 */ /*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */ /* 0x040fe40007ffe0ff */ /*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe40003f45070 */ /*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */ /* 0x000fc800078e33ff */ /*00f0*/ IADD3 R7, R7, c[0x0][0x16c], R0 ; /* 0x00005b0007077a10 */ /* 0x000fe20007ffe000 */ /*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */ /* 0x001e240000001000 */ /*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */ /* 0x001fcc0007ffe0ff */ /*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x000064000021f000 */ /*0130*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x001fe400078e00ff */ /*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */ /* 0x002fc800078e02ff */ /*0150*/ IMAD.HI.U32 R2, 
R5, R9, R4 ; /* 0x0000000905027227 */ /* 0x000fcc00078e0004 */ /*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */ /* 0x000fca00078e00ff */ /*0170*/ IADD3 R4, -R2, RZ, RZ ; /* 0x000000ff02047210 */ /* 0x000fca0007ffe1ff */ /*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */ /* 0x000fca00078e0207 */ /*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */ /* 0x000fda0003f06070 */ /*01a0*/ @P0 IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107070824 */ /* 0x000fe200078e0a00 */ /*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */ /* 0x000fc80007ffe0ff */ /*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */ /* 0x000fda0003f26070 */ /*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */ /* 0x000fe40007ffe0ff */ /*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */ /* 0x000fc800078e33ff */ /*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */ /* 0x040fe40007ffe0ff */ /*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe40003f26070 */ /*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */ /* 0x000fda000780c0ff */ /*0220*/ @!P0 BRA 0x2e0 ; /* 0x000000b000008947 */ /* 0x000fea0003800000 */ /*0230*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */ /* 0x000fe200000001ff */ /*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */ /* 0x000fd200078e0004 */ /*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x160] ; /* 0x0000580003047625 */ /* 0x000fca00078e0206 */ /*0260*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */ /* 0x000ea2000c1e1900 */ /*0270*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe40007ffe0ff */ /*0280*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */ /* 0x000fe40007ffe0ff */ /*0290*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fe20003f05270 */ /*02a0*/ FMUL R7, R6, c[0x0][0x168] ; /* 0x00005a0006077a20 */ /* 0x004fca0000400000 */ /*02b0*/ STG.E [R4.64], R7 ; /* 
0x0000000704007986 */ /* 0x0001e4000c101904 */ /*02c0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */ /* 0x001fca00078e0204 */ /*02d0*/ @P0 BRA 0x260 ; /* 0xffffff8000000947 */ /* 0x000fea000383ffff */ /*02e0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*02f0*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*0300*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x001fc800078e00ff */ /*0310*/ IMAD.WIDE R4, R3, R4, c[0x0][0x160] ; /* 0x0000580003047625 */ /* 0x000fca00078e0204 */ /*0320*/ LDG.E R2, [R4.64] ; /* 0x0000000404027981 */ /* 0x000ea2000c1e1900 */ /*0330*/ IMAD.WIDE R6, R0, 0x4, R4 ; /* 0x0000000400067825 */ /* 0x000fc800078e0204 */ /*0340*/ FMUL R13, R2, c[0x0][0x168] ; /* 0x00005a00020d7a20 */ /* 0x004fca0000400000 */ /*0350*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0001e8000c101904 */ /*0360*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x000ea2000c1e1900 */ /*0370*/ IMAD.WIDE R8, R0, 0x4, R6 ; /* 0x0000000400087825 */ /* 0x000fc800078e0206 */ /*0380*/ FMUL R15, R2, c[0x0][0x168] ; /* 0x00005a00020f7a20 */ /* 0x004fca0000400000 */ /*0390*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001e8000c101904 */ /*03a0*/ LDG.E R2, [R8.64] ; /* 0x0000000408027981 */ /* 0x000ea2000c1e1900 */ /*03b0*/ IMAD.WIDE R10, R0, 0x4, R8 ; /* 0x00000004000a7825 */ /* 0x000fc800078e0208 */ /*03c0*/ FMUL R17, R2, c[0x0][0x168] ; /* 0x00005a0002117a20 */ /* 0x004fca0000400000 */ /*03d0*/ STG.E [R8.64], R17 ; /* 0x0000001108007986 */ /* 0x0001e8000c101904 */ /*03e0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */ /* 0x000ea2000c1e1900 */ /*03f0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */ /* 0x000fc80007ffe000 */ /*0400*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */ /* 0x000fc80007ffe000 */ /*0410*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x16c], PT ; /* 0x00005b0003007a0c */ /* 0x000fe20003f06270 */ /*0420*/ FMUL R19, R2, c[0x0][0x168] ; /* 0x00005a0002137a20 */ /* 0x004fca0000400000 */ /*0430*/ STG.E 
[R10.64], R19 ; /* 0x000000130a007986 */ /* 0x0001ee000c101904 */ /*0440*/ @!P0 BRA 0x300 ; /* 0xfffffeb000008947 */ /* 0x000fea000383ffff */ /*0450*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0460*/ BRA 0x460; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0480*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0490*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" // customDllFunctions.cu ////////////////////////// // Template to write .dlls ////////////////////////// /* Include the following directories for the program to run appropriately: /////////////////////// in the VC++ directories: $(VC_IncludePath); $(WindowsSDK_IncludePath); C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc; $(CUDA_INC_PATH) C:\Program Files\National Instruments\LabVIEW 2015\cintools //////////////////////// CUDA/C/C++ directories: ./ ../../common/inc $(CudaToolkitDir)/include //////////////////////////////// Linker/General include libraries: cudart.lib //changed the target machine platform from 32 to 64 bit */ //////////////////////////////////////////////////////////////////////////////// // Complex operations, //////////////////////////////////////////////////////////////////////////////// __global__ void ScaleArray(float *d_a, float alpha, int arraySize) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; float temp; for (int i = threadID; i < arraySize; i += numThreads) { temp = d_a[i]; d_a[i] = alpha*temp; } }
.file "tmpxft_001489d9_00000000-6_ScaleArray.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z32__device_stub__Z10ScaleArrayPffiPffi .type _Z32__device_stub__Z10ScaleArrayPffiPffi, @function _Z32__device_stub__Z10ScaleArrayPffiPffi: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movss %xmm0, 4(%rsp) movl %esi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z10ScaleArrayPffi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z32__device_stub__Z10ScaleArrayPffiPffi, .-_Z32__device_stub__Z10ScaleArrayPffiPffi .globl _Z10ScaleArrayPffi .type _Z10ScaleArrayPffi, @function _Z10ScaleArrayPffi: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z10ScaleArrayPffiPffi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10ScaleArrayPffi, 
.-_Z10ScaleArrayPffi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z10ScaleArrayPffi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10ScaleArrayPffi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" // customDllFunctions.cu ////////////////////////// // Template to write .dlls ////////////////////////// /* Include the following directories for the program to run appropriately: /////////////////////// in the VC++ directories: $(VC_IncludePath); $(WindowsSDK_IncludePath); C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc; $(CUDA_INC_PATH) C:\Program Files\National Instruments\LabVIEW 2015\cintools //////////////////////// CUDA/C/C++ directories: ./ ../../common/inc $(CudaToolkitDir)/include //////////////////////////////// Linker/General include libraries: cudart.lib //changed the target machine platform from 32 to 64 bit */ //////////////////////////////////////////////////////////////////////////////// // Complex operations, //////////////////////////////////////////////////////////////////////////////// __global__ void ScaleArray(float *d_a, float alpha, int arraySize) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; float temp; for (int i = threadID; i < arraySize; i += numThreads) { temp = d_a[i]; d_a[i] = alpha*temp; } }
#include <hip/hip_runtime.h> #include "includes.h" // customDllFunctions.cu ////////////////////////// // Template to write .dlls ////////////////////////// /* Include the following directories for the program to run appropriately: /////////////////////// in the VC++ directories: $(VC_IncludePath); $(WindowsSDK_IncludePath); C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc; $(CUDA_INC_PATH) C:\Program Files\National Instruments\LabVIEW 2015\cintools //////////////////////// CUDA/C/C++ directories: ./ ../../common/inc $(CudaToolkitDir)/include //////////////////////////////// Linker/General include libraries: cudart.lib //changed the target machine platform from 32 to 64 bit */ //////////////////////////////////////////////////////////////////////////////// // Complex operations, //////////////////////////////////////////////////////////////////////////////// __global__ void ScaleArray(float *d_a, float alpha, int arraySize) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; float temp; for (int i = threadID; i < arraySize; i += numThreads) { temp = d_a[i]; d_a[i] = alpha*temp; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" // customDllFunctions.cu ////////////////////////// // Template to write .dlls ////////////////////////// /* Include the following directories for the program to run appropriately: /////////////////////// in the VC++ directories: $(VC_IncludePath); $(WindowsSDK_IncludePath); C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc; $(CUDA_INC_PATH) C:\Program Files\National Instruments\LabVIEW 2015\cintools //////////////////////// CUDA/C/C++ directories: ./ ../../common/inc $(CudaToolkitDir)/include //////////////////////////////// Linker/General include libraries: cudart.lib //changed the target machine platform from 32 to 64 bit */ //////////////////////////////////////////////////////////////////////////////// // Complex operations, //////////////////////////////////////////////////////////////////////////////// __global__ void ScaleArray(float *d_a, float alpha, int arraySize) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; float temp; for (int i = threadID; i < arraySize; i += numThreads) { temp = d_a[i]; d_a[i] = alpha*temp; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10ScaleArrayPffi .globl _Z10ScaleArrayPffi .p2align 8 .type _Z10ScaleArrayPffi,@function _Z10ScaleArrayPffi: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b32 s6, s[0:1], 0xc s_add_u32 s2, s0, 16 s_addc_u32 s3, s1, 0 s_mov_b32 s5, exec_lo s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] v_cmpx_gt_i32_e64 s6, v1 s_cbranch_execz .LBB0_3 s_load_b32 s2, s[2:3], 0x0 s_clause 0x1 s_load_b64 s[8:9], s[0:1], 0x0 s_load_b32 s1, s[0:1], 0x8 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] s_waitcnt lgkmcnt(0) s_mul_i32 s2, s2, s4 v_add_co_u32 v2, vcc_lo, s8, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo s_ashr_i32 s3, s2, 31 s_lshl_b64 s[4:5], s[2:3], 2 s_mov_b32 s3, 0 .LBB0_2: global_load_b32 v0, v[2:3], off s_waitcnt vmcnt(0) v_dual_mul_f32 v0, s1, v0 :: v_dual_add_nc_u32 v1, s2, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_cmp_le_i32_e32 vcc_lo, s6, v1 global_store_b32 v[2:3], v0, off v_add_co_u32 v2, s0, v2, s4 v_add_co_ci_u32_e64 v3, s0, s5, v3, s0 s_or_b32 s3, vcc_lo, s3 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s3 s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10ScaleArrayPffi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 
.amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10ScaleArrayPffi, .Lfunc_end0-_Z10ScaleArrayPffi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 
.value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10ScaleArrayPffi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10ScaleArrayPffi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" // customDllFunctions.cu ////////////////////////// // Template to write .dlls ////////////////////////// /* Include the following directories for the program to run appropriately: /////////////////////// in the VC++ directories: $(VC_IncludePath); $(WindowsSDK_IncludePath); C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc; $(CUDA_INC_PATH) C:\Program Files\National Instruments\LabVIEW 2015\cintools //////////////////////// CUDA/C/C++ directories: ./ ../../common/inc $(CudaToolkitDir)/include //////////////////////////////// Linker/General include libraries: cudart.lib //changed the target machine platform from 32 to 64 bit */ //////////////////////////////////////////////////////////////////////////////// // Complex operations, //////////////////////////////////////////////////////////////////////////////// __global__ void ScaleArray(float *d_a, float alpha, int arraySize) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; float temp; for (int i = threadID; i < arraySize; i += numThreads) { temp = d_a[i]; d_a[i] = alpha*temp; } }
.text .file "ScaleArray.hip" .globl _Z25__device_stub__ScaleArrayPffi # -- Begin function _Z25__device_stub__ScaleArrayPffi .p2align 4, 0x90 .type _Z25__device_stub__ScaleArrayPffi,@function _Z25__device_stub__ScaleArrayPffi: # @_Z25__device_stub__ScaleArrayPffi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movss %xmm0, 4(%rsp) movl %esi, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z10ScaleArrayPffi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z25__device_stub__ScaleArrayPffi, .Lfunc_end0-_Z25__device_stub__ScaleArrayPffi .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10ScaleArrayPffi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi 
testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10ScaleArrayPffi,@object # @_Z10ScaleArrayPffi .section .rodata,"a",@progbits .globl _Z10ScaleArrayPffi .p2align 3, 0x0 _Z10ScaleArrayPffi: .quad _Z25__device_stub__ScaleArrayPffi .size _Z10ScaleArrayPffi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10ScaleArrayPffi" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__ScaleArrayPffi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10ScaleArrayPffi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z10ScaleArrayPffi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */ /* 0x001fca00078e0200 */ /*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x16c], PT ; /* 0x00005b0003007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */ /* 0x000fe20000000f00 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ BSSY B0, 0x2f0 ; /* 0x0000026000007945 */ /* 0x000fe60003800000 */ /*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */ /* 0x000fc800078e02ff */ /*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */ /* 0x000e220000209000 */ /*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0a00 */ /*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */ /* 0x040fe40007ffe0ff */ /*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe40003f45070 */ /*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */ /* 0x000fc800078e33ff */ /*00f0*/ IADD3 R7, R7, c[0x0][0x16c], R0 ; /* 0x00005b0007077a10 */ /* 0x000fe20007ffe000 */ /*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */ /* 0x001e240000001000 */ /*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */ /* 0x001fcc0007ffe0ff */ /*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x000064000021f000 */ /*0130*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x001fe400078e00ff */ /*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */ /* 0x002fc800078e02ff */ /*0150*/ IMAD.HI.U32 R2, 
R5, R9, R4 ; /* 0x0000000905027227 */ /* 0x000fcc00078e0004 */ /*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */ /* 0x000fca00078e00ff */ /*0170*/ IADD3 R4, -R2, RZ, RZ ; /* 0x000000ff02047210 */ /* 0x000fca0007ffe1ff */ /*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */ /* 0x000fca00078e0207 */ /*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */ /* 0x000fda0003f06070 */ /*01a0*/ @P0 IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107070824 */ /* 0x000fe200078e0a00 */ /*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */ /* 0x000fc80007ffe0ff */ /*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */ /* 0x000fda0003f26070 */ /*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */ /* 0x000fe40007ffe0ff */ /*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */ /* 0x000fc800078e33ff */ /*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */ /* 0x040fe40007ffe0ff */ /*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe40003f26070 */ /*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */ /* 0x000fda000780c0ff */ /*0220*/ @!P0 BRA 0x2e0 ; /* 0x000000b000008947 */ /* 0x000fea0003800000 */ /*0230*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */ /* 0x000fe200000001ff */ /*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */ /* 0x000fd200078e0004 */ /*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x160] ; /* 0x0000580003047625 */ /* 0x000fca00078e0206 */ /*0260*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */ /* 0x000ea2000c1e1900 */ /*0270*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */ /* 0x000fe40007ffe0ff */ /*0280*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */ /* 0x000fe40007ffe0ff */ /*0290*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fe20003f05270 */ /*02a0*/ FMUL R7, R6, c[0x0][0x168] ; /* 0x00005a0006077a20 */ /* 0x004fca0000400000 */ /*02b0*/ STG.E [R4.64], R7 ; /* 
0x0000000704007986 */ /* 0x0001e4000c101904 */ /*02c0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */ /* 0x001fca00078e0204 */ /*02d0*/ @P0 BRA 0x260 ; /* 0xffffff8000000947 */ /* 0x000fea000383ffff */ /*02e0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*02f0*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*0300*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */ /* 0x001fc800078e00ff */ /*0310*/ IMAD.WIDE R4, R3, R4, c[0x0][0x160] ; /* 0x0000580003047625 */ /* 0x000fca00078e0204 */ /*0320*/ LDG.E R2, [R4.64] ; /* 0x0000000404027981 */ /* 0x000ea2000c1e1900 */ /*0330*/ IMAD.WIDE R6, R0, 0x4, R4 ; /* 0x0000000400067825 */ /* 0x000fc800078e0204 */ /*0340*/ FMUL R13, R2, c[0x0][0x168] ; /* 0x00005a00020d7a20 */ /* 0x004fca0000400000 */ /*0350*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x0001e8000c101904 */ /*0360*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x000ea2000c1e1900 */ /*0370*/ IMAD.WIDE R8, R0, 0x4, R6 ; /* 0x0000000400087825 */ /* 0x000fc800078e0206 */ /*0380*/ FMUL R15, R2, c[0x0][0x168] ; /* 0x00005a00020f7a20 */ /* 0x004fca0000400000 */ /*0390*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001e8000c101904 */ /*03a0*/ LDG.E R2, [R8.64] ; /* 0x0000000408027981 */ /* 0x000ea2000c1e1900 */ /*03b0*/ IMAD.WIDE R10, R0, 0x4, R8 ; /* 0x00000004000a7825 */ /* 0x000fc800078e0208 */ /*03c0*/ FMUL R17, R2, c[0x0][0x168] ; /* 0x00005a0002117a20 */ /* 0x004fca0000400000 */ /*03d0*/ STG.E [R8.64], R17 ; /* 0x0000001108007986 */ /* 0x0001e8000c101904 */ /*03e0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */ /* 0x000ea2000c1e1900 */ /*03f0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */ /* 0x000fc80007ffe000 */ /*0400*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */ /* 0x000fc80007ffe000 */ /*0410*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x16c], PT ; /* 0x00005b0003007a0c */ /* 0x000fe20003f06270 */ /*0420*/ FMUL R19, R2, c[0x0][0x168] ; /* 0x00005a0002137a20 */ /* 0x004fca0000400000 */ /*0430*/ STG.E 
[R10.64], R19 ; /* 0x000000130a007986 */ /* 0x0001ee000c101904 */ /*0440*/ @!P0 BRA 0x300 ; /* 0xfffffeb000008947 */ /* 0x000fea000383ffff */ /*0450*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0460*/ BRA 0x460; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0480*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0490*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*04f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10ScaleArrayPffi .globl _Z10ScaleArrayPffi .p2align 8 .type _Z10ScaleArrayPffi,@function _Z10ScaleArrayPffi: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b32 s6, s[0:1], 0xc s_add_u32 s2, s0, 16 s_addc_u32 s3, s1, 0 s_mov_b32 s5, exec_lo s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] v_cmpx_gt_i32_e64 s6, v1 s_cbranch_execz .LBB0_3 s_load_b32 s2, s[2:3], 0x0 s_clause 0x1 s_load_b64 s[8:9], s[0:1], 0x0 s_load_b32 s1, s[0:1], 0x8 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] s_waitcnt lgkmcnt(0) s_mul_i32 s2, s2, s4 v_add_co_u32 v2, vcc_lo, s8, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo s_ashr_i32 s3, s2, 31 s_lshl_b64 s[4:5], s[2:3], 2 s_mov_b32 s3, 0 .LBB0_2: global_load_b32 v0, v[2:3], off s_waitcnt vmcnt(0) v_dual_mul_f32 v0, s1, v0 :: v_dual_add_nc_u32 v1, s2, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_cmp_le_i32_e32 vcc_lo, s6, v1 global_store_b32 v[2:3], v0, off v_add_co_u32 v2, s0, v2, s4 v_add_co_ci_u32_e64 v3, s0, s5, v3, s0 s_or_b32 s3, vcc_lo, s3 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s3 s_cbranch_execnz .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10ScaleArrayPffi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 
.amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10ScaleArrayPffi, .Lfunc_end0-_Z10ScaleArrayPffi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 
.value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10ScaleArrayPffi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10ScaleArrayPffi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001489d9_00000000-6_ScaleArray.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z32__device_stub__Z10ScaleArrayPffiPffi .type _Z32__device_stub__Z10ScaleArrayPffiPffi, @function _Z32__device_stub__Z10ScaleArrayPffiPffi: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movss %xmm0, 4(%rsp) movl %esi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z10ScaleArrayPffi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z32__device_stub__Z10ScaleArrayPffiPffi, .-_Z32__device_stub__Z10ScaleArrayPffiPffi .globl _Z10ScaleArrayPffi .type _Z10ScaleArrayPffi, @function _Z10ScaleArrayPffi: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z10ScaleArrayPffiPffi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10ScaleArrayPffi, 
.-_Z10ScaleArrayPffi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z10ScaleArrayPffi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10ScaleArrayPffi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "ScaleArray.hip" .globl _Z25__device_stub__ScaleArrayPffi # -- Begin function _Z25__device_stub__ScaleArrayPffi .p2align 4, 0x90 .type _Z25__device_stub__ScaleArrayPffi,@function _Z25__device_stub__ScaleArrayPffi: # @_Z25__device_stub__ScaleArrayPffi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movss %xmm0, 4(%rsp) movl %esi, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z10ScaleArrayPffi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z25__device_stub__ScaleArrayPffi, .Lfunc_end0-_Z25__device_stub__ScaleArrayPffi .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10ScaleArrayPffi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi 
testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10ScaleArrayPffi,@object # @_Z10ScaleArrayPffi .section .rodata,"a",@progbits .globl _Z10ScaleArrayPffi .p2align 3, 0x0 _Z10ScaleArrayPffi: .quad _Z25__device_stub__ScaleArrayPffi .size _Z10ScaleArrayPffi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10ScaleArrayPffi" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__ScaleArrayPffi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10ScaleArrayPffi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>

/*
 * Element-wise vector add with manual block-level unrolling.
 *
 * Launch layout: 1-D grid, 1-D blocks. Each block owns `fatorUnroll`
 * consecutive chunks of `blockDim.x` floats, so thread `threadIdx.x` of
 * block `blockIdx.x` processes elements
 *   idx + i * blockDim.x   for i in [0, fatorUnroll).
 *
 * Fix: the original body computed `index = idx + fatorUnroll` on every
 * iteration, writing the same element `fatorUnroll` times and leaving the
 * remaining chunks untouched. The index now advances by blockDim.x per
 * unroll step.
 *
 * NOTE(review): there is no `n` bound to guard against a partial tail
 * block; the host launch must cover the array exactly.
 */
__global__ void sumArraysOnGpu(float *A, float *B, float *C, int fatorUnroll) {
    unsigned int idx = blockIdx.x * blockDim.x * fatorUnroll + threadIdx.x;

    for (int i = 0; i < fatorUnroll; i++) {
        unsigned int index = idx + (unsigned int)i * blockDim.x;
        C[index] = A[index] + B[index];
    }
}

/*
 * Fill `ip[0..size)` with pseudo-random floats in [0.0, 25.5].
 *
 * Fix: the original seeded with `time(&t) - ip[0]`, which reads
 * uninitialized heap memory (undefined behavior). The seed now comes
 * from the clock alone.
 */
void initialData(float *ip, int size){
    time_t t;
    srand((unsigned int) time(&t));

    for (int i = 0; i < size; i++){
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}

/* Fill `input[0..size)` with i plus a small size-dependent offset. */
void linearData(float *input, int size) {
    for (int i = 0; i < size; i++) {
        input[i] = i + (size / (1024 * 1e3));
    }
}

/*
 * argv[1]: exponent X sizing the problem (see nBytes formula below)
 * argv[2]: threads per block
 * argv[3]: unroll factor per block
 */
int main(int argc, char **argv){
    // Robustness fix: the original indexed argv[1..3] unconditionally.
    if (argc < 4) {
        fprintf(stderr, "usage: %s <expoente> <threads> <fatorUnroll>\n", argv[0]);
        return 1;
    }

    int expoente = atoi(argv[1]);     // exponent sizing the element count
    int threads = atoi(argv[2]);      // threads per block
    int fatorUnroll = atoi(argv[3]);  // unroll factor

    // NOTE(review): this yields nBytes = 2^expoente and nElem = 2^(expoente-2),
    // which looks off by a sizeof(float) factor versus the "2^X elements"
    // comment — kept as-is because buffer sizes and nElem stay mutually
    // consistent; confirm the intended sizing with the author.
    size_t nBytes = (2 << (expoente + 1)) / sizeof(float);
    int nElem = nBytes / sizeof(float);

    float *h_A, *h_B, *h_C;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);

    initialData(h_A, nElem);
    linearData(h_B, nElem);

    printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n", nElem, (nBytes / (1024*1024)));

    float *d_A, *d_B, *d_C;
    cudaMalloc((float**)&d_A, nBytes);
    cudaMalloc((float**)&d_B, nBytes);
    cudaMalloc((float**)&d_C, nBytes);

    // Transfer host data to GPU global memory.
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);

    // Each block covers threads*fatorUnroll elements. NOTE(review): integer
    // division truncates, so a tail remains unprocessed unless
    // nElem % (threads * fatorUnroll) == 0.
    sumArraysOnGpu<<<(nElem / fatorUnroll) / threads, threads>>>(d_A, d_B, d_C, fatorUnroll);

    cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost);

    free(h_A);
    free(h_B);
    free(h_C);

    // Release GPU memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    cudaDeviceReset();
    return (0);
}
code for sm_80 Function : _Z14sumArraysOnGpuPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */ /* 0x000fc80000000f00 */ /*0020*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fda0003f06270 */ /*0030*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0050*/ IADD3 R4, R2, -0x1, RZ ; /* 0xffffffff02047810 */ /* 0x000fe20007ffe0ff */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e620000002500 */ /*0090*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*00a0*/ IADD3 R0, R0, c[0x0][0x178], RZ ; /* 0x00005e0000007a10 */ /* 0x001fe20007ffe0ff */ /*00b0*/ IMAD R3, R3, c[0x0][0x0], RZ ; /* 0x0000000003037a24 */ /* 0x002fc800078e02ff */ /*00c0*/ IMAD R6, R3, c[0x0][0x178], R0 ; /* 0x00005e0003067a24 */ /* 0x000fe200078e0200 */ /*00d0*/ LOP3.LUT R0, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302007812 */ /* 0x000fc600078ec0ff */ /*00e0*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*00f0*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*0100*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*0110*/ @!P0 BRA 0x950 ; /* 0x0000083000008947 */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R8, -R0, c[0x0][0x178], RZ ; /* 0x00005e0000087a10 */ /* 0x000fc80007ffe1ff */ /*0130*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fda0003f04270 */ /*0140*/ @!P0 BRA 0x820 ; /* 
0x000006d000008947 */ /* 0x000fea0003800000 */ /*0150*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe40003f24270 */ /*0160*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0170*/ @!P1 BRA 0x5c0 ; /* 0x0000044000009947 */ /* 0x000fea0003800000 */ /*0180*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0190*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001ea8000c1e1900 */ /*01a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000ea4000c1e1900 */ /*01b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x004fca0000000000 */ /*01c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*01d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*01e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea4000c1e1900 */ /*01f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x004fca0000000000 */ /*0200*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0210*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0220*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea4000c1e1900 */ /*0230*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x004fca0000000000 */ /*0240*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*0250*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ee8000c1e1900 */ /*0260*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x000ee4000c1e1900 */ /*0270*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x008fca0000000000 */ /*0280*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*0290*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*02a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*02b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*02c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 
0x0001e8000c101904 */ /*02d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*02e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*02f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0300*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0310*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0320*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x004f24000c1e1900 */ /*0330*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0340*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*0350*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0360*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008f24000c1e1900 */ /*0370*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x010fca0000000000 */ /*0380*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*0390*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*03a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*03b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*03c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*03d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*03e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*03f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0400*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0410*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0420*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x004f24000c1e1900 */ /*0430*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0440*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*0450*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 
0x000f28000c1e1900 */ /*0460*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008f24000c1e1900 */ /*0470*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x010fca0000000000 */ /*0480*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*0490*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*04a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*04b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*04c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*04d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*04e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*04f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0500*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e8000c101904 */ /*0510*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0520*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x004f22000c1e1900 */ /*0530*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */ /* 0x000fe20007ffe0ff */ /*0540*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0550*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0001e8000c101904 */ /*0560*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0570*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008ea2000c1e1900 */ /*0580*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe20003f24270 */ /*0590*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x004fca0000000000 */ /*05a0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001ee000c101904 */ /*05b0*/ @P1 BRA 0x190 ; /* 0xfffffbd000001947 */ /* 0x000fea000383ffff */ /*05c0*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */ /* 0x000fda0003f24270 */ /*05d0*/ @!P1 BRA 0x800 ; /* 0x0000022000009947 */ /* 0x000fea0003800000 */ /*05e0*/ LDG.E R9, [R4.64] ; /* 
0x0000000404097981 */ /* 0x001ea8000c1e1900 */ /*05f0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000ea4000c1e1900 */ /*0600*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x004fca0000000000 */ /*0610*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*0620*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0630*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea4000c1e1900 */ /*0640*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x004fca0000000000 */ /*0650*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0660*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0670*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea4000c1e1900 */ /*0680*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x004fca0000000000 */ /*0690*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*06a0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ee8000c1e1900 */ /*06b0*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x000ee4000c1e1900 */ /*06c0*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x008fca0000000000 */ /*06d0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*06e0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*06f0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*0700*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*0710*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*0720*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0730*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*0740*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0750*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e8000c101904 */ /*0760*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0770*/ LDG.E R13, [R2.64] ; /* 
0x00000004020d7981 */ /* 0x004f24000c1e1900 */ /*0780*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0790*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0001e8000c101904 */ /*07a0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*07b0*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008ea2000c1e1900 */ /*07c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*07d0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */ /* 0x000fe20007ffe0ff */ /*07e0*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x004fca0000000000 */ /*07f0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001e8000c101904 */ /*0800*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */ /* 0x000fda0000705670 */ /*0810*/ @!P0 BRA 0x950 ; /* 0x0000013000008947 */ /* 0x000fea0003800000 */ /*0820*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001ea8000c1e1900 */ /*0830*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000ea4000c1e1900 */ /*0840*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x004fca0000000000 */ /*0850*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*0860*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0870*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea4000c1e1900 */ /*0880*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x004fca0000000000 */ /*0890*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e8000c101904 */ /*08a0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*08b0*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea2000c1e1900 */ /*08c0*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */ /* 0x000fe20007ffe0ff */ /*08d0*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x004fca0000000000 */ /*08e0*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0001e8000c101904 */ /*08f0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ 
/*0900*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x000ea2000c1e1900 */ /*0910*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f05270 */ /*0920*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x004fca0000000000 */ /*0930*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001ee000c101904 */ /*0940*/ @P0 BRA 0x820 ; /* 0xfffffed000000947 */ /* 0x001fea000383ffff */ /*0950*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fda0003f05270 */ /*0960*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0970*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */ /* 0x000ea8000c1e1900 */ /*0980*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */ /* 0x001ea2000c1e1900 */ /*0990*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */ /* 0x000fc80007ffe0ff */ /*09a0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*09b0*/ FADD R9, R8, R9 ; /* 0x0000000908097221 */ /* 0x004fca0000000000 */ /*09c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001ee000c101904 */ /*09d0*/ @P0 BRA 0x970 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*09e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*09f0*/ BRA 0x9f0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0a00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdlib.h> #include <string.h> #include <time.h> #include <stdio.h> #include <math.h> #include <cuda_runtime.h> __global__ void sumArraysOnGpu(float *A, float *B, float *C, int fatorUnroll) { unsigned int idx = blockIdx.x * blockDim.x * fatorUnroll + threadIdx.x; for (int i = 1; i <= fatorUnroll; i++) { int index = idx + fatorUnroll; C[index] = A[index] + B[index]; } } void initialData(float *ip, int size){ // generate different seed for random number time_t t; srand((unsigned int) time (&t) - ip[0]); for (int i=0; i<size; i++){ ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void linearData(float *input, int size) { for (int i = 0; i < size; i++) { input[i] = i + (size / (1024 * 1e3)); } } int main(int argc, char **argv){ int expoente = atoi(argv[1]); // Primeiro argumento é o expoente onde 2^X = tamanho do elemento int threads = atoi(argv[2]); // Segundo argumento é o numero de threads int fatorUnroll = atoi(argv[3]); // Terceiro argumento é o fator de unroll size_t nBytes = (2 << (expoente + 1)) / sizeof(float); int nElem = nBytes / sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); linearData(h_B, nElem); printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n", nElem, (nBytes / (1024*1024))); float *d_A, *d_B, *d_C; cudaMalloc((float**)&d_A, nBytes); cudaMalloc((float**)&d_B, nBytes); cudaMalloc((float**)&d_C, nBytes); // Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the // parameter cudaMemcpyHostToDevice specifying the transfer direction. 
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); sumArraysOnGpu<<<(nElem / fatorUnroll) / threads, threads>>>(d_A, d_B, d_C, fatorUnroll); cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); // use cudaFree to release the memory used on the GPU cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); return (0); }
.file "tmpxft_0006d61f_00000000-6_sumArrayFloat.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11initialDataPfi .type _Z11initialDataPfi, @function _Z11initialDataPfi: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $16, %rsp .cfi_def_cfa_offset 48 movq %rdi, %r12 movl %esi, %ebp movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax movq %rsp, %rdi call time@PLT movl %eax, %eax pxor %xmm0, %xmm0 cvtsi2ssq %rax, %xmm0 subss (%r12), %xmm0 cvttss2siq %xmm0, %rdi call srand@PLT testl %ebp, %ebp jle .L3 movq %r12, %rbx movslq %ebp, %rbp leaq (%r12,%rbp,4), %rbp .L7: call rand@PLT movzbl %al, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 divss .LC0(%rip), %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L7 .L3: movq 8(%rsp), %rax subq %fs:40, %rax jne .L11 addq $16, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z11initialDataPfi, .-_Z11initialDataPfi .globl _Z10linearDataPfi .type _Z10linearDataPfi, @function _Z10linearDataPfi: .LFB2058: .cfi_startproc endbr64 testl %esi, %esi jle .L12 pxor %xmm1, %xmm1 cvtsi2sdl %esi, %xmm1 divsd .LC1(%rip), %xmm1 movslq %esi, %rsi movl $0, %eax .L14: pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd %xmm1, %xmm0 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rdi,%rax,4) addq $1, %rax cmpq %rsi, %rax jne .L14 .L12: ret .cfi_endproc .LFE2058: .size 
_Z10linearDataPfi, .-_Z10linearDataPfi .globl _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i .type _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i, @function _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L20 .L16: movq 136(%rsp), %rax subq %fs:40, %rax jne .L21 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14sumArraysOnGpuPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L16 .L21: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i, .-_Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i .globl _Z14sumArraysOnGpuPfS_S_i .type _Z14sumArraysOnGpuPfS_S_i, @function _Z14sumArraysOnGpuPfS_S_i: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z14sumArraysOnGpuPfS_S_i, .-_Z14sumArraysOnGpuPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq 
%r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %rsi, %rbp movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbx movq 16(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r15 movq 24(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, 4(%rsp) leal 1(%rbx), %ecx movl $2, %r12d sall %cl, %r12d movslq %r12d, %r12 movq %r12, %rbp shrq $2, %rbp movq %r12, %rbx shrq $4, %rbx movq %rbp, %rdi call malloc@PLT movq %rax, %r14 movq %rbp, %rdi call malloc@PLT movq %rax, %r13 movq %rbp, %rdi call malloc@PLT movq %rax, 8(%rsp) movl %ebx, %esi movq %r14, %rdi call _Z11initialDataPfi movl %ebx, %esi movq %r13, %rdi call _Z10linearDataPfi movq %r12, %rcx shrq $22, %rcx movl %ebx, %edx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 24(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 32(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 40(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r14, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbp, %rdx movq %r13, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl %r15d, 60(%rsp) movl $1, 64(%rsp) movl %ebx, %eax cltd idivl 4(%rsp) cltd idivl %r15d movl %eax, 48(%rsp) movl $1, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L28 .L25: movl $2, %ecx movq %rbp, %rdx movq 40(%rsp), %rsi movq 8(%rsp), %rbx movq %rbx, %rdi call cudaMemcpy@PLT movq %r14, %rdi call free@PLT movq %r13, %rdi call 
free@PLT movq %rbx, %rdi call free@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT call cudaDeviceReset@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L29 movl $0, %eax addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state movl 4(%rsp), %ecx movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i jmp .L25 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "_Z14sumArraysOnGpuPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z14sumArraysOnGpuPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm 
_ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1092616192 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093615616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdlib.h> #include <string.h> #include <time.h> #include <stdio.h> #include <math.h> #include <cuda_runtime.h> __global__ void sumArraysOnGpu(float *A, float *B, float *C, int fatorUnroll) { unsigned int idx = blockIdx.x * blockDim.x * fatorUnroll + threadIdx.x; for (int i = 1; i <= fatorUnroll; i++) { int index = idx + fatorUnroll; C[index] = A[index] + B[index]; } } void initialData(float *ip, int size){ // generate different seed for random number time_t t; srand((unsigned int) time (&t) - ip[0]); for (int i=0; i<size; i++){ ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void linearData(float *input, int size) { for (int i = 0; i < size; i++) { input[i] = i + (size / (1024 * 1e3)); } } int main(int argc, char **argv){ int expoente = atoi(argv[1]); // Primeiro argumento é o expoente onde 2^X = tamanho do elemento int threads = atoi(argv[2]); // Segundo argumento é o numero de threads int fatorUnroll = atoi(argv[3]); // Terceiro argumento é o fator de unroll size_t nBytes = (2 << (expoente + 1)) / sizeof(float); int nElem = nBytes / sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); linearData(h_B, nElem); printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n", nElem, (nBytes / (1024*1024))); float *d_A, *d_B, *d_C; cudaMalloc((float**)&d_A, nBytes); cudaMalloc((float**)&d_B, nBytes); cudaMalloc((float**)&d_C, nBytes); // Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the // parameter cudaMemcpyHostToDevice specifying the transfer direction. 
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); sumArraysOnGpu<<<(nElem / fatorUnroll) / threads, threads>>>(d_A, d_B, d_C, fatorUnroll); cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); // use cudaFree to release the memory used on the GPU cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); return (0); }
#include <stdlib.h> #include <string.h> #include <time.h> #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> __global__ void sumArraysOnGpu(float *A, float *B, float *C, int fatorUnroll) { unsigned int idx = blockIdx.x * blockDim.x * fatorUnroll + threadIdx.x; for (int i = 1; i <= fatorUnroll; i++) { int index = idx + fatorUnroll; C[index] = A[index] + B[index]; } } void initialData(float *ip, int size){ // generate different seed for random number time_t t; srand((unsigned int) time (&t) - ip[0]); for (int i=0; i<size; i++){ ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void linearData(float *input, int size) { for (int i = 0; i < size; i++) { input[i] = i + (size / (1024 * 1e3)); } } int main(int argc, char **argv){ int expoente = atoi(argv[1]); // Primeiro argumento é o expoente onde 2^X = tamanho do elemento int threads = atoi(argv[2]); // Segundo argumento é o numero de threads int fatorUnroll = atoi(argv[3]); // Terceiro argumento é o fator de unroll size_t nBytes = (2 << (expoente + 1)) / sizeof(float); int nElem = nBytes / sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); linearData(h_B, nElem); printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n", nElem, (nBytes / (1024*1024))); float *d_A, *d_B, *d_C; hipMalloc((float**)&d_A, nBytes); hipMalloc((float**)&d_B, nBytes); hipMalloc((float**)&d_C, nBytes); // Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the // parameter cudaMemcpyHostToDevice specifying the transfer direction. 
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); sumArraysOnGpu<<<(nElem / fatorUnroll) / threads, threads>>>(d_A, d_B, d_C, fatorUnroll); hipMemcpy(h_C, d_C, nBytes, hipMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); // use cudaFree to release the memory used on the GPU hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); return (0); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdlib.h> #include <string.h> #include <time.h> #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> __global__ void sumArraysOnGpu(float *A, float *B, float *C, int fatorUnroll) { unsigned int idx = blockIdx.x * blockDim.x * fatorUnroll + threadIdx.x; for (int i = 1; i <= fatorUnroll; i++) { int index = idx + fatorUnroll; C[index] = A[index] + B[index]; } } void initialData(float *ip, int size){ // generate different seed for random number time_t t; srand((unsigned int) time (&t) - ip[0]); for (int i=0; i<size; i++){ ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void linearData(float *input, int size) { for (int i = 0; i < size; i++) { input[i] = i + (size / (1024 * 1e3)); } } int main(int argc, char **argv){ int expoente = atoi(argv[1]); // Primeiro argumento é o expoente onde 2^X = tamanho do elemento int threads = atoi(argv[2]); // Segundo argumento é o numero de threads int fatorUnroll = atoi(argv[3]); // Terceiro argumento é o fator de unroll size_t nBytes = (2 << (expoente + 1)) / sizeof(float); int nElem = nBytes / sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); linearData(h_B, nElem); printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n", nElem, (nBytes / (1024*1024))); float *d_A, *d_B, *d_C; hipMalloc((float**)&d_A, nBytes); hipMalloc((float**)&d_B, nBytes); hipMalloc((float**)&d_C, nBytes); // Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the // parameter cudaMemcpyHostToDevice specifying the transfer direction. 
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); sumArraysOnGpu<<<(nElem / fatorUnroll) / threads, threads>>>(d_A, d_B, d_C, fatorUnroll); hipMemcpy(h_C, d_C, nBytes, hipMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); // use cudaFree to release the memory used on the GPU hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); return (0); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14sumArraysOnGpuPfS_S_i .globl _Z14sumArraysOnGpuPfS_S_i .p2align 8 .type _Z14sumArraysOnGpuPfS_S_i,@function _Z14sumArraysOnGpuPfS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_3 s_clause 0x1 s_load_b32 s3, s[0:1], 0x2c s_load_b128 s[4:7], s[0:1], 0x0 s_mul_i32 s8, s15, s2 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s8, s8, s3 v_add3_u32 v0, s8, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[4:5], 2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo v_add_co_u32 v2, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo v_add_co_u32 v4, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo .LBB0_2: global_load_b32 v6, v[0:1], off global_load_b32 v7, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add_f32_e32 v6, v6, v7 global_store_b32 v[4:5], v6, off s_cbranch_scc0 .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14sumArraysOnGpuPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 
.amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14sumArraysOnGpuPfS_S_i, .Lfunc_end0-_Z14sumArraysOnGpuPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: 
hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14sumArraysOnGpuPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14sumArraysOnGpuPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdlib.h> #include <string.h> #include <time.h> #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> __global__ void sumArraysOnGpu(float *A, float *B, float *C, int fatorUnroll) { unsigned int idx = blockIdx.x * blockDim.x * fatorUnroll + threadIdx.x; for (int i = 1; i <= fatorUnroll; i++) { int index = idx + fatorUnroll; C[index] = A[index] + B[index]; } } void initialData(float *ip, int size){ // generate different seed for random number time_t t; srand((unsigned int) time (&t) - ip[0]); for (int i=0; i<size; i++){ ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void linearData(float *input, int size) { for (int i = 0; i < size; i++) { input[i] = i + (size / (1024 * 1e3)); } } int main(int argc, char **argv){ int expoente = atoi(argv[1]); // Primeiro argumento é o expoente onde 2^X = tamanho do elemento int threads = atoi(argv[2]); // Segundo argumento é o numero de threads int fatorUnroll = atoi(argv[3]); // Terceiro argumento é o fator de unroll size_t nBytes = (2 << (expoente + 1)) / sizeof(float); int nElem = nBytes / sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); linearData(h_B, nElem); printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n", nElem, (nBytes / (1024*1024))); float *d_A, *d_B, *d_C; hipMalloc((float**)&d_A, nBytes); hipMalloc((float**)&d_B, nBytes); hipMalloc((float**)&d_C, nBytes); // Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the // parameter cudaMemcpyHostToDevice specifying the transfer direction. 
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); sumArraysOnGpu<<<(nElem / fatorUnroll) / threads, threads>>>(d_A, d_B, d_C, fatorUnroll); hipMemcpy(h_C, d_C, nBytes, hipMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); // use cudaFree to release the memory used on the GPU hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); return (0); }
.text .file "sumArrayFloat.hip" .globl _Z29__device_stub__sumArraysOnGpuPfS_S_i # -- Begin function _Z29__device_stub__sumArraysOnGpuPfS_S_i .p2align 4, 0x90 .type _Z29__device_stub__sumArraysOnGpuPfS_S_i,@function _Z29__device_stub__sumArraysOnGpuPfS_S_i: # @_Z29__device_stub__sumArraysOnGpuPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14sumArraysOnGpuPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z29__device_stub__sumArraysOnGpuPfS_S_i, .Lfunc_end0-_Z29__device_stub__sumArraysOnGpuPfS_S_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z11initialDataPfi .LCPI1_0: .long 0x41200000 # float 10 .text .globl _Z11initialDataPfi .p2align 4, 0x90 .type _Z11initialDataPfi,@function _Z11initialDataPfi: # @_Z11initialDataPfi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebp movq %rdi, %rbx movq %rsp, %rdi callq time movl %eax, %eax cvtsi2ss %rax, %xmm0 subss (%rbx), %xmm0 cvttss2si %xmm0, %rdi # kill: def $edi killed $edi killed $rdi callq srand testl %ebp, %ebp jle .LBB1_3 # %bb.1: # %.lr.ph.preheader movl %ebp, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_2: # 
%.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movzbl %al, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 divss .LCPI1_0(%rip), %xmm0 movss %xmm0, (%rbx,%r15,4) incq %r15 cmpq %r15, %r14 jne .LBB1_2 .LBB1_3: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z11initialDataPfi, .Lfunc_end1-_Z11initialDataPfi .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10linearDataPfi .LCPI2_0: .quad 0x412f400000000000 # double 1024000 .text .globl _Z10linearDataPfi .p2align 4, 0x90 .type _Z10linearDataPfi,@function _Z10linearDataPfi: # @_Z10linearDataPfi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB2_3 # %bb.1: # %.lr.ph cvtsi2sd %esi, %xmm0 divsd .LCPI2_0(%rip), %xmm0 movl %esi, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_2: # =>This Inner Loop Header: Depth=1 xorps %xmm1, %xmm1 cvtsi2sd %ecx, %xmm1 addsd %xmm0, %xmm1 cvtsd2ss %xmm1, %xmm1 movss %xmm1, (%rdi,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB2_2 .LBB2_3: # %._crit_edge retq .Lfunc_end2: .size _Z10linearDataPfi, .Lfunc_end2-_Z10linearDataPfi .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI3_0: .long 0x41200000 # float 10 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI3_1: .quad 0x412f400000000000 # double 1024000 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $184, %rsp .cfi_def_cfa_offset 240 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %rbx movq 8(%rsi), %rdi xorl %esi, 
%esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 56(%rsp) # 8-byte Spill movq 24(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 64(%rsp) # 8-byte Spill incb %r14b movl $2, %eax movl %r14d, %ecx shll %cl, %eax cltq movq %rax, %rbx shrq $2, %rbx movq %rax, 48(%rsp) # 8-byte Spill movl %eax, %r13d sarl $4, %r13d movq %rbx, %rdi callq malloc movq %rax, %r14 movq %rbx, %rdi callq malloc movq %rax, %r15 movq %rbx, %rdi callq malloc movq %rax, %r12 leaq 144(%rsp), %rdi callq time movl %eax, %eax cvtsi2ss %rax, %xmm0 subss (%r14), %xmm0 cvttss2si %xmm0, %rdi # kill: def $edi killed $edi killed $rdi callq srand testl %r13d, %r13d jle .LBB3_6 # %bb.1: # %.lr.ph.preheader.i movq %r12, 40(%rsp) # 8-byte Spill movl %r13d, %ebp xorl %r12d, %r12d .p2align 4, 0x90 .LBB3_2: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand movzbl %al, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 divss .LCPI3_0(%rip), %xmm0 movss %xmm0, (%r14,%r12,4) incq %r12 cmpq %r12, %rbp jne .LBB3_2 # %bb.3: # %_Z11initialDataPfi.exit testl %r13d, %r13d movq 40(%rsp), %r12 # 8-byte Reload jle .LBB3_6 # %bb.4: # %.lr.ph.i32 xorps %xmm0, %xmm0 cvtsi2sd %r13d, %xmm0 divsd .LCPI3_1(%rip), %xmm0 xorl %eax, %eax .p2align 4, 0x90 .LBB3_5: # =>This Inner Loop Header: Depth=1 xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 addsd %xmm0, %xmm1 cvtsd2ss %xmm1, %xmm1 movss %xmm1, (%r15,%rax,4) incq %rax cmpq %rax, %rbp jne .LBB3_5 .LBB3_6: # %_Z10linearDataPfi.exit movq 48(%rsp), %rdx # 8-byte Reload shrq $22, %rdx movl $.L.str, %edi movl %r13d, %esi xorl %eax, %eax callq printf leaq 24(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %rbx, %rsi callq hipMalloc movq 24(%rsp), %rdi movq %r14, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r15, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movl %r13d, 
%eax cltd movq 64(%rsp), %r13 # 8-byte Reload idivl %r13d cltd movq 56(%rsp), %rsi # 8-byte Reload idivl %esi # kill: def $eax killed $eax def $rax movabsq $4294967296, %rcx # imm = 0x100000000 leaq (%rax,%rcx), %rdi movl %esi, %edx orq %rcx, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_8 # %bb.7: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 136(%rsp) movq %rcx, 128(%rsp) movq %rdx, 120(%rsp) movl %r13d, 36(%rsp) leaq 136(%rsp), %rax movq %rax, 144(%rsp) leaq 128(%rsp), %rax movq %rax, 152(%rsp) leaq 120(%rsp), %rax movq %rax, 160(%rsp) leaq 36(%rsp), %rax movq %rax, 168(%rsp) leaq 104(%rsp), %rdi leaq 88(%rsp), %rsi leaq 80(%rsp), %rdx leaq 72(%rsp), %rcx callq __hipPopCallConfiguration movq 104(%rsp), %rsi movl 112(%rsp), %edx movq 88(%rsp), %rcx movl 96(%rsp), %r8d leaq 144(%rsp), %r9 movl $_Z14sumArraysOnGpuPfS_S_i, %edi pushq 72(%rsp) .cfi_adjust_cfa_offset 8 pushq 88(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_8: movq 8(%rsp), %rsi movq %r12, %rdi movq %rbx, %rdx movl $2, %ecx callq hipMemcpy movq %r14, %rdi callq free movq %r15, %rdi callq free movq %r12, %rdi callq free movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree callq hipDeviceReset xorl %eax, %eax addq $184, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq 
__hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14sumArraysOnGpuPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z14sumArraysOnGpuPfS_S_i,@object # @_Z14sumArraysOnGpuPfS_S_i .section .rodata,"a",@progbits .globl _Z14sumArraysOnGpuPfS_S_i .p2align 3, 0x0 _Z14sumArraysOnGpuPfS_S_i: .quad _Z29__device_stub__sumArraysOnGpuPfS_S_i .size _Z14sumArraysOnGpuPfS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n" .size .L.str, 57 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z14sumArraysOnGpuPfS_S_i" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__sumArraysOnGpuPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14sumArraysOnGpuPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z14sumArraysOnGpuPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */ /* 0x000fc80000000f00 */ /*0020*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fda0003f06270 */ /*0030*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0050*/ IADD3 R4, R2, -0x1, RZ ; /* 0xffffffff02047810 */ /* 0x000fe20007ffe0ff */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e620000002500 */ /*0090*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */ /* 0x000fe40003f06070 */ /*00a0*/ IADD3 R0, R0, c[0x0][0x178], RZ ; /* 0x00005e0000007a10 */ /* 0x001fe20007ffe0ff */ /*00b0*/ IMAD R3, R3, c[0x0][0x0], RZ ; /* 0x0000000003037a24 */ /* 0x002fc800078e02ff */ /*00c0*/ IMAD R6, R3, c[0x0][0x178], R0 ; /* 0x00005e0003067a24 */ /* 0x000fe200078e0200 */ /*00d0*/ LOP3.LUT R0, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302007812 */ /* 0x000fc600078ec0ff */ /*00e0*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*00f0*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*0100*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*0110*/ @!P0 BRA 0x950 ; /* 0x0000083000008947 */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R8, -R0, c[0x0][0x178], RZ ; /* 0x00005e0000087a10 */ /* 0x000fc80007ffe1ff */ /*0130*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fda0003f04270 */ /*0140*/ @!P0 BRA 0x820 ; /* 
0x000006d000008947 */ /* 0x000fea0003800000 */ /*0150*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe40003f24270 */ /*0160*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*0170*/ @!P1 BRA 0x5c0 ; /* 0x0000044000009947 */ /* 0x000fea0003800000 */ /*0180*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0190*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001ea8000c1e1900 */ /*01a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000ea4000c1e1900 */ /*01b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x004fca0000000000 */ /*01c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*01d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*01e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea4000c1e1900 */ /*01f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x004fca0000000000 */ /*0200*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0210*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0220*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea4000c1e1900 */ /*0230*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x004fca0000000000 */ /*0240*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*0250*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ee8000c1e1900 */ /*0260*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x000ee4000c1e1900 */ /*0270*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x008fca0000000000 */ /*0280*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*0290*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*02a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*02b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*02c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 
0x0001e8000c101904 */ /*02d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*02e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*02f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0300*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0310*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0320*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x004f24000c1e1900 */ /*0330*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0340*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*0350*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0360*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008f24000c1e1900 */ /*0370*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x010fca0000000000 */ /*0380*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*0390*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*03a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*03b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*03c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*03d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*03e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*03f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0400*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0410*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0420*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x004f24000c1e1900 */ /*0430*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0440*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*0450*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 
0x000f28000c1e1900 */ /*0460*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008f24000c1e1900 */ /*0470*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x010fca0000000000 */ /*0480*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*0490*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*04a0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*04b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*04c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*04d0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*04e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*04f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0500*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e8000c101904 */ /*0510*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0520*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x004f22000c1e1900 */ /*0530*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */ /* 0x000fe20007ffe0ff */ /*0540*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0550*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0001e8000c101904 */ /*0560*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0570*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008ea2000c1e1900 */ /*0580*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */ /* 0x000fe20003f24270 */ /*0590*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x004fca0000000000 */ /*05a0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001ee000c101904 */ /*05b0*/ @P1 BRA 0x190 ; /* 0xfffffbd000001947 */ /* 0x000fea000383ffff */ /*05c0*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */ /* 0x000fda0003f24270 */ /*05d0*/ @!P1 BRA 0x800 ; /* 0x0000022000009947 */ /* 0x000fea0003800000 */ /*05e0*/ LDG.E R9, [R4.64] ; /* 
0x0000000404097981 */ /* 0x001ea8000c1e1900 */ /*05f0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000ea4000c1e1900 */ /*0600*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x004fca0000000000 */ /*0610*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*0620*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0630*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea4000c1e1900 */ /*0640*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x004fca0000000000 */ /*0650*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0003e8000c101904 */ /*0660*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0670*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea4000c1e1900 */ /*0680*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x004fca0000000000 */ /*0690*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0005e8000c101904 */ /*06a0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ee8000c1e1900 */ /*06b0*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x000ee4000c1e1900 */ /*06c0*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x008fca0000000000 */ /*06d0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0007e8000c101904 */ /*06e0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001f28000c1e1900 */ /*06f0*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000f24000c1e1900 */ /*0700*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x010fca0000000000 */ /*0710*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*0720*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0730*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x002f24000c1e1900 */ /*0740*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x010fca0000000000 */ /*0750*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e8000c101904 */ /*0760*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000f28000c1e1900 */ /*0770*/ LDG.E R13, [R2.64] ; /* 
0x00000004020d7981 */ /* 0x004f24000c1e1900 */ /*0780*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x010fca0000000000 */ /*0790*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0001e8000c101904 */ /*07a0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*07b0*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x008ea2000c1e1900 */ /*07c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*07d0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */ /* 0x000fe20007ffe0ff */ /*07e0*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x004fca0000000000 */ /*07f0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001e8000c101904 */ /*0800*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */ /* 0x000fda0000705670 */ /*0810*/ @!P0 BRA 0x950 ; /* 0x0000013000008947 */ /* 0x000fea0003800000 */ /*0820*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */ /* 0x001ea8000c1e1900 */ /*0830*/ LDG.E R10, [R2.64] ; /* 0x00000004020a7981 */ /* 0x000ea4000c1e1900 */ /*0840*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */ /* 0x004fca0000000000 */ /*0850*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001e8000c101904 */ /*0860*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*0870*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */ /* 0x000ea4000c1e1900 */ /*0880*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */ /* 0x004fca0000000000 */ /*0890*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */ /* 0x0001e8000c101904 */ /*08a0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ /*08b0*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */ /* 0x000ea2000c1e1900 */ /*08c0*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */ /* 0x000fe20007ffe0ff */ /*08d0*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */ /* 0x004fca0000000000 */ /*08e0*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x0001e8000c101904 */ /*08f0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */ /* 0x000ea8000c1e1900 */ 
/*0900*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */ /* 0x000ea2000c1e1900 */ /*0910*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */ /* 0x000fe20003f05270 */ /*0920*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */ /* 0x004fca0000000000 */ /*0930*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */ /* 0x0001ee000c101904 */ /*0940*/ @P0 BRA 0x820 ; /* 0xfffffed000000947 */ /* 0x001fea000383ffff */ /*0950*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fda0003f05270 */ /*0960*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0970*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */ /* 0x000ea8000c1e1900 */ /*0980*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */ /* 0x001ea2000c1e1900 */ /*0990*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */ /* 0x000fc80007ffe0ff */ /*09a0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x000fe20003f05270 */ /*09b0*/ FADD R9, R8, R9 ; /* 0x0000000908097221 */ /* 0x004fca0000000000 */ /*09c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x0001ee000c101904 */ /*09d0*/ @P0 BRA 0x970 ; /* 0xffffff9000000947 */ /* 0x000fea000383ffff */ /*09e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*09f0*/ BRA 0x9f0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0a00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0a70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14sumArraysOnGpuPfS_S_i .globl _Z14sumArraysOnGpuPfS_S_i .p2align 8 .type _Z14sumArraysOnGpuPfS_S_i,@function _Z14sumArraysOnGpuPfS_S_i: s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_3 s_clause 0x1 s_load_b32 s3, s[0:1], 0x2c s_load_b128 s[4:7], s[0:1], 0x0 s_mul_i32 s8, s15, s2 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s8, s8, s3 v_add3_u32 v0, s8, s2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[4:5], 2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo v_add_co_u32 v2, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo v_add_co_u32 v4, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo .LBB0_2: global_load_b32 v6, v[0:1], off global_load_b32 v7, v[2:3], off s_add_i32 s2, s2, -1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) v_add_f32_e32 v6, v6, v7 global_store_b32 v[4:5], v6, off s_cbranch_scc0 .LBB0_2 .LBB0_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14sumArraysOnGpuPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 
.amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14sumArraysOnGpuPfS_S_i, .Lfunc_end0-_Z14sumArraysOnGpuPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: 
hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14sumArraysOnGpuPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14sumArraysOnGpuPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0006d61f_00000000-6_sumArrayFloat.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11initialDataPfi .type _Z11initialDataPfi, @function _Z11initialDataPfi: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $16, %rsp .cfi_def_cfa_offset 48 movq %rdi, %r12 movl %esi, %ebp movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax movq %rsp, %rdi call time@PLT movl %eax, %eax pxor %xmm0, %xmm0 cvtsi2ssq %rax, %xmm0 subss (%r12), %xmm0 cvttss2siq %xmm0, %rdi call srand@PLT testl %ebp, %ebp jle .L3 movq %r12, %rbx movslq %ebp, %rbp leaq (%r12,%rbp,4), %rbp .L7: call rand@PLT movzbl %al, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 divss .LC0(%rip), %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L7 .L3: movq 8(%rsp), %rax subq %fs:40, %rax jne .L11 addq $16, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z11initialDataPfi, .-_Z11initialDataPfi .globl _Z10linearDataPfi .type _Z10linearDataPfi, @function _Z10linearDataPfi: .LFB2058: .cfi_startproc endbr64 testl %esi, %esi jle .L12 pxor %xmm1, %xmm1 cvtsi2sdl %esi, %xmm1 divsd .LC1(%rip), %xmm1 movslq %esi, %rsi movl $0, %eax .L14: pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd %xmm1, %xmm0 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rdi,%rax,4) addq $1, %rax cmpq %rsi, %rax jne .L14 .L12: ret .cfi_endproc .LFE2058: .size 
_Z10linearDataPfi, .-_Z10linearDataPfi .globl _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i .type _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i, @function _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L20 .L16: movq 136(%rsp), %rax subq %fs:40, %rax jne .L21 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14sumArraysOnGpuPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L16 .L21: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i, .-_Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i .globl _Z14sumArraysOnGpuPfS_S_i .type _Z14sumArraysOnGpuPfS_S_i, @function _Z14sumArraysOnGpuPfS_S_i: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z14sumArraysOnGpuPfS_S_i, .-_Z14sumArraysOnGpuPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq 
%r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %rsi, %rbp movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbx movq 16(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r15 movq 24(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, 4(%rsp) leal 1(%rbx), %ecx movl $2, %r12d sall %cl, %r12d movslq %r12d, %r12 movq %r12, %rbp shrq $2, %rbp movq %r12, %rbx shrq $4, %rbx movq %rbp, %rdi call malloc@PLT movq %rax, %r14 movq %rbp, %rdi call malloc@PLT movq %rax, %r13 movq %rbp, %rdi call malloc@PLT movq %rax, 8(%rsp) movl %ebx, %esi movq %r14, %rdi call _Z11initialDataPfi movl %ebx, %esi movq %r13, %rdi call _Z10linearDataPfi movq %r12, %rcx shrq $22, %rcx movl %ebx, %edx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 24(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 32(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 40(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbp, %rdx movq %r14, %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbp, %rdx movq %r13, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl %r15d, 60(%rsp) movl $1, 64(%rsp) movl %ebx, %eax cltd idivl 4(%rsp) cltd idivl %r15d movl %eax, 48(%rsp) movl $1, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L28 .L25: movl $2, %ecx movq %rbp, %rdx movq 40(%rsp), %rsi movq 8(%rsp), %rbx movq %rbx, %rdi call cudaMemcpy@PLT movq %r14, %rdi call free@PLT movq %r13, %rdi call 
free@PLT movq %rbx, %rdi call free@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT call cudaDeviceReset@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L29 movl $0, %eax addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state movl 4(%rsp), %ecx movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z39__device_stub__Z14sumArraysOnGpuPfS_S_iPfS_S_i jmp .L25 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "_Z14sumArraysOnGpuPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z14sumArraysOnGpuPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm 
_ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1092616192 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093615616 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "sumArrayFloat.hip" .globl _Z29__device_stub__sumArraysOnGpuPfS_S_i # -- Begin function _Z29__device_stub__sumArraysOnGpuPfS_S_i .p2align 4, 0x90 .type _Z29__device_stub__sumArraysOnGpuPfS_S_i,@function _Z29__device_stub__sumArraysOnGpuPfS_S_i: # @_Z29__device_stub__sumArraysOnGpuPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14sumArraysOnGpuPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z29__device_stub__sumArraysOnGpuPfS_S_i, .Lfunc_end0-_Z29__device_stub__sumArraysOnGpuPfS_S_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z11initialDataPfi .LCPI1_0: .long 0x41200000 # float 10 .text .globl _Z11initialDataPfi .p2align 4, 0x90 .type _Z11initialDataPfi,@function _Z11initialDataPfi: # @_Z11initialDataPfi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebp movq %rdi, %rbx movq %rsp, %rdi callq time movl %eax, %eax cvtsi2ss %rax, %xmm0 subss (%rbx), %xmm0 cvttss2si %xmm0, %rdi # kill: def $edi killed $edi killed $rdi callq srand testl %ebp, %ebp jle .LBB1_3 # %bb.1: # %.lr.ph.preheader movl %ebp, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_2: # 
%.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movzbl %al, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 divss .LCPI1_0(%rip), %xmm0 movss %xmm0, (%rbx,%r15,4) incq %r15 cmpq %r15, %r14 jne .LBB1_2 .LBB1_3: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z11initialDataPfi, .Lfunc_end1-_Z11initialDataPfi .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z10linearDataPfi .LCPI2_0: .quad 0x412f400000000000 # double 1024000 .text .globl _Z10linearDataPfi .p2align 4, 0x90 .type _Z10linearDataPfi,@function _Z10linearDataPfi: # @_Z10linearDataPfi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB2_3 # %bb.1: # %.lr.ph cvtsi2sd %esi, %xmm0 divsd .LCPI2_0(%rip), %xmm0 movl %esi, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_2: # =>This Inner Loop Header: Depth=1 xorps %xmm1, %xmm1 cvtsi2sd %ecx, %xmm1 addsd %xmm0, %xmm1 cvtsd2ss %xmm1, %xmm1 movss %xmm1, (%rdi,%rcx,4) incq %rcx cmpq %rcx, %rax jne .LBB2_2 .LBB2_3: # %._crit_edge retq .Lfunc_end2: .size _Z10linearDataPfi, .Lfunc_end2-_Z10linearDataPfi .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI3_0: .long 0x41200000 # float 10 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI3_1: .quad 0x412f400000000000 # double 1024000 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $184, %rsp .cfi_def_cfa_offset 240 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %rbx movq 8(%rsi), %rdi xorl %esi, 
%esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 56(%rsp) # 8-byte Spill movq 24(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 64(%rsp) # 8-byte Spill incb %r14b movl $2, %eax movl %r14d, %ecx shll %cl, %eax cltq movq %rax, %rbx shrq $2, %rbx movq %rax, 48(%rsp) # 8-byte Spill movl %eax, %r13d sarl $4, %r13d movq %rbx, %rdi callq malloc movq %rax, %r14 movq %rbx, %rdi callq malloc movq %rax, %r15 movq %rbx, %rdi callq malloc movq %rax, %r12 leaq 144(%rsp), %rdi callq time movl %eax, %eax cvtsi2ss %rax, %xmm0 subss (%r14), %xmm0 cvttss2si %xmm0, %rdi # kill: def $edi killed $edi killed $rdi callq srand testl %r13d, %r13d jle .LBB3_6 # %bb.1: # %.lr.ph.preheader.i movq %r12, 40(%rsp) # 8-byte Spill movl %r13d, %ebp xorl %r12d, %r12d .p2align 4, 0x90 .LBB3_2: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand movzbl %al, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 divss .LCPI3_0(%rip), %xmm0 movss %xmm0, (%r14,%r12,4) incq %r12 cmpq %r12, %rbp jne .LBB3_2 # %bb.3: # %_Z11initialDataPfi.exit testl %r13d, %r13d movq 40(%rsp), %r12 # 8-byte Reload jle .LBB3_6 # %bb.4: # %.lr.ph.i32 xorps %xmm0, %xmm0 cvtsi2sd %r13d, %xmm0 divsd .LCPI3_1(%rip), %xmm0 xorl %eax, %eax .p2align 4, 0x90 .LBB3_5: # =>This Inner Loop Header: Depth=1 xorps %xmm1, %xmm1 cvtsi2sd %eax, %xmm1 addsd %xmm0, %xmm1 cvtsd2ss %xmm1, %xmm1 movss %xmm1, (%r15,%rax,4) incq %rax cmpq %rax, %rbp jne .LBB3_5 .LBB3_6: # %_Z10linearDataPfi.exit movq 48(%rsp), %rdx # 8-byte Reload shrq $22, %rdx movl $.L.str, %edi movl %r13d, %esi xorl %eax, %eax callq printf leaq 24(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %rbx, %rsi callq hipMalloc leaq 8(%rsp), %rdi movq %rbx, %rsi callq hipMalloc movq 24(%rsp), %rdi movq %r14, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %r15, %rsi movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movl %r13d, 
%eax cltd movq 64(%rsp), %r13 # 8-byte Reload idivl %r13d cltd movq 56(%rsp), %rsi # 8-byte Reload idivl %esi # kill: def $eax killed $eax def $rax movabsq $4294967296, %rcx # imm = 0x100000000 leaq (%rax,%rcx), %rdi movl %esi, %edx orq %rcx, %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_8 # %bb.7: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 136(%rsp) movq %rcx, 128(%rsp) movq %rdx, 120(%rsp) movl %r13d, 36(%rsp) leaq 136(%rsp), %rax movq %rax, 144(%rsp) leaq 128(%rsp), %rax movq %rax, 152(%rsp) leaq 120(%rsp), %rax movq %rax, 160(%rsp) leaq 36(%rsp), %rax movq %rax, 168(%rsp) leaq 104(%rsp), %rdi leaq 88(%rsp), %rsi leaq 80(%rsp), %rdx leaq 72(%rsp), %rcx callq __hipPopCallConfiguration movq 104(%rsp), %rsi movl 112(%rsp), %edx movq 88(%rsp), %rcx movl 96(%rsp), %r8d leaq 144(%rsp), %r9 movl $_Z14sumArraysOnGpuPfS_S_i, %edi pushq 72(%rsp) .cfi_adjust_cfa_offset 8 pushq 88(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_8: movq 8(%rsp), %rsi movq %r12, %rdi movq %rbx, %rdx movl $2, %ecx callq hipMemcpy movq %r14, %rdi callq free movq %r15, %rdi callq free movq %r12, %rdi callq free movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree callq hipDeviceReset xorl %eax, %eax addq $184, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq 
__hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14sumArraysOnGpuPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z14sumArraysOnGpuPfS_S_i,@object # @_Z14sumArraysOnGpuPfS_S_i .section .rodata,"a",@progbits .globl _Z14sumArraysOnGpuPfS_S_i .p2align 3, 0x0 _Z14sumArraysOnGpuPfS_S_i: .quad _Z29__device_stub__sumArraysOnGpuPfS_S_i .size _Z14sumArraysOnGpuPfS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n" .size .L.str, 57 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z14sumArraysOnGpuPfS_S_i" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__sumArraysOnGpuPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14sumArraysOnGpuPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void initKernel(double* temperature, int block_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < block_size + 2 && j < block_size + 2) { temperature[(block_size + 2) * j + i] = 0.0; } }
code for sm_80 Function : _Z10initKernelPdi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e220000002600 */ /*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */ /* 0x000fe40000000800 */ /*0030*/ UIADD3 UR4, UR4, 0x2, URZ ; /* 0x0000000204047890 */ /* 0x000fe2000fffe03f */ /*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0050*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0060*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0080*/ ISETP.GE.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fe2000bf06270 */ /*0090*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*00a0*/ ISETP.GE.OR P0, PT, R0, UR4, P0 ; /* 0x0000000400007c0c */ /* 0x000fda0008706670 */ /*00b0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00c0*/ HFMA2.MMA R2, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff027435 */ /* 0x000fe200000001ff */ /*00d0*/ IMAD R3, R3, UR4, R0 ; /* 0x0000000403037c24 */ /* 0x000fe2000f8e0200 */ /*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fd00000000a00 */ /*00f0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */ /* 0x000fca00078e0202 */ /*0100*/ STG.E.64 [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x000fe2000c101b06 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void initKernel(double* temperature, int block_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < block_size + 2 && j < block_size + 2) { temperature[(block_size + 2) * j + i] = 0.0; } }
.file "tmpxft_00066783_00000000-6_initKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z10initKernelPdiPdi .type _Z31__device_stub__Z10initKernelPdiPdi, @function _Z31__device_stub__Z10initKernelPdiPdi: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z10initKernelPdi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z10initKernelPdiPdi, .-_Z31__device_stub__Z10initKernelPdiPdi .globl _Z10initKernelPdi .type _Z10initKernelPdi, @function _Z10initKernelPdi: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z10initKernelPdiPdi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10initKernelPdi, .-_Z10initKernelPdi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: 
.string "_Z10initKernelPdi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10initKernelPdi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void initKernel(double* temperature, int block_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < block_size + 2 && j < block_size + 2) { temperature[(block_size + 2) * j + i] = 0.0; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void initKernel(double* temperature, int block_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < block_size + 2 && j < block_size + 2) { temperature[(block_size + 2) * j + i] = 0.0; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void initKernel(double* temperature, int block_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < block_size + 2 && j < block_size + 2) { temperature[(block_size + 2) * j + i] = 0.0; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10initKernelPdi .globl _Z10initKernelPdi .p2align 8 .type _Z10initKernelPdi,@function _Z10initKernelPdi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s3, s[0:1], 0x8 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s4, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] s_add_i32 s2, s3, 2 s_mov_b32 s3, exec_lo v_max_i32_e32 v2, v0, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e64 s2, v2 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[0:1], 3, v[2:3] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_mov_b32_e32 v3, v2 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b64 v[0:1], v[2:3], off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10initKernelPdi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 
.amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10initKernelPdi, .Lfunc_end0-_Z10initKernelPdi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 
.language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10initKernelPdi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10initKernelPdi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void initKernel(double* temperature, int block_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < block_size + 2 && j < block_size + 2) { temperature[(block_size + 2) * j + i] = 0.0; } }
.text .file "initKernel.hip" .globl _Z25__device_stub__initKernelPdi # -- Begin function _Z25__device_stub__initKernelPdi .p2align 4, 0x90 .type _Z25__device_stub__initKernelPdi,@function _Z25__device_stub__initKernelPdi: # @_Z25__device_stub__initKernelPdi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z10initKernelPdi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z25__device_stub__initKernelPdi, .Lfunc_end0-_Z25__device_stub__initKernelPdi .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10initKernelPdi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 
16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10initKernelPdi,@object # @_Z10initKernelPdi .section .rodata,"a",@progbits .globl _Z10initKernelPdi .p2align 3, 0x0 _Z10initKernelPdi: .quad _Z25__device_stub__initKernelPdi .size _Z10initKernelPdi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10initKernelPdi" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__initKernelPdi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10initKernelPdi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z10initKernelPdi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e220000002600 */ /*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */ /* 0x000fe40000000800 */ /*0030*/ UIADD3 UR4, UR4, 0x2, URZ ; /* 0x0000000204047890 */ /* 0x000fe2000fffe03f */ /*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0050*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0060*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0080*/ ISETP.GE.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fe2000bf06270 */ /*0090*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*00a0*/ ISETP.GE.OR P0, PT, R0, UR4, P0 ; /* 0x0000000400007c0c */ /* 0x000fda0008706670 */ /*00b0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00c0*/ HFMA2.MMA R2, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff027435 */ /* 0x000fe200000001ff */ /*00d0*/ IMAD R3, R3, UR4, R0 ; /* 0x0000000403037c24 */ /* 0x000fe2000f8e0200 */ /*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fd00000000a00 */ /*00f0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */ /* 0x000fca00078e0202 */ /*0100*/ STG.E.64 [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x000fe2000c101b06 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10initKernelPdi .globl _Z10initKernelPdi .p2align 8 .type _Z10initKernelPdi,@function _Z10initKernelPdi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s3, s[0:1], 0x8 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s4, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] s_add_i32 s2, s3, 2 s_mov_b32 s3, exec_lo v_max_i32_e32 v2, v0, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_gt_i32_e64 s2, v2 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[0:1], 3, v[2:3] v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_mov_b32_e32 v3, v2 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b64 v[0:1], v[2:3], off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10initKernelPdi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 
.amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10initKernelPdi, .Lfunc_end0-_Z10initKernelPdi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 
.language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10initKernelPdi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10initKernelPdi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00066783_00000000-6_initKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z10initKernelPdiPdi .type _Z31__device_stub__Z10initKernelPdiPdi, @function _Z31__device_stub__Z10initKernelPdiPdi: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z10initKernelPdi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z10initKernelPdiPdi, .-_Z31__device_stub__Z10initKernelPdiPdi .globl _Z10initKernelPdi .type _Z10initKernelPdi, @function _Z10initKernelPdi: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z10initKernelPdiPdi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10initKernelPdi, .-_Z10initKernelPdi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: 
.string "_Z10initKernelPdi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10initKernelPdi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "initKernel.hip" .globl _Z25__device_stub__initKernelPdi # -- Begin function _Z25__device_stub__initKernelPdi .p2align 4, 0x90 .type _Z25__device_stub__initKernelPdi,@function _Z25__device_stub__initKernelPdi: # @_Z25__device_stub__initKernelPdi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z10initKernelPdi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z25__device_stub__initKernelPdi, .Lfunc_end0-_Z25__device_stub__initKernelPdi .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10initKernelPdi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 
16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10initKernelPdi,@object # @_Z10initKernelPdi .section .rodata,"a",@progbits .globl _Z10initKernelPdi .p2align 3, 0x0 _Z10initKernelPdi: .quad _Z25__device_stub__initKernelPdi .size _Z10initKernelPdi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10initKernelPdi" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__initKernelPdi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10initKernelPdi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void addKernel(float *c, const float *a, const float *b,int nx,int ny) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int i = y*nx + x; if (y < ny && x < nx) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(float* mat_a,float* mat_b,float* mat_c,int nx,int ny) { dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); addKernel<<<grid, block>>>(mat_c,mat_a,mat_b,nx,ny); }
code for sm_80 Function : _Z9addKernelPfPKfS1_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e680000002100 */ /*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e620000002500 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */ /* 0x002fca00078e0200 */ /*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */ /* 0x000fe200078e0200 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x170] ; /* 0x00005c0000047625 */ /* 0x000fc800078e0207 */ /*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x0c0fe400078e0207 */ /*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */ /* 0x000fc800078e0207 */ /*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 
0x150; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void addKernel(float *c, const float *a, const float *b,int nx,int ny) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int i = y*nx + x; if (y < ny && x < nx) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(float* mat_a,float* mat_b,float* mat_c,int nx,int ny) { dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); addKernel<<<grid, block>>>(mat_c,mat_a,mat_b,nx,ny); }
.file "tmpxft_000be7ec_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii .type _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii, @function _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9addKernelPfPKfS1_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii, .-_Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii .globl _Z9addKernelPfPKfS1_ii .type _Z9addKernelPfPKfS1_ii, @function _Z9addKernelPfPKfS1_ii: .LFB2083: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9addKernelPfPKfS1_ii, .-_Z9addKernelPfPKfS1_ii .globl _Z11addWithCudaPfS_S_ii .type _Z11addWithCudaPfS_S_ii, @function _Z11addWithCudaPfS_S_ii: .LFB2057: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $32, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r13 movq %rsi, %r14 movq %rdx, %r12 movl %ecx, %ebx movl %r8d, %ebp leal 31(%rcx), %eax shrl $5, %eax movl %eax, 20(%rsp) leal 31(%r8), %eax shrl $5, %eax movl %eax, 24(%rsp) movl $32, 8(%rsp) movl $32, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L11: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movl %ebp, %r8d movl %ebx, %ecx movq %r14, %rdx movq %r13, %rsi movq %r12, %rdi call _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii jmp .L11 .cfi_endproc .LFE2057: .size _Z11addWithCudaPfS_S_ii, .-_Z11addWithCudaPfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z9addKernelPfPKfS1_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 
pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z9addKernelPfPKfS1_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void addKernel(float *c, const float *a, const float *b,int nx,int ny) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int i = y*nx + x; if (y < ny && x < nx) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(float* mat_a,float* mat_b,float* mat_c,int nx,int ny) { dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); addKernel<<<grid, block>>>(mat_c,mat_a,mat_b,nx,ny); }
#include "hip/hip_runtime.h" #include <stdio.h> __global__ void addKernel(float *c, const float *a, const float *b,int nx,int ny) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int i = y*nx + x; if (y < ny && x < nx) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(float* mat_a,float* mat_b,float* mat_c,int nx,int ny) { dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); addKernel<<<grid, block>>>(mat_c,mat_a,mat_b,nx,ny); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include "hip/hip_runtime.h" #include <stdio.h> __global__ void addKernel(float *c, const float *a, const float *b,int nx,int ny) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int i = y*nx + x; if (y < ny && x < nx) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(float* mat_a,float* mat_b,float* mat_c,int nx,int ny) { dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); addKernel<<<grid, block>>>(mat_c,mat_a,mat_b,nx,ny); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9addKernelPfPKfS1_ii .globl _Z9addKernelPfPKfS1_ii .p2align 8 .type _Z9addKernelPfPKfS1_ii,@function _Z9addKernelPfPKfS1_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b64 s[4:5], s[0:1], 0x18 v_bfe_u32 v2, v0, 10, 10 v_and_b32_e32 v3, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[0:1], null, s15, s2, v[2:3] v_mad_u64_u32 v[1:2], null, s14, s3, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s5, v0 v_cmp_gt_i32_e64 s2, s4, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, s2, vcc_lo s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[8:11], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v0, s4, v[1:2] s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s10, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s11, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s8, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9addKernelPfPKfS1_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 
0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9addKernelPfPKfS1_ii, .Lfunc_end0-_Z9addKernelPfPKfS1_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - 
.offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9addKernelPfPKfS1_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z9addKernelPfPKfS1_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include "hip/hip_runtime.h" #include <stdio.h> __global__ void addKernel(float *c, const float *a, const float *b,int nx,int ny) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int i = y*nx + x; if (y < ny && x < nx) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(float* mat_a,float* mat_b,float* mat_c,int nx,int ny) { dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); addKernel<<<grid, block>>>(mat_c,mat_a,mat_b,nx,ny); }
.text .file "kernel.hip" .globl _Z24__device_stub__addKernelPfPKfS1_ii # -- Begin function _Z24__device_stub__addKernelPfPKfS1_ii .p2align 4, 0x90 .type _Z24__device_stub__addKernelPfPKfS1_ii,@function _Z24__device_stub__addKernelPfPKfS1_ii: # @_Z24__device_stub__addKernelPfPKfS1_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9addKernelPfPKfS1_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z24__device_stub__addKernelPfPKfS1_ii, .Lfunc_end0-_Z24__device_stub__addKernelPfPKfS1_ii .cfi_endproc # -- End function .globl _Z11addWithCudaPfS_S_ii # -- Begin function _Z11addWithCudaPfS_S_ii .p2align 4, 0x90 .type _Z11addWithCudaPfS_S_ii,@function _Z11addWithCudaPfS_S_ii: # @_Z11addWithCudaPfS_S_ii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $128, %rsp .cfi_def_cfa_offset 176 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %r8d, %ebx movl %ecx, %r14d movq %rdx, %r12 movq %rsi, %r15 movq %rdi, %r13 leal 31(%r14), %eax shrl $5, %eax leal 31(%rbx), %edi shrl $5, %edi shlq $32, %rdi orq %rax, %rdi movabsq $137438953504, %rdx # imm = 0x2000000020 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq 
__hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq %r12, 72(%rsp) movq %r13, 64(%rsp) movq %r15, 56(%rsp) movl %r14d, 4(%rsp) movl %ebx, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9addKernelPfPKfS1_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $128, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z11addWithCudaPfS_S_ii, .Lfunc_end1-_Z11addWithCudaPfS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9addKernelPfPKfS1_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor 
.cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9addKernelPfPKfS1_ii,@object # @_Z9addKernelPfPKfS1_ii .section .rodata,"a",@progbits .globl _Z9addKernelPfPKfS1_ii .p2align 3, 0x0 _Z9addKernelPfPKfS1_ii: .quad _Z24__device_stub__addKernelPfPKfS1_ii .size _Z9addKernelPfPKfS1_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9addKernelPfPKfS1_ii" .size .L__unnamed_1, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__addKernelPfPKfS1_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9addKernelPfPKfS1_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9addKernelPfPKfS1_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e680000002100 */ /*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e620000002500 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */ /* 0x002fca00078e0200 */ /*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */ /* 0x000fe200078e0200 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x170] ; /* 0x00005c0000047625 */ /* 0x000fc800078e0207 */ /*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x0c0fe400078e0207 */ /*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */ /* 0x000fc800078e0207 */ /*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 
0x150; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9addKernelPfPKfS1_ii .globl _Z9addKernelPfPKfS1_ii .p2align 8 .type _Z9addKernelPfPKfS1_ii,@function _Z9addKernelPfPKfS1_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b64 s[4:5], s[0:1], 0x18 v_bfe_u32 v2, v0, 10, 10 v_and_b32_e32 v3, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[0:1], null, s15, s2, v[2:3] v_mad_u64_u32 v[1:2], null, s14, s3, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s5, v0 v_cmp_gt_i32_e64 s2, s4, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, s2, vcc_lo s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[8:11], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v0, s4, v[1:2] s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s10, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s11, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s8, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9addKernelPfPKfS1_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 
0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9addKernelPfPKfS1_ii, .Lfunc_end0-_Z9addKernelPfPKfS1_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - 
.offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9addKernelPfPKfS1_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z9addKernelPfPKfS1_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000be7ec_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii .type _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii, @function _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9addKernelPfPKfS1_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii, .-_Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii .globl _Z9addKernelPfPKfS1_ii .type _Z9addKernelPfPKfS1_ii, @function _Z9addKernelPfPKfS1_ii: .LFB2083: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9addKernelPfPKfS1_ii, .-_Z9addKernelPfPKfS1_ii .globl _Z11addWithCudaPfS_S_ii .type _Z11addWithCudaPfS_S_ii, @function _Z11addWithCudaPfS_S_ii: .LFB2057: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $32, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r13 movq %rsi, %r14 movq %rdx, %r12 movl %ecx, %ebx movl %r8d, %ebp leal 31(%rcx), %eax shrl $5, %eax movl %eax, 20(%rsp) leal 31(%r8), %eax shrl $5, %eax movl %eax, 24(%rsp) movl $32, 8(%rsp) movl $32, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L11: addq $32, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state movl %ebp, %r8d movl %ebx, %ecx movq %r14, %rdx movq %r13, %rsi movq %r12, %rdi call _Z36__device_stub__Z9addKernelPfPKfS1_iiPfPKfS1_ii jmp .L11 .cfi_endproc .LFE2057: .size _Z11addWithCudaPfS_S_ii, .-_Z11addWithCudaPfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z9addKernelPfPKfS1_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 
pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z9addKernelPfPKfS1_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel.hip" .globl _Z24__device_stub__addKernelPfPKfS1_ii # -- Begin function _Z24__device_stub__addKernelPfPKfS1_ii .p2align 4, 0x90 .type _Z24__device_stub__addKernelPfPKfS1_ii,@function _Z24__device_stub__addKernelPfPKfS1_ii: # @_Z24__device_stub__addKernelPfPKfS1_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9addKernelPfPKfS1_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z24__device_stub__addKernelPfPKfS1_ii, .Lfunc_end0-_Z24__device_stub__addKernelPfPKfS1_ii .cfi_endproc # -- End function .globl _Z11addWithCudaPfS_S_ii # -- Begin function _Z11addWithCudaPfS_S_ii .p2align 4, 0x90 .type _Z11addWithCudaPfS_S_ii,@function _Z11addWithCudaPfS_S_ii: # @_Z11addWithCudaPfS_S_ii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $128, %rsp .cfi_def_cfa_offset 176 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %r8d, %ebx movl %ecx, %r14d movq %rdx, %r12 movq %rsi, %r15 movq %rdi, %r13 leal 31(%r14), %eax shrl $5, %eax leal 31(%rbx), %edi shrl $5, %edi shlq $32, %rdi orq %rax, %rdi movabsq $137438953504, %rdx # imm = 0x2000000020 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq 
__hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq %r12, 72(%rsp) movq %r13, 64(%rsp) movq %r15, 56(%rsp) movl %r14d, 4(%rsp) movl %ebx, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9addKernelPfPKfS1_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $128, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z11addWithCudaPfS_S_ii, .Lfunc_end1-_Z11addWithCudaPfS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9addKernelPfPKfS1_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor 
.cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9addKernelPfPKfS1_ii,@object # @_Z9addKernelPfPKfS1_ii .section .rodata,"a",@progbits .globl _Z9addKernelPfPKfS1_ii .p2align 3, 0x0 _Z9addKernelPfPKfS1_ii: .quad _Z24__device_stub__addKernelPfPKfS1_ii .size _Z9addKernelPfPKfS1_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9addKernelPfPKfS1_ii" .size .L__unnamed_1, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__addKernelPfPKfS1_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9addKernelPfPKfS1_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void matrix_multiply_tiling_cuda(int* A, int* B, int* C, int m, int n) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = n * blockDim.y * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + n - 1; // Step size used to iterate through the sub-matrices of A int aStep = blockDim.x; // Index of the first sub-matrix of B processed by the block int bBegin = blockDim.x * bx; // Step size used to iterate through the sub-matrices of B int bStep = blockDim.y * m; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A // Suppose to be As[blockDim.y][blockDim.x] but need dynamic allocation // For simplicity, use a macro here __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // Suppose to be Bs[blockDim.x][blockDim.y] but need dynamic allocation // For simplicity, use a macro here __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + m * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < blockDim.x; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // 
Write the block sub-matrix to device memory; // each thread writes one element int c = m * blockDim.y * by + blockDim.x * bx; C[c + m * ty + tx] = Csub; }
code for sm_80 Function : _Z27matrix_multiply_tiling_cudaPiS_S_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2UR UR5, SR_CTAID.X ; /* 0x00000000000579c3 */ /* 0x000e220000002500 */ /*0020*/ S2R R20, SR_TID.X ; /* 0x0000000000147919 */ /* 0x000e620000002100 */ /*0030*/ ISETP.LT.AND P0, PT, RZ, c[0x0][0x17c], PT ; /* 0x00005f00ff007a0c */ /* 0x000fe20003f01270 */ /*0040*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0050*/ MOV R18, c[0x0][0x4] ; /* 0x0000010000127a02 */ /* 0x000fe20000000f00 */ /*0060*/ S2R R17, SR_TID.Y ; /* 0x0000000000117919 */ /* 0x000ea20000002200 */ /*0070*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */ /* 0x000fe40000000a00 */ /*0080*/ S2UR UR7, SR_CTAID.Y ; /* 0x00000000000779c3 */ /* 0x000ee20000002600 */ /*0090*/ IMAD R18, R18, c[0x0][0x178], RZ ; /* 0x00005e0012127a24 */ /* 0x000fcc00078e02ff */ /*00a0*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff098224 */ /* 0x000fe200078e00ff */ /*00b0*/ UIMAD UR5, UR5, UR4, URZ ; /* 0x00000004050572a4 */ /* 0x001fe2000f8e023f */ /*00c0*/ @!P0 BRA 0x520 ; /* 0x0000045000008947 */ /* 0x008fea0003800000 */ /*00d0*/ ULDC UR4, c[0x0][0x4] ; /* 0x0000010000047ab9 */ /* 0x002fe20000000800 */ /*00e0*/ MOV R12, c[0x0][0x0] ; /* 0x00000000000c7a02 */ /* 0x000fe20000000f00 */ /*00f0*/ UIMAD UR4, UR7, UR4, URZ ; /* 0x00000004070472a4 */ /* 0x000fe2000f8e023f */ /*0100*/ IMAD.SHL.U32 R15, R17.reuse, 0x80, RZ ; /* 0x00000080110f7824 */ /* 0x044fe200078e00ff */ /*0110*/ ULDC UR6, c[0x0][0x17c] ; /* 0x00005f0000067ab9 */ /* 0x000fe20000000800 */ /*0120*/ IADD3 R0, R12.reuse, -0x1, RZ ; /* 0xffffffff0c007810 */ /* 0x040fe20007ffe0ff */ /*0130*/ UIMAD UR4, UR4, UR6, URZ ; /* 0x00000006040472a4 */ /* 0x000fe2000f8e023f */ /*0140*/ LOP3.LUT R12, R12, 0x3, RZ, 0xc0, !PT ; /* 0x000000030c0c7812 */ /* 0x000fe200078ec0ff */ 
/*0150*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */ /* 0x000fe200000001ff */ /*0160*/ ISETP.GE.U32.AND P1, PT, R0, 0x3, PT ; /* 0x000000030000780c */ /* 0x000fe20003f26070 */ /*0170*/ IMAD R16, R17.reuse, c[0x0][0x17c], R20.reuse ; /* 0x00005f0011107a24 */ /* 0x140fe200078e0214 */ /*0180*/ MOV R11, UR5 ; /* 0x00000005000b7c02 */ /* 0x000fe20008000f00 */ /*0190*/ IMAD.U32 R13, RZ, RZ, UR4 ; /* 0x00000004ff0d7e24 */ /* 0x000fe2000f8e00ff */ /*01a0*/ IADD3 R8, -R12, c[0x0][0x0], RZ ; /* 0x000000000c087a10 */ /* 0x000fe20007ffe1ff */ /*01b0*/ IMAD R14, R17, c[0x0][0x178], R20 ; /* 0x00005e00110e7a24 */ /* 0x000fc400078e0214 */ /*01c0*/ IMAD R10, R20, 0x4, R15 ; /* 0x00000004140a7824 */ /* 0x000fe200078e020f */ /*01d0*/ IADD3 R0, R13, c[0x0][0x17c], RZ ; /* 0x00005f000d007a10 */ /* 0x000fe40007ffe0ff */ /*01e0*/ IADD3 R2, R16, R13, RZ ; /* 0x0000000d10027210 */ /* 0x000fe20007ffe0ff */ /*01f0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe200078e00ff */ /*0200*/ IADD3 R4, R14, R11, RZ ; /* 0x0000000b0e047210 */ /* 0x000fc60007ffe0ff */ /*0210*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fc800078e0205 */ /*0220*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fe400078e0205 */ /*0230*/ LDG.E R3, [R2.64] ; /* 0x0000000802037981 */ /* 0x000ea8000c1e1900 */ /*0240*/ LDG.E R5, [R4.64] ; /* 0x0000000804057981 */ /* 0x000ee2000c1e1900 */ /*0250*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fe40003f45270 */ /*0260*/ IADD3 R13, R13, c[0x0][0x0], RZ ; /* 0x000000000d0d7a10 */ /* 0x000fc80007ffe0ff */ /*0270*/ ISETP.GE.AND P0, PT, R13, R0, PT ; /* 0x000000000d00720c */ /* 0x000fe20003f06270 */ /*0280*/ STS [R10], R3 ; /* 0x000000030a007388 */ /* 0x0041e80000000800 */ /*0290*/ STS [R10+0x1000], R5 ; /* 0x001000050a007388 */ /* 0x0081e80000000800 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ @!P2 BRA 0x4f0 ; /* 
0x000002300000a947 */ /* 0x000fea0003800000 */ /*02c0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x001fe20008000000 */ /*02d0*/ ISETP.NE.AND P3, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fe20003f65270 */ /*02e0*/ @!P1 BRA 0x410 ; /* 0x0000012000009947 */ /* 0x000fd80003800000 */ /*02f0*/ IMAD.MOV.U32 R2, RZ, RZ, R8 ; /* 0x000000ffff027224 */ /* 0x000fe200078e0008 */ /*0300*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fc40008000000 */ /*0310*/ MOV R4, UR4 ; /* 0x0000000400047c02 */ /* 0x000fe20008000f00 */ /*0320*/ ULEA UR6, UR4, 0x1000, 0x7 ; /* 0x0000100004067891 */ /* 0x000fe2000f8e383f */ /*0330*/ IADD3 R2, R2, -0x4, RZ ; /* 0xfffffffc02027810 */ /* 0x000fe20007ffe0ff */ /*0340*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fc4000fffe03f */ /*0350*/ IMAD R4, R4, 0x4, R15 ; /* 0x0000000404047824 */ /* 0x000fe200078e020f */ /*0360*/ ISETP.NE.AND P2, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fc80003f45270 */ /*0370*/ LDS R3, [R20.X4+UR6] ; /* 0x0000000614037984 */ /* 0x000fe80008004800 */ /*0380*/ LDS.128 R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e280000000c00 */ /*0390*/ LDS R19, [R20.X4+UR6+0x80] ; /* 0x0000800614137984 */ /* 0x000e680008004800 */ /*03a0*/ LDS R21, [R20.X4+UR6+0x100] ; /* 0x0001000614157984 */ /* 0x000ea80008004800 */ /*03b0*/ LDS R23, [R20.X4+UR6+0x180] ; /* 0x0001800614177984 */ /* 0x000ee20008004800 */ /*03c0*/ IMAD R22, R3, R4, R9 ; /* 0x0000000403167224 */ /* 0x001fc800078e0209 */ /*03d0*/ IMAD R5, R19, R5, R22 ; /* 0x0000000513057224 */ /* 0x002fc800078e0216 */ /*03e0*/ IMAD R6, R21, R6, R5 ; /* 0x0000000615067224 */ /* 0x004fc800078e0205 */ /*03f0*/ IMAD R9, R23, R7, R6 ; /* 0x0000000717097224 */ /* 0x008fe200078e0206 */ /*0400*/ @P2 BRA 0x310 ; /* 0xffffff0000002947 */ /* 0x000fea000383ffff */ /*0410*/ @!P3 BRA 0x4f0 ; /* 0x000000d00000b947 */ /* 0x000fea0003800000 */ /*0420*/ MOV R4, UR4 ; /* 0x0000000400047c02 */ /* 0x000fe20008000f00 */ /*0430*/ ULEA UR6, UR4, 0x1000, 0x7 ; /* 
0x0000100004067891 */ /* 0x000fe2000f8e383f */ /*0440*/ ISETP.NE.AND P2, PT, R12, 0x1, PT ; /* 0x000000010c00780c */ /* 0x000fc60003f45270 */ /*0450*/ IMAD R4, R4, 0x4, R15 ; /* 0x0000000404047824 */ /* 0x000fca00078e020f */ /*0460*/ LDS R2, [R20.X4+UR6] ; /* 0x0000000614027984 */ /* 0x000fe80008004800 */ /*0470*/ LDS.128 R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e240000000c00 */ /*0480*/ IMAD R9, R2, R4, R9 ; /* 0x0000000402097224 */ /* 0x001fe200078e0209 */ /*0490*/ @!P2 BRA 0x4f0 ; /* 0x000000500000a947 */ /* 0x000fea0003800000 */ /*04a0*/ ISETP.NE.AND P2, PT, R12, 0x2, PT ; /* 0x000000020c00780c */ /* 0x000fe20003f45270 */ /*04b0*/ LDS R2, [R20.X4+UR6+0x80] ; /* 0x0000800614027984 */ /* 0x000e180008004800 */ /*04c0*/ @P2 LDS R3, [R20.X4+UR6+0x100] ; /* 0x0001000614032984 */ /* 0x000e620008004800 */ /*04d0*/ IMAD R9, R2, R5, R9 ; /* 0x0000000502097224 */ /* 0x001fc800078e0209 */ /*04e0*/ @P2 IMAD R9, R3, R6, R9 ; /* 0x0000000603092224 */ /* 0x002fe400078e0209 */ /*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x001fe20000010000 */ /*0500*/ IADD3 R11, R18, R11, RZ ; /* 0x0000000b120b7210 */ /* 0x000fca0007ffe0ff */ /*0510*/ @!P0 BRA 0x1e0 ; /* 0xfffffcc000008947 */ /* 0x000fea000383ffff */ /*0520*/ IADD3 R20, R20, UR5, RZ ; /* 0x0000000514147c10 */ /* 0x002fe2000fffe0ff */ /*0530*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc800078e00ff */ /*0540*/ IMAD R17, R17, c[0x0][0x178], R20 ; /* 0x00005e0011117a24 */ /* 0x004fc800078e0214 */ /*0550*/ IMAD R2, R18, UR7, R17 ; /* 0x0000000712027c24 */ /* 0x000fc8000f8e0211 */ /*0560*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fca00078e0203 */ /*0570*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe2000c101908 */ /*0580*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0590*/ BRA 0x590; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 
*/ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0600*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0610*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0620*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0630*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0640*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0650*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0660*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0670*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void matrix_multiply_tiling_cuda(int* A, int* B, int* C, int m, int n) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = n * blockDim.y * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + n - 1; // Step size used to iterate through the sub-matrices of A int aStep = blockDim.x; // Index of the first sub-matrix of B processed by the block int bBegin = blockDim.x * bx; // Step size used to iterate through the sub-matrices of B int bStep = blockDim.y * m; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A // Suppose to be As[blockDim.y][blockDim.x] but need dynamic allocation // For simplicity, use a macro here __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // Suppose to be Bs[blockDim.x][blockDim.y] but need dynamic allocation // For simplicity, use a macro here __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + m * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < blockDim.x; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // 
Write the block sub-matrix to device memory; // each thread writes one element int c = m * blockDim.y * by + blockDim.x * bx; C[c + m * ty + tx] = Csub; }
.file "tmpxft_0014b443_00000000-6_matrix_multiply_tiling_cuda.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii .type _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii, @function _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z27matrix_multiply_tiling_cudaPiS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii, .-_Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii 
.globl _Z27matrix_multiply_tiling_cudaPiS_S_ii .type _Z27matrix_multiply_tiling_cudaPiS_S_ii, @function _Z27matrix_multiply_tiling_cudaPiS_S_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z27matrix_multiply_tiling_cudaPiS_S_ii, .-_Z27matrix_multiply_tiling_cudaPiS_S_ii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z27matrix_multiply_tiling_cudaPiS_S_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z27matrix_multiply_tiling_cudaPiS_S_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: 
.align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void matrix_multiply_tiling_cuda(int* A, int* B, int* C, int m, int n) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = n * blockDim.y * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + n - 1; // Step size used to iterate through the sub-matrices of A int aStep = blockDim.x; // Index of the first sub-matrix of B processed by the block int bBegin = blockDim.x * bx; // Step size used to iterate through the sub-matrices of B int bStep = blockDim.y * m; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A // Suppose to be As[blockDim.y][blockDim.x] but need dynamic allocation // For simplicity, use a macro here __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // Suppose to be Bs[blockDim.x][blockDim.y] but need dynamic allocation // For simplicity, use a macro here __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + m * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < blockDim.x; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // 
Write the block sub-matrix to device memory; // each thread writes one element int c = m * blockDim.y * by + blockDim.x * bx; C[c + m * ty + tx] = Csub; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void matrix_multiply_tiling_cuda(int* A, int* B, int* C, int m, int n) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = n * blockDim.y * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + n - 1; // Step size used to iterate through the sub-matrices of A int aStep = blockDim.x; // Index of the first sub-matrix of B processed by the block int bBegin = blockDim.x * bx; // Step size used to iterate through the sub-matrices of B int bStep = blockDim.y * m; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A // Suppose to be As[blockDim.y][blockDim.x] but need dynamic allocation // For simplicity, use a macro here __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // Suppose to be Bs[blockDim.x][blockDim.y] but need dynamic allocation // For simplicity, use a macro here __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + m * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < blockDim.x; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next 
iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = m * blockDim.y * by + blockDim.x * bx; C[c + m * ty + tx] = Csub; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void matrix_multiply_tiling_cuda(int* A, int* B, int* C, int m, int n) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = n * blockDim.y * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + n - 1; // Step size used to iterate through the sub-matrices of A int aStep = blockDim.x; // Index of the first sub-matrix of B processed by the block int bBegin = blockDim.x * bx; // Step size used to iterate through the sub-matrices of B int bStep = blockDim.y * m; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A // Suppose to be As[blockDim.y][blockDim.x] but need dynamic allocation // For simplicity, use a macro here __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // Suppose to be Bs[blockDim.x][blockDim.y] but need dynamic allocation // For simplicity, use a macro here __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + m * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < blockDim.x; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next 
iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = m * blockDim.y * by + blockDim.x * bx; C[c + m * ty + tx] = Csub; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z27matrix_multiply_tiling_cudaPiS_S_ii .globl _Z27matrix_multiply_tiling_cudaPiS_S_ii .p2align 8 .type _Z27matrix_multiply_tiling_cudaPiS_S_ii,@function _Z27matrix_multiply_tiling_cudaPiS_S_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b64 s[6:7], s[0:1], 0x18 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v5, v0, 10, 10 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s13, s2, 0xffff s_cmp_lt_i32 s7, 1 s_mul_i32 s5, s14, s13 s_mul_i32 s12, s3, s6 s_cbranch_scc1 .LBB0_11 s_mul_i32 s4, s15, s7 s_load_b128 s[8:11], s[0:1], 0x0 s_mul_i32 s14, s4, s3 v_mad_u64_u32 v[2:3], null, v5, s7, v[1:2] s_add_i32 s16, s14, s7 s_and_b32 s7, s13, 7 s_cmp_gt_u32 s13, 7 v_cmp_ne_u16_e64 s2, s2, 0 v_lshlrev_b32_e32 v0, 2, v1 s_cselect_b32 s3, -1, 0 s_and_b32 s17, s13, 0xfff8 s_cmp_lg_u32 s7, 0 v_cndmask_b32_e64 v9, 0, 1, s2 s_cselect_b32 s2, -1, 0 v_mad_u64_u32 v[3:4], null, v5, s6, v[1:2] v_lshlrev_b32_e32 v6, 7, v5 v_or_b32_e32 v4, 0x1000, v0 v_cndmask_b32_e64 v10, 0, 1, s3 v_cndmask_b32_e64 v11, 0, 1, s2 v_cmp_ne_u32_e64 s2, 1, v9 v_dual_mov_b32 v0, 0 :: v_dual_add_nc_u32 v7, v6, v0 v_add_nc_u32_e32 v8, v4, v6 v_cmp_ne_u32_e64 s3, 1, v10 v_cmp_ne_u32_e64 s4, 1, v11 s_mov_b32 s18, s5 s_branch .LBB0_3 .LBB0_2: s_add_i32 s14, s14, s13 s_add_i32 s18, s18, s12 s_cmp_ge_i32 s14, s16 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_12 .LBB0_3: v_add_nc_u32_e32 v9, s14, v2 v_add_nc_u32_e32 v11, s18, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v10, 31, v9 v_ashrrev_i32_e32 v12, 31, v11 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[9:10], 2, v[9:10] v_lshlrev_b64 v[11:12], 2, v[11:12] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v9, vcc_lo, s8, v9 v_add_co_ci_u32_e32 v10, vcc_lo, s9, v10, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v11, 
vcc_lo, s10, v11 v_add_co_ci_u32_e32 v12, vcc_lo, s11, v12, vcc_lo s_and_b32 vcc_lo, exec_lo, s2 global_load_b32 v9, v[9:10], off global_load_b32 v10, v[11:12], off s_waitcnt vmcnt(1) ds_store_b32 v7, v9 s_waitcnt vmcnt(0) ds_store_b32 v8, v10 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_vccnz .LBB0_2 s_and_b32 vcc_lo, exec_lo, s3 s_cbranch_vccnz .LBB0_8 v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v10, v6 s_mov_b32 s19, 0 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_6: ds_load_2addr_b32 v[11:12], v9 offset1:32 ds_load_2addr_b32 v[13:14], v10 offset1:1 ds_load_2addr_b32 v[15:16], v9 offset0:64 offset1:96 ds_load_2addr_b32 v[17:18], v10 offset0:2 offset1:3 ds_load_2addr_b32 v[19:20], v9 offset0:128 offset1:160 ds_load_2addr_b32 v[21:22], v10 offset0:4 offset1:5 ds_load_2addr_b32 v[23:24], v10 offset0:6 offset1:7 ds_load_2addr_b32 v[25:26], v9 offset0:192 offset1:224 v_add_nc_u32_e32 v10, 32, v10 v_add_nc_u32_e32 v9, 0x400, v9 s_add_i32 s19, s19, 8 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s17, s19 s_waitcnt lgkmcnt(6) v_mul_lo_u32 v11, v11, v13 v_mul_lo_u32 v12, v12, v14 s_waitcnt lgkmcnt(4) v_mul_lo_u32 v13, v15, v17 v_mul_lo_u32 v14, v16, v18 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_add3_u32 v0, v11, v0, v12 s_waitcnt lgkmcnt(2) v_mul_lo_u32 v11, v19, v21 v_mul_lo_u32 v12, v20, v22 v_add3_u32 v0, v13, v0, v14 s_waitcnt lgkmcnt(0) v_mul_lo_u32 v13, v25, v23 v_mul_lo_u32 v14, v26, v24 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v0, v11, v0, v12 v_add3_u32 v0, v13, v0, v14 s_cbranch_scc0 .LBB0_6 s_set_inst_prefetch_distance 0x2 s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccz .LBB0_9 s_branch .LBB0_2 .LBB0_8: s_mov_b32 s19, 0 s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccnz .LBB0_2 .LBB0_9: v_lshl_add_u32 v9, s19, 7, v4 v_lshl_add_u32 v10, s19, 2, v6 s_mov_b32 s19, s7 .LBB0_10: ds_load_b32 v13, v10 ds_load_b32 v14, v9 v_add_nc_u32_e32 v10, 4, v10 s_add_i32 s19, s19, -1 s_delay_alu 
instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_lg_u32 s19, 0 s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[11:12], null, v14, v13, v[0:1] v_dual_mov_b32 v0, v11 :: v_dual_add_nc_u32 v9, 0x80, v9 s_cbranch_scc1 .LBB0_10 s_branch .LBB0_2 .LBB0_11: v_mov_b32_e32 v0, 0 .LBB0_12: v_mad_u64_u32 v[2:3], null, v5, s6, v[1:2] s_load_b64 s[0:1], s[0:1], 0x10 s_mul_i32 s12, s12, s15 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_add3_u32 v1, s12, s5, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_store_b32 v[1:2], v0, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z27matrix_multiply_tiling_cudaPiS_S_ii .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 27 .amdhsa_next_free_sgpr 20 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 
.amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z27matrix_multiply_tiling_cudaPiS_S_ii, .Lfunc_end0-_Z27matrix_multiply_tiling_cudaPiS_S_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z27matrix_multiply_tiling_cudaPiS_S_ii .private_segment_fixed_size: 0 .sgpr_count: 22 .sgpr_spill_count: 0 .symbol: _Z27matrix_multiply_tiling_cudaPiS_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 27 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void matrix_multiply_tiling_cuda(int* A, int* B, int* C, int m, int n) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = n * blockDim.y * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + n - 1; // Step size used to iterate through the sub-matrices of A int aStep = blockDim.x; // Index of the first sub-matrix of B processed by the block int bBegin = blockDim.x * bx; // Step size used to iterate through the sub-matrices of B int bStep = blockDim.y * m; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A // Suppose to be As[blockDim.y][blockDim.x] but need dynamic allocation // For simplicity, use a macro here __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // Suppose to be Bs[blockDim.x][blockDim.y] but need dynamic allocation // For simplicity, use a macro here __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + n * ty + tx]; Bs[ty][tx] = B[b + m * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < blockDim.x; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next 
iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = m * blockDim.y * by + blockDim.x * bx; C[c + m * ty + tx] = Csub; }
.text .file "matrix_multiply_tiling_cuda.hip" .globl _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii # -- Begin function _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .p2align 4, 0x90 .type _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii,@function _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii: # @_Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z27matrix_multiply_tiling_cudaPiS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii, .Lfunc_end0-_Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z27matrix_multiply_tiling_cudaPiS_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp 
.cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z27matrix_multiply_tiling_cudaPiS_S_ii,@object # @_Z27matrix_multiply_tiling_cudaPiS_S_ii .section .rodata,"a",@progbits .globl _Z27matrix_multiply_tiling_cudaPiS_S_ii .p2align 3, 0x0 _Z27matrix_multiply_tiling_cudaPiS_S_ii: .quad _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .size _Z27matrix_multiply_tiling_cudaPiS_S_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z27matrix_multiply_tiling_cudaPiS_S_ii" .size .L__unnamed_1, 40 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .addrsig_sym 
__hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z27matrix_multiply_tiling_cudaPiS_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z27matrix_multiply_tiling_cudaPiS_S_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2UR UR5, SR_CTAID.X ; /* 0x00000000000579c3 */ /* 0x000e220000002500 */ /*0020*/ S2R R20, SR_TID.X ; /* 0x0000000000147919 */ /* 0x000e620000002100 */ /*0030*/ ISETP.LT.AND P0, PT, RZ, c[0x0][0x17c], PT ; /* 0x00005f00ff007a0c */ /* 0x000fe20003f01270 */ /*0040*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0050*/ MOV R18, c[0x0][0x4] ; /* 0x0000010000127a02 */ /* 0x000fe20000000f00 */ /*0060*/ S2R R17, SR_TID.Y ; /* 0x0000000000117919 */ /* 0x000ea20000002200 */ /*0070*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */ /* 0x000fe40000000a00 */ /*0080*/ S2UR UR7, SR_CTAID.Y ; /* 0x00000000000779c3 */ /* 0x000ee20000002600 */ /*0090*/ IMAD R18, R18, c[0x0][0x178], RZ ; /* 0x00005e0012127a24 */ /* 0x000fcc00078e02ff */ /*00a0*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff098224 */ /* 0x000fe200078e00ff */ /*00b0*/ UIMAD UR5, UR5, UR4, URZ ; /* 0x00000004050572a4 */ /* 0x001fe2000f8e023f */ /*00c0*/ @!P0 BRA 0x520 ; /* 0x0000045000008947 */ /* 0x008fea0003800000 */ /*00d0*/ ULDC UR4, c[0x0][0x4] ; /* 0x0000010000047ab9 */ /* 0x002fe20000000800 */ /*00e0*/ MOV R12, c[0x0][0x0] ; /* 0x00000000000c7a02 */ /* 0x000fe20000000f00 */ /*00f0*/ UIMAD UR4, UR7, UR4, URZ ; /* 0x00000004070472a4 */ /* 0x000fe2000f8e023f */ /*0100*/ IMAD.SHL.U32 R15, R17.reuse, 0x80, RZ ; /* 0x00000080110f7824 */ /* 0x044fe200078e00ff */ /*0110*/ ULDC UR6, c[0x0][0x17c] ; /* 0x00005f0000067ab9 */ /* 0x000fe20000000800 */ /*0120*/ IADD3 R0, R12.reuse, -0x1, RZ ; /* 0xffffffff0c007810 */ /* 0x040fe20007ffe0ff */ /*0130*/ UIMAD UR4, UR4, UR6, URZ ; /* 0x00000006040472a4 */ /* 0x000fe2000f8e023f */ /*0140*/ LOP3.LUT R12, R12, 0x3, RZ, 0xc0, !PT ; /* 0x000000030c0c7812 */ /* 0x000fe200078ec0ff */ 
/*0150*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */ /* 0x000fe200000001ff */ /*0160*/ ISETP.GE.U32.AND P1, PT, R0, 0x3, PT ; /* 0x000000030000780c */ /* 0x000fe20003f26070 */ /*0170*/ IMAD R16, R17.reuse, c[0x0][0x17c], R20.reuse ; /* 0x00005f0011107a24 */ /* 0x140fe200078e0214 */ /*0180*/ MOV R11, UR5 ; /* 0x00000005000b7c02 */ /* 0x000fe20008000f00 */ /*0190*/ IMAD.U32 R13, RZ, RZ, UR4 ; /* 0x00000004ff0d7e24 */ /* 0x000fe2000f8e00ff */ /*01a0*/ IADD3 R8, -R12, c[0x0][0x0], RZ ; /* 0x000000000c087a10 */ /* 0x000fe20007ffe1ff */ /*01b0*/ IMAD R14, R17, c[0x0][0x178], R20 ; /* 0x00005e00110e7a24 */ /* 0x000fc400078e0214 */ /*01c0*/ IMAD R10, R20, 0x4, R15 ; /* 0x00000004140a7824 */ /* 0x000fe200078e020f */ /*01d0*/ IADD3 R0, R13, c[0x0][0x17c], RZ ; /* 0x00005f000d007a10 */ /* 0x000fe40007ffe0ff */ /*01e0*/ IADD3 R2, R16, R13, RZ ; /* 0x0000000d10027210 */ /* 0x000fe20007ffe0ff */ /*01f0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe200078e00ff */ /*0200*/ IADD3 R4, R14, R11, RZ ; /* 0x0000000b0e047210 */ /* 0x000fc60007ffe0ff */ /*0210*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fc800078e0205 */ /*0220*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fe400078e0205 */ /*0230*/ LDG.E R3, [R2.64] ; /* 0x0000000802037981 */ /* 0x000ea8000c1e1900 */ /*0240*/ LDG.E R5, [R4.64] ; /* 0x0000000804057981 */ /* 0x000ee2000c1e1900 */ /*0250*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fe40003f45270 */ /*0260*/ IADD3 R13, R13, c[0x0][0x0], RZ ; /* 0x000000000d0d7a10 */ /* 0x000fc80007ffe0ff */ /*0270*/ ISETP.GE.AND P0, PT, R13, R0, PT ; /* 0x000000000d00720c */ /* 0x000fe20003f06270 */ /*0280*/ STS [R10], R3 ; /* 0x000000030a007388 */ /* 0x0041e80000000800 */ /*0290*/ STS [R10+0x1000], R5 ; /* 0x001000050a007388 */ /* 0x0081e80000000800 */ /*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*02b0*/ @!P2 BRA 0x4f0 ; /* 
0x000002300000a947 */ /* 0x000fea0003800000 */ /*02c0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x001fe20008000000 */ /*02d0*/ ISETP.NE.AND P3, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fe20003f65270 */ /*02e0*/ @!P1 BRA 0x410 ; /* 0x0000012000009947 */ /* 0x000fd80003800000 */ /*02f0*/ IMAD.MOV.U32 R2, RZ, RZ, R8 ; /* 0x000000ffff027224 */ /* 0x000fe200078e0008 */ /*0300*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */ /* 0x000fc40008000000 */ /*0310*/ MOV R4, UR4 ; /* 0x0000000400047c02 */ /* 0x000fe20008000f00 */ /*0320*/ ULEA UR6, UR4, 0x1000, 0x7 ; /* 0x0000100004067891 */ /* 0x000fe2000f8e383f */ /*0330*/ IADD3 R2, R2, -0x4, RZ ; /* 0xfffffffc02027810 */ /* 0x000fe20007ffe0ff */ /*0340*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */ /* 0x000fc4000fffe03f */ /*0350*/ IMAD R4, R4, 0x4, R15 ; /* 0x0000000404047824 */ /* 0x000fe200078e020f */ /*0360*/ ISETP.NE.AND P2, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fc80003f45270 */ /*0370*/ LDS R3, [R20.X4+UR6] ; /* 0x0000000614037984 */ /* 0x000fe80008004800 */ /*0380*/ LDS.128 R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e280000000c00 */ /*0390*/ LDS R19, [R20.X4+UR6+0x80] ; /* 0x0000800614137984 */ /* 0x000e680008004800 */ /*03a0*/ LDS R21, [R20.X4+UR6+0x100] ; /* 0x0001000614157984 */ /* 0x000ea80008004800 */ /*03b0*/ LDS R23, [R20.X4+UR6+0x180] ; /* 0x0001800614177984 */ /* 0x000ee20008004800 */ /*03c0*/ IMAD R22, R3, R4, R9 ; /* 0x0000000403167224 */ /* 0x001fc800078e0209 */ /*03d0*/ IMAD R5, R19, R5, R22 ; /* 0x0000000513057224 */ /* 0x002fc800078e0216 */ /*03e0*/ IMAD R6, R21, R6, R5 ; /* 0x0000000615067224 */ /* 0x004fc800078e0205 */ /*03f0*/ IMAD R9, R23, R7, R6 ; /* 0x0000000717097224 */ /* 0x008fe200078e0206 */ /*0400*/ @P2 BRA 0x310 ; /* 0xffffff0000002947 */ /* 0x000fea000383ffff */ /*0410*/ @!P3 BRA 0x4f0 ; /* 0x000000d00000b947 */ /* 0x000fea0003800000 */ /*0420*/ MOV R4, UR4 ; /* 0x0000000400047c02 */ /* 0x000fe20008000f00 */ /*0430*/ ULEA UR6, UR4, 0x1000, 0x7 ; /* 
0x0000100004067891 */ /* 0x000fe2000f8e383f */ /*0440*/ ISETP.NE.AND P2, PT, R12, 0x1, PT ; /* 0x000000010c00780c */ /* 0x000fc60003f45270 */ /*0450*/ IMAD R4, R4, 0x4, R15 ; /* 0x0000000404047824 */ /* 0x000fca00078e020f */ /*0460*/ LDS R2, [R20.X4+UR6] ; /* 0x0000000614027984 */ /* 0x000fe80008004800 */ /*0470*/ LDS.128 R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e240000000c00 */ /*0480*/ IMAD R9, R2, R4, R9 ; /* 0x0000000402097224 */ /* 0x001fe200078e0209 */ /*0490*/ @!P2 BRA 0x4f0 ; /* 0x000000500000a947 */ /* 0x000fea0003800000 */ /*04a0*/ ISETP.NE.AND P2, PT, R12, 0x2, PT ; /* 0x000000020c00780c */ /* 0x000fe20003f45270 */ /*04b0*/ LDS R2, [R20.X4+UR6+0x80] ; /* 0x0000800614027984 */ /* 0x000e180008004800 */ /*04c0*/ @P2 LDS R3, [R20.X4+UR6+0x100] ; /* 0x0001000614032984 */ /* 0x000e620008004800 */ /*04d0*/ IMAD R9, R2, R5, R9 ; /* 0x0000000502097224 */ /* 0x001fc800078e0209 */ /*04e0*/ @P2 IMAD R9, R3, R6, R9 ; /* 0x0000000603092224 */ /* 0x002fe400078e0209 */ /*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x001fe20000010000 */ /*0500*/ IADD3 R11, R18, R11, RZ ; /* 0x0000000b120b7210 */ /* 0x000fca0007ffe0ff */ /*0510*/ @!P0 BRA 0x1e0 ; /* 0xfffffcc000008947 */ /* 0x000fea000383ffff */ /*0520*/ IADD3 R20, R20, UR5, RZ ; /* 0x0000000514147c10 */ /* 0x002fe2000fffe0ff */ /*0530*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc800078e00ff */ /*0540*/ IMAD R17, R17, c[0x0][0x178], R20 ; /* 0x00005e0011117a24 */ /* 0x004fc800078e0214 */ /*0550*/ IMAD R2, R18, UR7, R17 ; /* 0x0000000712027c24 */ /* 0x000fc8000f8e0211 */ /*0560*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fca00078e0203 */ /*0570*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe2000c101908 */ /*0580*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0590*/ BRA 0x590; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 
*/ /* 0x000fc00000000000 */ /*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0600*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0610*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0620*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0630*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0640*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0650*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0660*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0670*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z27matrix_multiply_tiling_cudaPiS_S_ii .globl _Z27matrix_multiply_tiling_cudaPiS_S_ii .p2align 8 .type _Z27matrix_multiply_tiling_cudaPiS_S_ii,@function _Z27matrix_multiply_tiling_cudaPiS_S_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b64 s[6:7], s[0:1], 0x18 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v5, v0, 10, 10 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s13, s2, 0xffff s_cmp_lt_i32 s7, 1 s_mul_i32 s5, s14, s13 s_mul_i32 s12, s3, s6 s_cbranch_scc1 .LBB0_11 s_mul_i32 s4, s15, s7 s_load_b128 s[8:11], s[0:1], 0x0 s_mul_i32 s14, s4, s3 v_mad_u64_u32 v[2:3], null, v5, s7, v[1:2] s_add_i32 s16, s14, s7 s_and_b32 s7, s13, 7 s_cmp_gt_u32 s13, 7 v_cmp_ne_u16_e64 s2, s2, 0 v_lshlrev_b32_e32 v0, 2, v1 s_cselect_b32 s3, -1, 0 s_and_b32 s17, s13, 0xfff8 s_cmp_lg_u32 s7, 0 v_cndmask_b32_e64 v9, 0, 1, s2 s_cselect_b32 s2, -1, 0 v_mad_u64_u32 v[3:4], null, v5, s6, v[1:2] v_lshlrev_b32_e32 v6, 7, v5 v_or_b32_e32 v4, 0x1000, v0 v_cndmask_b32_e64 v10, 0, 1, s3 v_cndmask_b32_e64 v11, 0, 1, s2 v_cmp_ne_u32_e64 s2, 1, v9 v_dual_mov_b32 v0, 0 :: v_dual_add_nc_u32 v7, v6, v0 v_add_nc_u32_e32 v8, v4, v6 v_cmp_ne_u32_e64 s3, 1, v10 v_cmp_ne_u32_e64 s4, 1, v11 s_mov_b32 s18, s5 s_branch .LBB0_3 .LBB0_2: s_add_i32 s14, s14, s13 s_add_i32 s18, s18, s12 s_cmp_ge_i32 s14, s16 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_12 .LBB0_3: v_add_nc_u32_e32 v9, s14, v2 v_add_nc_u32_e32 v11, s18, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v10, 31, v9 v_ashrrev_i32_e32 v12, 31, v11 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[9:10], 2, v[9:10] v_lshlrev_b64 v[11:12], 2, v[11:12] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v9, vcc_lo, s8, v9 v_add_co_ci_u32_e32 v10, vcc_lo, s9, v10, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v11, 
vcc_lo, s10, v11 v_add_co_ci_u32_e32 v12, vcc_lo, s11, v12, vcc_lo s_and_b32 vcc_lo, exec_lo, s2 global_load_b32 v9, v[9:10], off global_load_b32 v10, v[11:12], off s_waitcnt vmcnt(1) ds_store_b32 v7, v9 s_waitcnt vmcnt(0) ds_store_b32 v8, v10 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_vccnz .LBB0_2 s_and_b32 vcc_lo, exec_lo, s3 s_cbranch_vccnz .LBB0_8 v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v10, v6 s_mov_b32 s19, 0 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_6: ds_load_2addr_b32 v[11:12], v9 offset1:32 ds_load_2addr_b32 v[13:14], v10 offset1:1 ds_load_2addr_b32 v[15:16], v9 offset0:64 offset1:96 ds_load_2addr_b32 v[17:18], v10 offset0:2 offset1:3 ds_load_2addr_b32 v[19:20], v9 offset0:128 offset1:160 ds_load_2addr_b32 v[21:22], v10 offset0:4 offset1:5 ds_load_2addr_b32 v[23:24], v10 offset0:6 offset1:7 ds_load_2addr_b32 v[25:26], v9 offset0:192 offset1:224 v_add_nc_u32_e32 v10, 32, v10 v_add_nc_u32_e32 v9, 0x400, v9 s_add_i32 s19, s19, 8 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s17, s19 s_waitcnt lgkmcnt(6) v_mul_lo_u32 v11, v11, v13 v_mul_lo_u32 v12, v12, v14 s_waitcnt lgkmcnt(4) v_mul_lo_u32 v13, v15, v17 v_mul_lo_u32 v14, v16, v18 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_add3_u32 v0, v11, v0, v12 s_waitcnt lgkmcnt(2) v_mul_lo_u32 v11, v19, v21 v_mul_lo_u32 v12, v20, v22 v_add3_u32 v0, v13, v0, v14 s_waitcnt lgkmcnt(0) v_mul_lo_u32 v13, v25, v23 v_mul_lo_u32 v14, v26, v24 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v0, v11, v0, v12 v_add3_u32 v0, v13, v0, v14 s_cbranch_scc0 .LBB0_6 s_set_inst_prefetch_distance 0x2 s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccz .LBB0_9 s_branch .LBB0_2 .LBB0_8: s_mov_b32 s19, 0 s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccnz .LBB0_2 .LBB0_9: v_lshl_add_u32 v9, s19, 7, v4 v_lshl_add_u32 v10, s19, 2, v6 s_mov_b32 s19, s7 .LBB0_10: ds_load_b32 v13, v10 ds_load_b32 v14, v9 v_add_nc_u32_e32 v10, 4, v10 s_add_i32 s19, s19, -1 s_delay_alu 
instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_lg_u32 s19, 0 s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[11:12], null, v14, v13, v[0:1] v_dual_mov_b32 v0, v11 :: v_dual_add_nc_u32 v9, 0x80, v9 s_cbranch_scc1 .LBB0_10 s_branch .LBB0_2 .LBB0_11: v_mov_b32_e32 v0, 0 .LBB0_12: v_mad_u64_u32 v[2:3], null, v5, s6, v[1:2] s_load_b64 s[0:1], s[0:1], 0x10 s_mul_i32 s12, s12, s15 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_add3_u32 v1, s12, s5, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[1:2], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v1, vcc_lo, s0, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo global_store_b32 v[1:2], v0, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z27matrix_multiply_tiling_cudaPiS_S_ii .amdhsa_group_segment_fixed_size 8192 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 27 .amdhsa_next_free_sgpr 20 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 
.amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z27matrix_multiply_tiling_cudaPiS_S_ii, .Lfunc_end0-_Z27matrix_multiply_tiling_cudaPiS_S_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 8192 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z27matrix_multiply_tiling_cudaPiS_S_ii .private_segment_fixed_size: 0 .sgpr_count: 22 .sgpr_spill_count: 0 .symbol: _Z27matrix_multiply_tiling_cudaPiS_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 27 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0014b443_00000000-6_matrix_multiply_tiling_cuda.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii .type _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii, @function _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z27matrix_multiply_tiling_cudaPiS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii, .-_Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii 
.globl _Z27matrix_multiply_tiling_cudaPiS_S_ii .type _Z27matrix_multiply_tiling_cudaPiS_S_ii, @function _Z27matrix_multiply_tiling_cudaPiS_S_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z53__device_stub__Z27matrix_multiply_tiling_cudaPiS_S_iiPiS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z27matrix_multiply_tiling_cudaPiS_S_ii, .-_Z27matrix_multiply_tiling_cudaPiS_S_ii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z27matrix_multiply_tiling_cudaPiS_S_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z27matrix_multiply_tiling_cudaPiS_S_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: 
.align 8 4:
.text .file "matrix_multiply_tiling_cuda.hip" .globl _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii # -- Begin function _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .p2align 4, 0x90 .type _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii,@function _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii: # @_Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z27matrix_multiply_tiling_cudaPiS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii, .Lfunc_end0-_Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z27matrix_multiply_tiling_cudaPiS_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp 
.cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z27matrix_multiply_tiling_cudaPiS_S_ii,@object # @_Z27matrix_multiply_tiling_cudaPiS_S_ii .section .rodata,"a",@progbits .globl _Z27matrix_multiply_tiling_cudaPiS_S_ii .p2align 3, 0x0 _Z27matrix_multiply_tiling_cudaPiS_S_ii: .quad _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .size _Z27matrix_multiply_tiling_cudaPiS_S_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z27matrix_multiply_tiling_cudaPiS_S_ii" .size .L__unnamed_1, 40 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z42__device_stub__matrix_multiply_tiling_cudaPiS_S_ii .addrsig_sym 
__hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z27matrix_multiply_tiling_cudaPiS_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); cudaEvent_t start_time, stop_time; float exectime; cudaEventCreate(&start_time); cudaEventCreate(&stop_time); cudaEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); cudaEventRecord(stop_time, 0); cudaEventSynchronize(stop_time); cudaEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); cudaEventDestroy(start_time); cudaEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); cudaEvent_t start_time, stop_time; float exectime; cudaEventCreate(&start_time); cudaEventCreate(&stop_time); cudaEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); cudaEventRecord(stop_time, 0); cudaEventSynchronize(stop_time); cudaEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); cudaEventDestroy(start_time); cudaEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
.file "tmpxft_0018f955_00000000-6_mul_skeleton.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z15metric_mul_goldPA1000_iS0_S0_ .type _Z15metric_mul_goldPA1000_iS0_S0_, @function _Z15metric_mul_goldPA1000_iS0_S0_: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movq %rdi, %r9 movq %rsi, %r11 movq %rdx, %rbx movl $0, %r8d leaq 4004000(%rsi), %r10 .L4: leaq (%rbx,%r8), %rsi leaq 4000000(%r11), %rdi .L8: leaq (%r9,%r8), %rcx leaq -4000000(%rdi), %rax .L5: movl (%rcx), %edx imull (%rax), %edx addl %edx, (%rsi) addq $4, %rcx addq $4000, %rax cmpq %rdi, %rax jne .L5 addq $4, %rsi addq $4, %rdi cmpq %r10, %rdi jne .L8 addq $4000, %r8 cmpq $4000000, %r8 jne .L4 popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z15metric_mul_goldPA1000_iS0_S0_, .-_Z15metric_mul_goldPA1000_iS0_S0_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "real %f ms\n " .LC1: .string "AC!\n" .LC2: .string "Failed!\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $56, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl $4000000, %edi call malloc@PLT movq %rax, %r15 movl $4000000, %edi call malloc@PLT movq %rax, %r12 movq %rax, (%rsp) movl 
$4000000, %edi call malloc@PLT movq %rax, %r13 movq %rax, 8(%rsp) leaq 4000000(%r15), %r14 movq %r15, %rbp .L12: movl $0, %ebx .L13: call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax movl %eax, 0(%rbp,%rbx) call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax movl %eax, (%r12,%rbx) movl $0, 0(%r13,%rbx) addq $4, %rbx cmpq $4000, %rbx jne .L13 addq $4000, %rbp addq $4000, %r12 addq $4000, %r13 cmpq %rbp, %r14 jne .L12 movl $4000000, %edi call malloc@PLT movq %rax, %rbx movq %rax, %rdx movq (%rsp), %rbp movq %rbp, %rsi movq %r15, %rdi call _Z15metric_mul_goldPA1000_iS0_S0_ leaq 24(%rsp), %rdi call cudaEventCreate@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movq 8(%rsp), %r14 movq %r14, %rdx movq %rbp, %rsi movq %r15, %rdi call _Z10metric_mulPA1000_iS0_S0_@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movq 32(%rsp), %rdi call cudaEventSynchronize@PLT leaq 20(%rsp), %rdi movq 32(%rsp), %rdx movq 24(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 20(%rsp), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 24(%rsp), %rdi call cudaEventDestroy@PLT movq 32(%rsp), %rdi call cudaEventDestroy@PLT movl $4000000, %edx movq %rbx, %rsi movq %r14, %rdi call memcmp@PLT testl %eax, %eax jne .L15 leaq .LC1(%rip), %rsi movl $2, %edi call __printf_chk@PLT .L16: movq %r15, %rdi call free@PLT movq (%rsp), %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 
popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L16 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); cudaEvent_t start_time, stop_time; float exectime; cudaEventCreate(&start_time); cudaEventCreate(&stop_time); cudaEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); cudaEventRecord(stop_time, 0); cudaEventSynchronize(stop_time); cudaEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); cudaEventDestroy(start_time); cudaEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
#include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); hipEvent_t start_time, stop_time; float exectime; hipEventCreate(&start_time); hipEventCreate(&stop_time); hipEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); hipEventRecord(stop_time, 0); hipEventSynchronize(stop_time); hipEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); hipEventDestroy(start_time); hipEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); hipEvent_t start_time, stop_time; float exectime; hipEventCreate(&start_time); hipEventCreate(&stop_time); hipEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); hipEventRecord(stop_time, 0); hipEventSynchronize(stop_time); hipEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); hipEventDestroy(start_time); hipEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); hipEvent_t start_time, stop_time; float exectime; hipEventCreate(&start_time); hipEventCreate(&stop_time); hipEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); hipEventRecord(stop_time, 0); hipEventSynchronize(stop_time); hipEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); hipEventDestroy(start_time); hipEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
.text .file "mul_skeleton.hip" .globl _Z15metric_mul_goldPA1000_iS0_S0_ # -- Begin function _Z15metric_mul_goldPA1000_iS0_S0_ .p2align 4, 0x90 .type _Z15metric_mul_goldPA1000_iS0_S0_,@function _Z15metric_mul_goldPA1000_iS0_S0_: # @_Z15metric_mul_goldPA1000_iS0_S0_ .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # %.preheader16 # =>This Loop Header: Depth=1 # Child Loop BB0_2 Depth 2 # Child Loop BB0_3 Depth 3 imulq $4000, %rax, %rcx # imm = 0xFA0 addq %rdx, %rcx movq %rsi, %r8 xorl %r9d, %r9d .p2align 4, 0x90 .LBB0_2: # %.preheader # Parent Loop BB0_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB0_3 Depth 3 leaq (%rcx,%r9,4), %r10 movl (%rcx,%r9,4), %r11d movq %r8, %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB0_3: # Parent Loop BB0_1 Depth=1 # Parent Loop BB0_2 Depth=2 # => This Inner Loop Header: Depth=3 movl (%rbx), %ebp imull (%rdi,%r14,4), %ebp addl %ebp, %r11d movl %r11d, (%r10) incq %r14 addq $4000, %rbx # imm = 0xFA0 cmpq $1000, %r14 # imm = 0x3E8 jne .LBB0_3 # %bb.4: # in Loop: Header=BB0_2 Depth=2 incq %r9 addq $4, %r8 cmpq $1000, %r9 # imm = 0x3E8 jne .LBB0_2 # %bb.5: # in Loop: Header=BB0_1 Depth=1 incq %rax addq $4000, %rdi # imm = 0xFA0 cmpq $1000, %rax # imm = 0x3E8 jne .LBB0_1 # %bb.6: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z15metric_mul_goldPA1000_iS0_S0_, .Lfunc_end0-_Z15metric_mul_goldPA1000_iS0_S0_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $56, %rsp .cfi_def_cfa_offset 
112 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %r12d, %r12d xorl %edi, %edi callq time movl %eax, %edi callq srand movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r13 movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %rbp movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r15 movq %r13, 48(%rsp) # 8-byte Spill movq %rbp, 40(%rsp) # 8-byte Spill movq %rax, 32(%rsp) # 8-byte Spill .p2align 4, 0x90 .LBB1_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 xorl %ebx, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax movl %eax, (%r13,%r14,4) callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax movl %eax, (%rbp,%r14,4) movl $0, (%r15,%r14,4) incq %r14 addq $-4, %rbx cmpl $1000, %r14d # imm = 0x3E8 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 Depth=1 incl %r12d subq %rbx, %r13 subq %rbx, %rbp subq %rbx, %r15 cmpl $1000, %r12d # imm = 0x3E8 jne .LBB1_1 # %bb.4: movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r12 xorl %eax, %eax movq 48(%rsp), %r14 # 8-byte Reload movq %r14, %rcx movq 40(%rsp), %r15 # 8-byte Reload .p2align 4, 0x90 .LBB1_5: # %.preheader16.i # =>This Loop Header: Depth=1 # Child Loop BB1_6 Depth 2 # Child Loop BB1_7 Depth 3 imulq $4000, %rax, %rdx # imm = 0xFA0 addq %r12, %rdx movq %r15, %rsi xorl %edi, %edi .p2align 4, 0x90 .LBB1_6: # %.preheader.i # Parent Loop BB1_5 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_7 Depth 3 leaq (%rdx,%rdi,4), %r8 movl (%rdx,%rdi,4), %r9d movq %rsi, %r10 xorl %r11d, %r11d 
.p2align 4, 0x90 .LBB1_7: # Parent Loop BB1_5 Depth=1 # Parent Loop BB1_6 Depth=2 # => This Inner Loop Header: Depth=3 movl (%r10), %ebx imull (%rcx,%r11,4), %ebx addl %ebx, %r9d incq %r11 addq $4000, %r10 # imm = 0xFA0 cmpq $1000, %r11 # imm = 0x3E8 jne .LBB1_7 # %bb.8: # in Loop: Header=BB1_6 Depth=2 movl %r9d, (%r8) incq %rdi addq $4, %rsi cmpq $1000, %rdi # imm = 0x3E8 jne .LBB1_6 # %bb.9: # in Loop: Header=BB1_5 Depth=1 incq %rax addq $4000, %rcx # imm = 0xFA0 cmpq $1000, %rax # imm = 0x3E8 jne .LBB1_5 # %bb.10: # %_Z15metric_mul_goldPA1000_iS0_S0_.exit leaq 16(%rsp), %rdi callq hipEventCreate leaq 8(%rsp), %rdi callq hipEventCreate movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq %r14, %rdi movq %r15, %rsi movq 32(%rsp), %rbx # 8-byte Reload movq %rbx, %rdx callq _Z10metric_mulPA1000_iS0_S0_ movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 16(%rsp), %rsi movq 8(%rsp), %rdx leaq 28(%rsp), %rdi callq hipEventElapsedTime movss 28(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 16(%rsp), %rdi callq hipEventDestroy movq 8(%rsp), %rdi callq hipEventDestroy movl $4000000, %edx # imm = 0x3D0900 movq %rbx, %rdi movq %r12, %rsi callq bcmp@PLT testl %eax, %eax movl $.Lstr, %eax movl $.Lstr.1, %edi cmoveq %rax, %rdi callq puts@PLT movq %r14, %rdi callq free movq %r15, %rdi callq free movq %rbx, %rdi callq free movq %r12, %rdi callq free xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "real %f ms\n " .size .L.str, 13 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "AC!" .size .Lstr, 4 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Failed!" .size .Lstr.1, 8 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0018f955_00000000-6_mul_skeleton.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z15metric_mul_goldPA1000_iS0_S0_ .type _Z15metric_mul_goldPA1000_iS0_S0_, @function _Z15metric_mul_goldPA1000_iS0_S0_: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movq %rdi, %r9 movq %rsi, %r11 movq %rdx, %rbx movl $0, %r8d leaq 4004000(%rsi), %r10 .L4: leaq (%rbx,%r8), %rsi leaq 4000000(%r11), %rdi .L8: leaq (%r9,%r8), %rcx leaq -4000000(%rdi), %rax .L5: movl (%rcx), %edx imull (%rax), %edx addl %edx, (%rsi) addq $4, %rcx addq $4000, %rax cmpq %rdi, %rax jne .L5 addq $4, %rsi addq $4, %rdi cmpq %r10, %rdi jne .L8 addq $4000, %r8 cmpq $4000000, %r8 jne .L4 popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z15metric_mul_goldPA1000_iS0_S0_, .-_Z15metric_mul_goldPA1000_iS0_S0_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "real %f ms\n " .LC1: .string "AC!\n" .LC2: .string "Failed!\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $56, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $0, %edi call time@PLT movl %eax, %edi call srand@PLT movl $4000000, %edi call malloc@PLT movq %rax, %r15 movl $4000000, %edi call malloc@PLT movq %rax, %r12 movq %rax, (%rsp) movl 
$4000000, %edi call malloc@PLT movq %rax, %r13 movq %rax, 8(%rsp) leaq 4000000(%r15), %r14 movq %r15, %rbp .L12: movl $0, %ebx .L13: call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax movl %eax, 0(%rbp,%rbx) call rand@PLT movslq %eax, %rdx imulq $1717986919, %rdx, %rdx sarq $34, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,4), %edx addl %edx, %edx subl %edx, %eax movl %eax, (%r12,%rbx) movl $0, 0(%r13,%rbx) addq $4, %rbx cmpq $4000, %rbx jne .L13 addq $4000, %rbp addq $4000, %r12 addq $4000, %r13 cmpq %rbp, %r14 jne .L12 movl $4000000, %edi call malloc@PLT movq %rax, %rbx movq %rax, %rdx movq (%rsp), %rbp movq %rbp, %rsi movq %r15, %rdi call _Z15metric_mul_goldPA1000_iS0_S0_ leaq 24(%rsp), %rdi call cudaEventCreate@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movq 8(%rsp), %r14 movq %r14, %rdx movq %rbp, %rsi movq %r15, %rdi call _Z10metric_mulPA1000_iS0_S0_@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movq 32(%rsp), %rdi call cudaEventSynchronize@PLT leaq 20(%rsp), %rdi movq 32(%rsp), %rdx movq 24(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 20(%rsp), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 24(%rsp), %rdi call cudaEventDestroy@PLT movq 32(%rsp), %rdi call cudaEventDestroy@PLT movl $4000000, %edx movq %rbx, %rsi movq %r14, %rdi call memcmp@PLT testl %eax, %eax jne .L15 leaq .LC1(%rip), %rsi movl $2, %edi call __printf_chk@PLT .L16: movq %r15, %rdi call free@PLT movq (%rsp), %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 
popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L16 .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "mul_skeleton.hip" .globl _Z15metric_mul_goldPA1000_iS0_S0_ # -- Begin function _Z15metric_mul_goldPA1000_iS0_S0_ .p2align 4, 0x90 .type _Z15metric_mul_goldPA1000_iS0_S0_,@function _Z15metric_mul_goldPA1000_iS0_S0_: # @_Z15metric_mul_goldPA1000_iS0_S0_ .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 xorl %eax, %eax .p2align 4, 0x90 .LBB0_1: # %.preheader16 # =>This Loop Header: Depth=1 # Child Loop BB0_2 Depth 2 # Child Loop BB0_3 Depth 3 imulq $4000, %rax, %rcx # imm = 0xFA0 addq %rdx, %rcx movq %rsi, %r8 xorl %r9d, %r9d .p2align 4, 0x90 .LBB0_2: # %.preheader # Parent Loop BB0_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB0_3 Depth 3 leaq (%rcx,%r9,4), %r10 movl (%rcx,%r9,4), %r11d movq %r8, %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB0_3: # Parent Loop BB0_1 Depth=1 # Parent Loop BB0_2 Depth=2 # => This Inner Loop Header: Depth=3 movl (%rbx), %ebp imull (%rdi,%r14,4), %ebp addl %ebp, %r11d movl %r11d, (%r10) incq %r14 addq $4000, %rbx # imm = 0xFA0 cmpq $1000, %r14 # imm = 0x3E8 jne .LBB0_3 # %bb.4: # in Loop: Header=BB0_2 Depth=2 incq %r9 addq $4, %r8 cmpq $1000, %r9 # imm = 0x3E8 jne .LBB0_2 # %bb.5: # in Loop: Header=BB0_1 Depth=1 incq %rax addq $4000, %rdi # imm = 0xFA0 cmpq $1000, %rax # imm = 0x3E8 jne .LBB0_1 # %bb.6: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z15metric_mul_goldPA1000_iS0_S0_, .Lfunc_end0-_Z15metric_mul_goldPA1000_iS0_S0_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $56, %rsp .cfi_def_cfa_offset 
112 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 xorl %r12d, %r12d xorl %edi, %edi callq time movl %eax, %edi callq srand movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r13 movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %rbp movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r15 movq %r13, 48(%rsp) # 8-byte Spill movq %rbp, 40(%rsp) # 8-byte Spill movq %rax, 32(%rsp) # 8-byte Spill .p2align 4, 0x90 .LBB1_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 xorl %ebx, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax movl %eax, (%r13,%r14,4) callq rand cltq imulq $1717986919, %rax, %rcx # imm = 0x66666667 movq %rcx, %rdx shrq $63, %rdx sarq $34, %rcx addl %edx, %ecx addl %ecx, %ecx leal (%rcx,%rcx,4), %ecx subl %ecx, %eax movl %eax, (%rbp,%r14,4) movl $0, (%r15,%r14,4) incq %r14 addq $-4, %rbx cmpl $1000, %r14d # imm = 0x3E8 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 Depth=1 incl %r12d subq %rbx, %r13 subq %rbx, %rbp subq %rbx, %r15 cmpl $1000, %r12d # imm = 0x3E8 jne .LBB1_1 # %bb.4: movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r12 xorl %eax, %eax movq 48(%rsp), %r14 # 8-byte Reload movq %r14, %rcx movq 40(%rsp), %r15 # 8-byte Reload .p2align 4, 0x90 .LBB1_5: # %.preheader16.i # =>This Loop Header: Depth=1 # Child Loop BB1_6 Depth 2 # Child Loop BB1_7 Depth 3 imulq $4000, %rax, %rdx # imm = 0xFA0 addq %r12, %rdx movq %r15, %rsi xorl %edi, %edi .p2align 4, 0x90 .LBB1_6: # %.preheader.i # Parent Loop BB1_5 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_7 Depth 3 leaq (%rdx,%rdi,4), %r8 movl (%rdx,%rdi,4), %r9d movq %rsi, %r10 xorl %r11d, %r11d 
.p2align 4, 0x90 .LBB1_7: # Parent Loop BB1_5 Depth=1 # Parent Loop BB1_6 Depth=2 # => This Inner Loop Header: Depth=3 movl (%r10), %ebx imull (%rcx,%r11,4), %ebx addl %ebx, %r9d incq %r11 addq $4000, %r10 # imm = 0xFA0 cmpq $1000, %r11 # imm = 0x3E8 jne .LBB1_7 # %bb.8: # in Loop: Header=BB1_6 Depth=2 movl %r9d, (%r8) incq %rdi addq $4, %rsi cmpq $1000, %rdi # imm = 0x3E8 jne .LBB1_6 # %bb.9: # in Loop: Header=BB1_5 Depth=1 incq %rax addq $4000, %rcx # imm = 0xFA0 cmpq $1000, %rax # imm = 0x3E8 jne .LBB1_5 # %bb.10: # %_Z15metric_mul_goldPA1000_iS0_S0_.exit leaq 16(%rsp), %rdi callq hipEventCreate leaq 8(%rsp), %rdi callq hipEventCreate movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq %r14, %rdi movq %r15, %rsi movq 32(%rsp), %rbx # 8-byte Reload movq %rbx, %rdx callq _Z10metric_mulPA1000_iS0_S0_ movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 16(%rsp), %rsi movq 8(%rsp), %rdx leaq 28(%rsp), %rdi callq hipEventElapsedTime movss 28(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 16(%rsp), %rdi callq hipEventDestroy movq 8(%rsp), %rdi callq hipEventDestroy movl $4000000, %edx # imm = 0x3D0900 movq %rbx, %rdi movq %r12, %rsi callq bcmp@PLT testl %eax, %eax movl $.Lstr, %eax movl $.Lstr.1, %edi cmoveq %rax, %rdi callq puts@PLT movq %r14, %rdi callq free movq %r15, %rdi callq free movq %rbx, %rdi callq free movq %r12, %rdi callq free xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "real %f ms\n " .size .L.str, 13 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl 
__hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "AC!" .size .Lstr, 4 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Failed!" .size .Lstr.1, 8 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// Complex matrix multiply C = A * B over double2 (x = real, y = imag).
// From the indexing: A is K x N (A[row*N+k]), B is N x L (B[k*L+col]),
// C is K x L (C[row*L+col]); each thread computes one element of C.
// NOTE(review): the kernel accumulates directly into global memory
// C[row*L+col] on every loop iteration instead of a register accumulator;
// the paired assembly in this record reflects that, so it is left as-is.
__global__ void mtx_multiply(const double2 * A, const double2 * B, double2 * C,
                             const int N, const int K, const int L ){
    // Global 2D thread coordinates: col spans columns of C/B, row spans rows of C/A.
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    // Bounds guard: the launch grid may overhang the matrix edges.
    if (col >= L || row >= K) return;
    // Zero the output element before accumulating the dot product.
    C[row*L + col].x = 0.0;
    C[row*L + col].y = 0.0;
    for(int k = 0; k < N ; k++){
        // Complex multiply-accumulate: (ax + i*ay)*(bx + i*by)
        //   real += ax*bx - ay*by;  imag += ax*by + ay*bx
        C[row*L + col].x += A[row*N+k].x*B[k*L+col].x - A[row*N+k].y*B[k*L+col].y;
        C[row*L + col].y += A[row*N+k].x*B[k*L+col].y + A[row*N+k].y*B[k*L+col].x;
    }
}
code for sm_80 Function : _Z12mtx_multiplyPK7double2S1_PS_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e280000002100 */ /*0030*/ S2R R6, SR_CTAID.Y ; /* 0x0000000000067919 */ /* 0x000e680000002600 */ /*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */ /* 0x001fca00078e0200 */ /*0060*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x180], PT ; /* 0x0000600007007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R6, R6, c[0x0][0x4], R3 ; /* 0x0000010006067a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GE.OR P0, PT, R6, c[0x0][0x17c], P0 ; /* 0x00005f0006007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff057624 */ /* 0x000fe200078e00ff */ /*00b0*/ HFMA2.MMA R0, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff007435 */ /* 0x000fe200000001ff */ /*00c0*/ IMAD R19, R6, c[0x0][0x180], R7 ; /* 0x0000600006137a24 */ /* 0x000fe200078e0207 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00e0*/ ISETP.GE.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */ /* 0x000fcc0003f06270 */ /*00f0*/ IMAD.WIDE R18, R19, R0, c[0x0][0x170] ; /* 0x00005c0013127625 */ /* 0x000fca00078e0200 */ /*0100*/ STG.E.128 [R18.64], RZ ; /* 0x000000ff12007986 */ /* 0x0001e4000c101d04 */ /*0110*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R2, R5.reuse, -0x1, RZ ; /* 0xffffffff05027810 */ /* 0x040fe20007ffe0ff */ /*0130*/ IMAD.MOV.U32 R3, RZ, RZ, RZ ; /* 0x000000ffff037224 */ /* 0x000fe200078e00ff */ /*0140*/ LOP3.LUT R5, R5, 0x3, RZ, 0xc0, !PT ; /* 
0x0000000305057812 */ /* 0x000fe200078ec0ff */ /*0150*/ CS2R R22, SRZ ; /* 0x0000000000167805 */ /* 0x000fe2000001ff00 */ /*0160*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe20003f26070 */ /*0170*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*0180*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fd60003f05270 */ /*0190*/ @!P1 BRA 0x5e0 ; /* 0x0000044000009947 */ /* 0x000fea0003800000 */ /*01a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */ /* 0x000fe200000001ff */ /*01b0*/ IADD3 R2, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005027a10 */ /* 0x000fe20007ffe1ff */ /*01c0*/ IMAD R4, R6, c[0x0][0x178], RZ ; /* 0x00005e0006047a24 */ /* 0x000fe200078e02ff */ /*01d0*/ CS2R R22, SRZ ; /* 0x0000000000167805 */ /* 0x000fe2000001ff00 */ /*01e0*/ IMAD.WIDE R26, R7, R0, c[0x0][0x168] ; /* 0x00005a00071a7625 */ /* 0x000fe200078e0200 */ /*01f0*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*0200*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */ /* 0x000fe40000000a00 */ /*0210*/ MOV R17, UR7 ; /* 0x0000000700117c02 */ /* 0x000fe20008000f00 */ /*0220*/ IMAD.U32 R16, RZ, RZ, UR6 ; /* 0x00000006ff107e24 */ /* 0x000fe2000f8e00ff */ /*0230*/ LDG.E.128 R8, [R26.64] ; /* 0x000000041a087981 */ /* 0x000ea6000c1e1d00 */ /*0240*/ IMAD.WIDE R16, R4, 0x10, R16 ; /* 0x0000001004107825 */ /* 0x000fca00078e0210 */ /*0250*/ LDG.E.128 R12, [R16.64] ; /* 0x00000004100c7981 */ /* 0x000ea4000c1e1d00 */ /*0260*/ DMUL R24, R14, R10 ; /* 0x0000000a0e187228 */ /* 0x006e4c0000000000 */ /*0270*/ DFMA R8, R12, R8, -R24 ; /* 0x000000080c08722b */ /* 0x002e4c0000000818 */ /*0280*/ DADD R20, R8, R20 ; /* 0x0000000008147229 */ /* 0x002e4e0000000014 */ /*0290*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*02a0*/ LDG.E.64 R8, [R26.64] ; /* 0x000000041a087981 */ /* 0x000ea8000c1e1b00 */ /*02b0*/ LDG.E.64 R12, [R16.64] ; /* 0x00000004100c7981 */ /* 0x000ee2000c1e1b00 */ /*02c0*/ 
IMAD.WIDE R24, R0, c[0x0][0x180], R26 ; /* 0x0000600000187a25 */ /* 0x000fe200078e021a */ /*02d0*/ DMUL R8, R14, R8 ; /* 0x000000080e087228 */ /* 0x004ecc0000000000 */ /*02e0*/ DFMA R8, R10, R12, R8 ; /* 0x0000000c0a08722b */ /* 0x008e8c0000000008 */ /*02f0*/ DADD R22, R8, R22 ; /* 0x0000000008167229 */ /* 0x004e8e0000000016 */ /*0300*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0045e8000c101b04 */ /*0310*/ LDG.E.128 R8, [R16.64+0x10] ; /* 0x0000100410087981 */ /* 0x000ee8000c1e1d00 */ /*0320*/ LDG.E.128 R12, [R24.64] ; /* 0x00000004180c7981 */ /* 0x000ee4000c1e1d00 */ /*0330*/ DMUL R28, R10, R14 ; /* 0x0000000e0a1c7228 */ /* 0x008ecc0000000000 */ /*0340*/ DFMA R8, R8, R12, -R28 ; /* 0x0000000c0808722b */ /* 0x008e4c000000081c */ /*0350*/ DADD R20, R20, R8 ; /* 0x0000000014147229 */ /* 0x002e4e0000000008 */ /*0360*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*0370*/ LDG.E.64 R8, [R24.64] ; /* 0x0000000418087981 */ /* 0x000ee8000c1e1b00 */ /*0380*/ LDG.E.64 R12, [R16.64+0x10] ; /* 0x00001004100c7981 */ /* 0x000f22000c1e1b00 */ /*0390*/ IMAD.WIDE R26, R0, c[0x0][0x180], R24 ; /* 0x00006000001a7a25 */ /* 0x000fe200078e0218 */ /*03a0*/ DMUL R8, R10, R8 ; /* 0x000000080a087228 */ /* 0x008f0c0000000000 */ /*03b0*/ DFMA R8, R14, R12, R8 ; /* 0x0000000c0e08722b */ /* 0x010e8c0000000008 */ /*03c0*/ DADD R22, R22, R8 ; /* 0x0000000016167229 */ /* 0x004e8e0000000008 */ /*03d0*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0045e8000c101b04 */ /*03e0*/ LDG.E.128 R8, [R16.64+0x20] ; /* 0x0000200410087981 */ /* 0x000ee8000c1e1d00 */ /*03f0*/ LDG.E.128 R12, [R26.64] ; /* 0x000000041a0c7981 */ /* 0x000ee4000c1e1d00 */ /*0400*/ DMUL R28, R10, R14 ; /* 0x0000000e0a1c7228 */ /* 0x008ecc0000000000 */ /*0410*/ DFMA R8, R8, R12, -R28 ; /* 0x0000000c0808722b */ /* 0x008e4c000000081c */ /*0420*/ DADD R20, R20, R8 ; /* 0x0000000014147229 */ /* 0x002e4e0000000008 */ /*0430*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 
0x0023e8000c101b04 */ /*0440*/ LDG.E.64 R8, [R26.64] ; /* 0x000000041a087981 */ /* 0x000ee8000c1e1b00 */ /*0450*/ LDG.E.64 R12, [R16.64+0x20] ; /* 0x00002004100c7981 */ /* 0x000f22000c1e1b00 */ /*0460*/ IMAD.WIDE R24, R0, c[0x0][0x180], R26 ; /* 0x0000600000187a25 */ /* 0x000fe200078e021a */ /*0470*/ DMUL R8, R10, R8 ; /* 0x000000080a087228 */ /* 0x008f0c0000000000 */ /*0480*/ DFMA R8, R14, R12, R8 ; /* 0x0000000c0e08722b */ /* 0x010e8c0000000008 */ /*0490*/ DADD R22, R22, R8 ; /* 0x0000000016167229 */ /* 0x004e8e0000000008 */ /*04a0*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0045e8000c101b04 */ /*04b0*/ LDG.E.128 R8, [R16.64+0x30] ; /* 0x0000300410087981 */ /* 0x000ee8000c1e1d00 */ /*04c0*/ LDG.E.128 R12, [R24.64] ; /* 0x00000004180c7981 */ /* 0x000ee4000c1e1d00 */ /*04d0*/ DMUL R28, R10, R14 ; /* 0x0000000e0a1c7228 */ /* 0x008ecc0000000000 */ /*04e0*/ DFMA R8, R8, R12, -R28 ; /* 0x0000000c0808722b */ /* 0x008e4c000000081c */ /*04f0*/ DADD R20, R20, R8 ; /* 0x0000000014147229 */ /* 0x002e4e0000000008 */ /*0500*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*0510*/ LDG.E.64 R8, [R24.64] ; /* 0x0000000418087981 */ /* 0x000ee8000c1e1b00 */ /*0520*/ LDG.E.64 R12, [R16.64+0x30] ; /* 0x00003004100c7981 */ /* 0x000f22000c1e1b00 */ /*0530*/ IADD3 R2, R2, -0x4, RZ ; /* 0xfffffffc02027810 */ /* 0x000fc80007ffe0ff */ /*0540*/ ISETP.NE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fe20003f25270 */ /*0550*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */ /* 0x000fe2000ff1e03f */ /*0560*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */ /* 0x000fc60007ffe0ff */ /*0570*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0580*/ IMAD.WIDE R26, R0, c[0x0][0x180], R24 ; /* 0x00006000001a7a25 */ /* 0x000fe200078e0218 */ /*0590*/ DMUL R8, R10, R8 ; /* 0x000000080a087228 */ /* 0x008f0c0000000000 */ /*05a0*/ DFMA R8, R14, R12, R8 ; /* 0x0000000c0e08722b */ /* 
0x010e8c0000000008 */ /*05b0*/ DADD R22, R22, R8 ; /* 0x0000000016167229 */ /* 0x004e8e0000000008 */ /*05c0*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0043e2000c101b04 */ /*05d0*/ @P1 BRA 0x210 ; /* 0xfffffc3000001947 */ /* 0x000fea000383ffff */ /*05e0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*05f0*/ IMAD R9, R6, c[0x0][0x178], R3 ; /* 0x00005e0006097a24 */ /* 0x000fe400078e0203 */ /*0600*/ IMAD R3, R3, c[0x0][0x180], R7 ; /* 0x0000600003037a24 */ /* 0x000fe400078e0207 */ /*0610*/ IMAD.WIDE R6, R9, R0, c[0x0][0x160] ; /* 0x0000580009067625 */ /* 0x000fc800078e0200 */ /*0620*/ IMAD.WIDE R2, R3, R0, c[0x0][0x168] ; /* 0x00005a0003027625 */ /* 0x000fca00078e0200 */ /*0630*/ LDG.E.128 R8, [R2.64] ; /* 0x0000000402087981 */ /* 0x000ea8000c1e1d00 */ /*0640*/ LDG.E.128 R12, [R6.64] ; /* 0x00000004060c7981 */ /* 0x000ea4000c1e1d00 */ /*0650*/ DMUL R16, R14, R10 ; /* 0x0000000a0e107228 */ /* 0x004e8c0000000000 */ /*0660*/ DFMA R8, R12, R8, -R16 ; /* 0x000000080c08722b */ /* 0x004e8c0000000810 */ /*0670*/ DADD R20, R8, R20 ; /* 0x0000000008147229 */ /* 0x006e4e0000000014 */ /*0680*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*0690*/ LDG.E.64 R12, [R2.64] ; /* 0x00000004020c7981 */ /* 0x0004e8000c1e1b00 */ /*06a0*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */ /* 0x000962000c1e1b00 */ /*06b0*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fc80007ffe0ff */ /*06c0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*06d0*/ IMAD.WIDE R2, R0, c[0x0][0x180], R2 ; /* 0x0000600000027a25 */ /* 0x004fe200078e0202 */ /*06e0*/ IADD3 R6, P1, R6, 0x10, RZ ; /* 0x0000001006067810 */ /* 0x010fc80007f3e0ff */ /*06f0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0700*/ DMUL R12, R14, R12 ; /* 0x0000000c0e0c7228 */ /* 0x008f4c0000000000 */ /*0710*/ DFMA R8, R10, R8, R12 ; /* 0x000000080a08722b */ /* 0x020e8c000000000c */ /*0720*/ 
DADD R22, R8, R22 ; /* 0x0000000008167229 */ /* 0x004e8e0000000016 */ /*0730*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0043e2000c101b04 */ /*0740*/ @P0 BRA 0x630 ; /* 0xfffffee000000947 */ /* 0x000fea000383ffff */ /*0750*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0760*/ BRA 0x760; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0770*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0780*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0790*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// Complex matrix multiply C = A * B over double2 (x = real, y = imag).
// From the indexing: A is K x N (A[row*N+k]), B is N x L (B[k*L+col]),
// C is K x L (C[row*L+col]); each thread computes one element of C.
// NOTE(review): accumulates into global C[row*L+col] each iteration rather
// than a register; the paired host assembly belongs to this exact source.
__global__ void mtx_multiply(const double2 * A, const double2 * B, double2 * C,
                             const int N, const int K, const int L ){
    // Global 2D thread coordinates derived from block/thread indices.
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    // Bounds guard for a grid that may overhang the matrix edges.
    if (col >= L || row >= K) return;
    // Zero the output element before accumulating.
    C[row*L + col].x = 0.0;
    C[row*L + col].y = 0.0;
    for(int k = 0; k < N ; k++){
        // Complex multiply-accumulate: real and imaginary parts separately.
        C[row*L + col].x += A[row*N+k].x*B[k*L+col].x - A[row*N+k].y*B[k*L+col].y;
        C[row*L + col].y += A[row*N+k].x*B[k*L+col].y + A[row*N+k].y*B[k*L+col].x;
    }
}
.file "tmpxft_000afaa6_00000000-6_mtxmultiply.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii .type _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii, @function _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z12mtx_multiplyPK7double2S1_PS_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size 
_Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii, .-_Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii .globl _Z12mtx_multiplyPK7double2S1_PS_iii .type _Z12mtx_multiplyPK7double2S1_PS_iii, @function _Z12mtx_multiplyPK7double2S1_PS_iii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z12mtx_multiplyPK7double2S1_PS_iii, .-_Z12mtx_multiplyPK7double2S1_PS_iii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z12mtx_multiplyPK7double2S1_PS_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12mtx_multiplyPK7double2S1_PS_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// Complex matrix multiply C = A * B over double2 (x = real, y = imag).
// From the indexing: A is K x N (A[row*N+k]), B is N x L (B[k*L+col]),
// C is K x L (C[row*L+col]); each thread computes one element of C.
// NOTE(review): per-iteration global read-modify-write on C is preserved so
// this source stays consistent with its paired HIP translation record.
__global__ void mtx_multiply(const double2 * A, const double2 * B, double2 * C,
                             const int N, const int K, const int L ){
    // col indexes columns of C/B, row indexes rows of C/A.
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    // Out-of-range threads exit before touching memory.
    if (col >= L || row >= K) return;
    // Initialize the accumulator element in C.
    C[row*L + col].x = 0.0;
    C[row*L + col].y = 0.0;
    for(int k = 0; k < N ; k++){
        // (ax + i*ay)*(bx + i*by): real += ax*bx - ay*by; imag += ax*by + ay*bx
        C[row*L + col].x += A[row*N+k].x*B[k*L+col].x - A[row*N+k].y*B[k*L+col].y;
        C[row*L + col].y += A[row*N+k].x*B[k*L+col].y + A[row*N+k].y*B[k*L+col].x;
    }
}
#include <hip/hip_runtime.h>
// HIP port of the CUDA kernel above: complex matrix multiply C = A * B
// over double2 (x = real, y = imag). From the indexing: A is K x N,
// B is N x L, C is K x L; one thread per output element.
// NOTE(review): accumulates directly into global C[row*L+col] each
// iteration — kept byte-identical to the CUDA original by design.
__global__ void mtx_multiply(const double2 * A, const double2 * B, double2 * C,
                             const int N, const int K, const int L ){
    // Global 2D thread coordinates.
    int col = threadIdx.x + blockDim.x*blockIdx.x;
    int row = threadIdx.y + blockDim.y*blockIdx.y;
    // Bounds guard for overhanging grid.
    if (col >= L || row >= K) return;
    // Zero the output element before accumulating.
    C[row*L + col].x = 0.0;
    C[row*L + col].y = 0.0;
    for(int k = 0; k < N ; k++){
        // Complex multiply-accumulate: real and imaginary parts.
        C[row*L + col].x += A[row*N+k].x*B[k*L+col].x - A[row*N+k].y*B[k*L+col].y;
        C[row*L + col].y += A[row*N+k].x*B[k*L+col].y + A[row*N+k].y*B[k*L+col].x;
    }
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> __global__ void mtx_multiply(const double2 * A, const double2 * B, double2 * C, const int N, const int K, const int L ){ int col = threadIdx.x + blockDim.x*blockIdx.x; int row = threadIdx.y + blockDim.y*blockIdx.y; if (col >= L || row >= K) return; C[row*L + col].x = 0.0; C[row*L + col].y = 0.0; for(int k = 0; k < N ; k++){ C[row*L + col].x += A[row*N+k].x*B[k*L+col].x - A[row*N+k].y*B[k*L+col].y; C[row*L + col].y += A[row*N+k].x*B[k*L+col].y + A[row*N+k].y*B[k*L+col].x; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .globl _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .p2align 8 .type _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii,@function _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b64 s[4:5], s[0:1], 0x1c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[4:5], null, s14, s3, v[1:2] v_mad_u64_u32 v[5:6], null, s15, s2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s5, v4 v_cmp_gt_i32_e64 s2, s4, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_4 s_clause 0x1 s_load_b64 s[2:3], s[0:1], 0x10 s_load_b32 s4, s[0:1], 0x18 v_mad_u64_u32 v[1:2], null, v5, s5, v[4:5] v_mov_b32_e32 v0, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_mov_b32_e32 v3, v0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_lshlrev_b64 v[6:7], 4, v[1:2] v_mov_b32_e32 v1, v0 v_mov_b32_e32 v2, v0 s_waitcnt lgkmcnt(0) v_add_co_u32 v6, vcc_lo, s2, v6 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo s_cmp_lt_i32 s4, 1 global_store_b128 v[6:7], v[0:3], off s_cbranch_scc1 .LBB0_4 global_load_b128 v[0:3], v[6:7], off s_load_b128 s[0:3], s[0:1], 0x0 v_mul_lo_u32 v8, v5, s4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 4, v[8:9] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v5, vcc_lo, v8, s0 v_add_co_ci_u32_e32 v9, vcc_lo, s1, v9, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, v5, 8 
v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_3: v_ashrrev_i32_e32 v5, 31, v4 s_add_i32 s4, s4, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) s_cmp_lg_u32 s4, 0 v_lshlrev_b64 v[10:11], 4, v[4:5] v_add_nc_u32_e32 v4, s5, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v10, vcc_lo, s2, v10 v_add_co_ci_u32_e32 v11, vcc_lo, s3, v11, vcc_lo s_clause 0x1 global_load_b64 v[12:13], v[8:9], off offset:-8 global_load_b64 v[14:15], v[8:9], off s_clause 0x1 global_load_b64 v[16:17], v[10:11], off offset:8 global_load_b64 v[18:19], v[10:11], off s_waitcnt vmcnt(1) v_mul_f64 v[14:15], v[14:15], v[16:17] s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[12:13], v[12:13], v[18:19], -v[14:15] v_add_f64 v[0:1], v[0:1], v[12:13] global_store_b64 v[6:7], v[0:1], off global_load_b64 v[12:13], v[8:9], off global_load_b64 v[14:15], v[10:11], off global_load_b64 v[16:17], v[8:9], off offset:-8 global_load_b64 v[10:11], v[10:11], off offset:8 v_add_co_u32 v8, vcc_lo, v8, 16 v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo s_waitcnt vmcnt(2) v_mul_f64 v[12:13], v[12:13], v[14:15] s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[10:11], v[16:17], v[10:11], v[12:13] v_add_f64 v[2:3], v[2:3], v[10:11] global_store_b64 v[6:7], v[2:3], off offset:8 s_cbranch_scc1 .LBB0_3 .LBB0_4: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 
.amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 20 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, .Lfunc_end0-_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 
.size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 20 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> __global__ void mtx_multiply(const double2 * A, const double2 * B, double2 * C, const int N, const int K, const int L ){ int col = threadIdx.x + blockDim.x*blockIdx.x; int row = threadIdx.y + blockDim.y*blockIdx.y; if (col >= L || row >= K) return; C[row*L + col].x = 0.0; C[row*L + col].y = 0.0; for(int k = 0; k < N ; k++){ C[row*L + col].x += A[row*N+k].x*B[k*L+col].x - A[row*N+k].y*B[k*L+col].y; C[row*L + col].y += A[row*N+k].x*B[k*L+col].y + A[row*N+k].y*B[k*L+col].x; } }
.text .file "mtxmultiply.hip" .globl _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii # -- Begin function _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .p2align 4, 0x90 .type _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii,@function _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii: # @_Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, .Lfunc_end0-_Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl 
$_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii,@object # @_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .section .rodata,"a",@progbits .globl _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .p2align 3, 0x0 _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii: .quad _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .size _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii" .size .L__unnamed_1, 53 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section 
".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z12mtx_multiplyPK7double2S1_PS_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e280000002100 */ /*0030*/ S2R R6, SR_CTAID.Y ; /* 0x0000000000067919 */ /* 0x000e680000002600 */ /*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */ /* 0x001fca00078e0200 */ /*0060*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x180], PT ; /* 0x0000600007007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R6, R6, c[0x0][0x4], R3 ; /* 0x0000010006067a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GE.OR P0, PT, R6, c[0x0][0x17c], P0 ; /* 0x00005f0006007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff057624 */ /* 0x000fe200078e00ff */ /*00b0*/ HFMA2.MMA R0, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff007435 */ /* 0x000fe200000001ff */ /*00c0*/ IMAD R19, R6, c[0x0][0x180], R7 ; /* 0x0000600006137a24 */ /* 0x000fe200078e0207 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00e0*/ ISETP.GE.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */ /* 0x000fcc0003f06270 */ /*00f0*/ IMAD.WIDE R18, R19, R0, c[0x0][0x170] ; /* 0x00005c0013127625 */ /* 0x000fca00078e0200 */ /*0100*/ STG.E.128 [R18.64], RZ ; /* 0x000000ff12007986 */ /* 0x0001e4000c101d04 */ /*0110*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0120*/ IADD3 R2, R5.reuse, -0x1, RZ ; /* 0xffffffff05027810 */ /* 0x040fe20007ffe0ff */ /*0130*/ IMAD.MOV.U32 R3, RZ, RZ, RZ ; /* 0x000000ffff037224 */ /* 0x000fe200078e00ff */ /*0140*/ LOP3.LUT R5, R5, 0x3, RZ, 0xc0, !PT ; /* 
0x0000000305057812 */ /* 0x000fe200078ec0ff */ /*0150*/ CS2R R22, SRZ ; /* 0x0000000000167805 */ /* 0x000fe2000001ff00 */ /*0160*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe20003f26070 */ /*0170*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*0180*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fd60003f05270 */ /*0190*/ @!P1 BRA 0x5e0 ; /* 0x0000044000009947 */ /* 0x000fea0003800000 */ /*01a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */ /* 0x000fe200000001ff */ /*01b0*/ IADD3 R2, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005027a10 */ /* 0x000fe20007ffe1ff */ /*01c0*/ IMAD R4, R6, c[0x0][0x178], RZ ; /* 0x00005e0006047a24 */ /* 0x000fe200078e02ff */ /*01d0*/ CS2R R22, SRZ ; /* 0x0000000000167805 */ /* 0x000fe2000001ff00 */ /*01e0*/ IMAD.WIDE R26, R7, R0, c[0x0][0x168] ; /* 0x00005a00071a7625 */ /* 0x000fe200078e0200 */ /*01f0*/ CS2R R20, SRZ ; /* 0x0000000000147805 */ /* 0x000fe2000001ff00 */ /*0200*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */ /* 0x000fe40000000a00 */ /*0210*/ MOV R17, UR7 ; /* 0x0000000700117c02 */ /* 0x000fe20008000f00 */ /*0220*/ IMAD.U32 R16, RZ, RZ, UR6 ; /* 0x00000006ff107e24 */ /* 0x000fe2000f8e00ff */ /*0230*/ LDG.E.128 R8, [R26.64] ; /* 0x000000041a087981 */ /* 0x000ea6000c1e1d00 */ /*0240*/ IMAD.WIDE R16, R4, 0x10, R16 ; /* 0x0000001004107825 */ /* 0x000fca00078e0210 */ /*0250*/ LDG.E.128 R12, [R16.64] ; /* 0x00000004100c7981 */ /* 0x000ea4000c1e1d00 */ /*0260*/ DMUL R24, R14, R10 ; /* 0x0000000a0e187228 */ /* 0x006e4c0000000000 */ /*0270*/ DFMA R8, R12, R8, -R24 ; /* 0x000000080c08722b */ /* 0x002e4c0000000818 */ /*0280*/ DADD R20, R8, R20 ; /* 0x0000000008147229 */ /* 0x002e4e0000000014 */ /*0290*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*02a0*/ LDG.E.64 R8, [R26.64] ; /* 0x000000041a087981 */ /* 0x000ea8000c1e1b00 */ /*02b0*/ LDG.E.64 R12, [R16.64] ; /* 0x00000004100c7981 */ /* 0x000ee2000c1e1b00 */ /*02c0*/ 
IMAD.WIDE R24, R0, c[0x0][0x180], R26 ; /* 0x0000600000187a25 */ /* 0x000fe200078e021a */ /*02d0*/ DMUL R8, R14, R8 ; /* 0x000000080e087228 */ /* 0x004ecc0000000000 */ /*02e0*/ DFMA R8, R10, R12, R8 ; /* 0x0000000c0a08722b */ /* 0x008e8c0000000008 */ /*02f0*/ DADD R22, R8, R22 ; /* 0x0000000008167229 */ /* 0x004e8e0000000016 */ /*0300*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0045e8000c101b04 */ /*0310*/ LDG.E.128 R8, [R16.64+0x10] ; /* 0x0000100410087981 */ /* 0x000ee8000c1e1d00 */ /*0320*/ LDG.E.128 R12, [R24.64] ; /* 0x00000004180c7981 */ /* 0x000ee4000c1e1d00 */ /*0330*/ DMUL R28, R10, R14 ; /* 0x0000000e0a1c7228 */ /* 0x008ecc0000000000 */ /*0340*/ DFMA R8, R8, R12, -R28 ; /* 0x0000000c0808722b */ /* 0x008e4c000000081c */ /*0350*/ DADD R20, R20, R8 ; /* 0x0000000014147229 */ /* 0x002e4e0000000008 */ /*0360*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*0370*/ LDG.E.64 R8, [R24.64] ; /* 0x0000000418087981 */ /* 0x000ee8000c1e1b00 */ /*0380*/ LDG.E.64 R12, [R16.64+0x10] ; /* 0x00001004100c7981 */ /* 0x000f22000c1e1b00 */ /*0390*/ IMAD.WIDE R26, R0, c[0x0][0x180], R24 ; /* 0x00006000001a7a25 */ /* 0x000fe200078e0218 */ /*03a0*/ DMUL R8, R10, R8 ; /* 0x000000080a087228 */ /* 0x008f0c0000000000 */ /*03b0*/ DFMA R8, R14, R12, R8 ; /* 0x0000000c0e08722b */ /* 0x010e8c0000000008 */ /*03c0*/ DADD R22, R22, R8 ; /* 0x0000000016167229 */ /* 0x004e8e0000000008 */ /*03d0*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0045e8000c101b04 */ /*03e0*/ LDG.E.128 R8, [R16.64+0x20] ; /* 0x0000200410087981 */ /* 0x000ee8000c1e1d00 */ /*03f0*/ LDG.E.128 R12, [R26.64] ; /* 0x000000041a0c7981 */ /* 0x000ee4000c1e1d00 */ /*0400*/ DMUL R28, R10, R14 ; /* 0x0000000e0a1c7228 */ /* 0x008ecc0000000000 */ /*0410*/ DFMA R8, R8, R12, -R28 ; /* 0x0000000c0808722b */ /* 0x008e4c000000081c */ /*0420*/ DADD R20, R20, R8 ; /* 0x0000000014147229 */ /* 0x002e4e0000000008 */ /*0430*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 
0x0023e8000c101b04 */ /*0440*/ LDG.E.64 R8, [R26.64] ; /* 0x000000041a087981 */ /* 0x000ee8000c1e1b00 */ /*0450*/ LDG.E.64 R12, [R16.64+0x20] ; /* 0x00002004100c7981 */ /* 0x000f22000c1e1b00 */ /*0460*/ IMAD.WIDE R24, R0, c[0x0][0x180], R26 ; /* 0x0000600000187a25 */ /* 0x000fe200078e021a */ /*0470*/ DMUL R8, R10, R8 ; /* 0x000000080a087228 */ /* 0x008f0c0000000000 */ /*0480*/ DFMA R8, R14, R12, R8 ; /* 0x0000000c0e08722b */ /* 0x010e8c0000000008 */ /*0490*/ DADD R22, R22, R8 ; /* 0x0000000016167229 */ /* 0x004e8e0000000008 */ /*04a0*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0045e8000c101b04 */ /*04b0*/ LDG.E.128 R8, [R16.64+0x30] ; /* 0x0000300410087981 */ /* 0x000ee8000c1e1d00 */ /*04c0*/ LDG.E.128 R12, [R24.64] ; /* 0x00000004180c7981 */ /* 0x000ee4000c1e1d00 */ /*04d0*/ DMUL R28, R10, R14 ; /* 0x0000000e0a1c7228 */ /* 0x008ecc0000000000 */ /*04e0*/ DFMA R8, R8, R12, -R28 ; /* 0x0000000c0808722b */ /* 0x008e4c000000081c */ /*04f0*/ DADD R20, R20, R8 ; /* 0x0000000014147229 */ /* 0x002e4e0000000008 */ /*0500*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*0510*/ LDG.E.64 R8, [R24.64] ; /* 0x0000000418087981 */ /* 0x000ee8000c1e1b00 */ /*0520*/ LDG.E.64 R12, [R16.64+0x30] ; /* 0x00003004100c7981 */ /* 0x000f22000c1e1b00 */ /*0530*/ IADD3 R2, R2, -0x4, RZ ; /* 0xfffffffc02027810 */ /* 0x000fc80007ffe0ff */ /*0540*/ ISETP.NE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x000fe20003f25270 */ /*0550*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */ /* 0x000fe2000ff1e03f */ /*0560*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */ /* 0x000fc60007ffe0ff */ /*0570*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0580*/ IMAD.WIDE R26, R0, c[0x0][0x180], R24 ; /* 0x00006000001a7a25 */ /* 0x000fe200078e0218 */ /*0590*/ DMUL R8, R10, R8 ; /* 0x000000080a087228 */ /* 0x008f0c0000000000 */ /*05a0*/ DFMA R8, R14, R12, R8 ; /* 0x0000000c0e08722b */ /* 
0x010e8c0000000008 */ /*05b0*/ DADD R22, R22, R8 ; /* 0x0000000016167229 */ /* 0x004e8e0000000008 */ /*05c0*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0043e2000c101b04 */ /*05d0*/ @P1 BRA 0x210 ; /* 0xfffffc3000001947 */ /* 0x000fea000383ffff */ /*05e0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*05f0*/ IMAD R9, R6, c[0x0][0x178], R3 ; /* 0x00005e0006097a24 */ /* 0x000fe400078e0203 */ /*0600*/ IMAD R3, R3, c[0x0][0x180], R7 ; /* 0x0000600003037a24 */ /* 0x000fe400078e0207 */ /*0610*/ IMAD.WIDE R6, R9, R0, c[0x0][0x160] ; /* 0x0000580009067625 */ /* 0x000fc800078e0200 */ /*0620*/ IMAD.WIDE R2, R3, R0, c[0x0][0x168] ; /* 0x00005a0003027625 */ /* 0x000fca00078e0200 */ /*0630*/ LDG.E.128 R8, [R2.64] ; /* 0x0000000402087981 */ /* 0x000ea8000c1e1d00 */ /*0640*/ LDG.E.128 R12, [R6.64] ; /* 0x00000004060c7981 */ /* 0x000ea4000c1e1d00 */ /*0650*/ DMUL R16, R14, R10 ; /* 0x0000000a0e107228 */ /* 0x004e8c0000000000 */ /*0660*/ DFMA R8, R12, R8, -R16 ; /* 0x000000080c08722b */ /* 0x004e8c0000000810 */ /*0670*/ DADD R20, R8, R20 ; /* 0x0000000008147229 */ /* 0x006e4e0000000014 */ /*0680*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */ /* 0x0023e8000c101b04 */ /*0690*/ LDG.E.64 R12, [R2.64] ; /* 0x00000004020c7981 */ /* 0x0004e8000c1e1b00 */ /*06a0*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */ /* 0x000962000c1e1b00 */ /*06b0*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fc80007ffe0ff */ /*06c0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*06d0*/ IMAD.WIDE R2, R0, c[0x0][0x180], R2 ; /* 0x0000600000027a25 */ /* 0x004fe200078e0202 */ /*06e0*/ IADD3 R6, P1, R6, 0x10, RZ ; /* 0x0000001006067810 */ /* 0x010fc80007f3e0ff */ /*06f0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0700*/ DMUL R12, R14, R12 ; /* 0x0000000c0e0c7228 */ /* 0x008f4c0000000000 */ /*0710*/ DFMA R8, R10, R8, R12 ; /* 0x000000080a08722b */ /* 0x020e8c000000000c */ /*0720*/ 
DADD R22, R8, R22 ; /* 0x0000000008167229 */ /* 0x004e8e0000000016 */ /*0730*/ STG.E.64 [R18.64+0x8], R22 ; /* 0x0000081612007986 */ /* 0x0043e2000c101b04 */ /*0740*/ @P0 BRA 0x630 ; /* 0xfffffee000000947 */ /* 0x000fea000383ffff */ /*0750*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0760*/ BRA 0x760; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0770*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0780*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0790*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .globl _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .p2align 8 .type _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii,@function _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b64 s[4:5], s[0:1], 0x1c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[4:5], null, s14, s3, v[1:2] v_mad_u64_u32 v[5:6], null, s15, s2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s5, v4 v_cmp_gt_i32_e64 s2, s4, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_4 s_clause 0x1 s_load_b64 s[2:3], s[0:1], 0x10 s_load_b32 s4, s[0:1], 0x18 v_mad_u64_u32 v[1:2], null, v5, s5, v[4:5] v_mov_b32_e32 v0, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_mov_b32_e32 v3, v0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_lshlrev_b64 v[6:7], 4, v[1:2] v_mov_b32_e32 v1, v0 v_mov_b32_e32 v2, v0 s_waitcnt lgkmcnt(0) v_add_co_u32 v6, vcc_lo, s2, v6 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo s_cmp_lt_i32 s4, 1 global_store_b128 v[6:7], v[0:3], off s_cbranch_scc1 .LBB0_4 global_load_b128 v[0:3], v[6:7], off s_load_b128 s[0:3], s[0:1], 0x0 v_mul_lo_u32 v8, v5, s4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v9, 31, v8 v_lshlrev_b64 v[8:9], 4, v[8:9] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v5, vcc_lo, v8, s0 v_add_co_ci_u32_e32 v9, vcc_lo, s1, v9, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v8, vcc_lo, v5, 8 
v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB0_3: v_ashrrev_i32_e32 v5, 31, v4 s_add_i32 s4, s4, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) s_cmp_lg_u32 s4, 0 v_lshlrev_b64 v[10:11], 4, v[4:5] v_add_nc_u32_e32 v4, s5, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v10, vcc_lo, s2, v10 v_add_co_ci_u32_e32 v11, vcc_lo, s3, v11, vcc_lo s_clause 0x1 global_load_b64 v[12:13], v[8:9], off offset:-8 global_load_b64 v[14:15], v[8:9], off s_clause 0x1 global_load_b64 v[16:17], v[10:11], off offset:8 global_load_b64 v[18:19], v[10:11], off s_waitcnt vmcnt(1) v_mul_f64 v[14:15], v[14:15], v[16:17] s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[12:13], v[12:13], v[18:19], -v[14:15] v_add_f64 v[0:1], v[0:1], v[12:13] global_store_b64 v[6:7], v[0:1], off global_load_b64 v[12:13], v[8:9], off global_load_b64 v[14:15], v[10:11], off global_load_b64 v[16:17], v[8:9], off offset:-8 global_load_b64 v[10:11], v[10:11], off offset:8 v_add_co_u32 v8, vcc_lo, v8, 16 v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo s_waitcnt vmcnt(2) v_mul_f64 v[12:13], v[12:13], v[14:15] s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[10:11], v[16:17], v[10:11], v[12:13] v_add_f64 v[2:3], v[2:3], v[10:11] global_store_b64 v[6:7], v[2:3], off offset:8 s_cbranch_scc1 .LBB0_3 .LBB0_4: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 
.amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 20 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, .Lfunc_end0-_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 
.size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 20 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000afaa6_00000000-6_mtxmultiply.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii .type _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii, @function _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z12mtx_multiplyPK7double2S1_PS_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size 
_Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii, .-_Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii .globl _Z12mtx_multiplyPK7double2S1_PS_iii .type _Z12mtx_multiplyPK7double2S1_PS_iii, @function _Z12mtx_multiplyPK7double2S1_PS_iii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z12mtx_multiplyPK7double2S1_PS_iiiPK7double2S1_PS_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z12mtx_multiplyPK7double2S1_PS_iii, .-_Z12mtx_multiplyPK7double2S1_PS_iii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z12mtx_multiplyPK7double2S1_PS_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12mtx_multiplyPK7double2S1_PS_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "mtxmultiply.hip" .globl _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii # -- Begin function _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .p2align 4, 0x90 .type _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii,@function _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii: # @_Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, .Lfunc_end0-_Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl 
$_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii,@object # @_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .section .rodata,"a",@progbits .globl _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .p2align 3, 0x0 _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii: .quad _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .size _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii" .size .L__unnamed_1, 53 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section 
".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12mtx_multiplyPK15HIP_vector_typeIdLj2EES2_PS0_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void init_cs(int *d_cl, int *d_cs, int c_size, int chunk) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= c_size) { return; } if (i == 0) { d_cs[i] = 0; } else { d_cs[i] = d_cl[i - 1] * chunk; } }
code for sm_80 Function : _Z7init_csPiS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f05270 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd80000000a00 */ /*0080*/ @!P0 BRA 0x110 ; /* 0x0000008000008947 */ /* 0x000fea0003800000 */ /*0090*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*00a0*/ IADD3 R2, R4, -0x1, RZ ; /* 0xffffffff04027810 */ /* 0x000fd20007ffe0ff */ /*00b0*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0205 */ /*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fc800078e0205 */ /*00e0*/ IMAD R7, R2, c[0x0][0x174], RZ ; /* 0x00005d0002077a24 */ /* 0x004fca00078e02ff */ /*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101904 */ /*0100*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0110*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */ /* 0x000fe40000000f00 */ /*0120*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */ /* 0x000fca0000000f00 */ /*0130*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 0x150; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ 
/*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void init_cs(int *d_cl, int *d_cs, int c_size, int chunk) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= c_size) { return; } if (i == 0) { d_cs[i] = 0; } else { d_cs[i] = d_cl[i - 1] * chunk; } }
.file "tmpxft_001a8ca4_00000000-6_init_cs.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z30__device_stub__Z7init_csPiS_iiPiS_ii .type _Z30__device_stub__Z7init_csPiS_iiPiS_ii, @function _Z30__device_stub__Z7init_csPiS_iiPiS_ii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z7init_csPiS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z30__device_stub__Z7init_csPiS_iiPiS_ii, .-_Z30__device_stub__Z7init_csPiS_iiPiS_ii .globl _Z7init_csPiS_ii .type _Z7init_csPiS_ii, @function _Z7init_csPiS_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z7init_csPiS_iiPiS_ii addq $8, %rsp 
.cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z7init_csPiS_ii, .-_Z7init_csPiS_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z7init_csPiS_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z7init_csPiS_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void init_cs(int *d_cl, int *d_cs, int c_size, int chunk) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= c_size) { return; } if (i == 0) { d_cs[i] = 0; } else { d_cs[i] = d_cl[i - 1] * chunk; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void init_cs(int *d_cl, int *d_cs, int c_size, int chunk) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= c_size) { return; } if (i == 0) { d_cs[i] = 0; } else { d_cs[i] = d_cl[i - 1] * chunk; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void init_cs(int *d_cl, int *d_cs, int c_size, int chunk) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= c_size) { return; } if (i == 0) { d_cs[i] = 0; } else { d_cs[i] = d_cl[i - 1] * chunk; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7init_csPiS_ii .globl _Z7init_csPiS_ii .p2align 8 .type _Z7init_csPiS_ii,@function _Z7init_csPiS_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_5 s_load_b64 s[2:3], s[0:1], 0x8 s_mov_b32 s4, exec_lo v_cmpx_ne_u32_e32 0, v1 s_xor_b32 s4, exec_lo, s4 s_cbranch_execz .LBB0_3 s_load_b64 s[6:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_load_b32 s0, s[0:1], 0x14 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s6, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off offset:-4 s_waitcnt vmcnt(0) v_mul_lo_u32 v2, v2, s0 global_store_b32 v[0:1], v2, off .LBB0_3: s_and_not1_saveexec_b32 s0, s4 s_cbranch_execz .LBB0_5 v_mov_b32_e32 v0, 0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v0, s[2:3] .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7init_csPiS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 
.amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7init_csPiS_ii, .Lfunc_end0-_Z7init_csPiS_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: 
hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7init_csPiS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7init_csPiS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void init_cs(int *d_cl, int *d_cs, int c_size, int chunk) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= c_size) { return; } if (i == 0) { d_cs[i] = 0; } else { d_cs[i] = d_cl[i - 1] * chunk; } }
.text .file "init_cs.hip" .globl _Z22__device_stub__init_csPiS_ii # -- Begin function _Z22__device_stub__init_csPiS_ii .p2align 4, 0x90 .type _Z22__device_stub__init_csPiS_ii,@function _Z22__device_stub__init_csPiS_ii: # @_Z22__device_stub__init_csPiS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z7init_csPiS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z22__device_stub__init_csPiS_ii, .Lfunc_end0-_Z22__device_stub__init_csPiS_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7init_csPiS_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor 
.cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z7init_csPiS_ii,@object # @_Z7init_csPiS_ii .section .rodata,"a",@progbits .globl _Z7init_csPiS_ii .p2align 3, 0x0 _Z7init_csPiS_ii: .quad _Z22__device_stub__init_csPiS_ii .size _Z7init_csPiS_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z7init_csPiS_ii" .size .L__unnamed_1, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__init_csPiS_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7init_csPiS_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z7init_csPiS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fe20003f05270 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd80000000a00 */ /*0080*/ @!P0 BRA 0x110 ; /* 0x0000008000008947 */ /* 0x000fea0003800000 */ /*0090*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*00a0*/ IADD3 R2, R4, -0x1, RZ ; /* 0xffffffff04027810 */ /* 0x000fd20007ffe0ff */ /*00b0*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0205 */ /*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fc800078e0205 */ /*00e0*/ IMAD R7, R2, c[0x0][0x174], RZ ; /* 0x00005d0002077a24 */ /* 0x004fca00078e02ff */ /*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101904 */ /*0100*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0110*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */ /* 0x000fe40000000f00 */ /*0120*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */ /* 0x000fca0000000f00 */ /*0130*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 0x150; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ 
/*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7init_csPiS_ii .globl _Z7init_csPiS_ii .p2align 8 .type _Z7init_csPiS_ii,@function _Z7init_csPiS_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_5 s_load_b64 s[2:3], s[0:1], 0x8 s_mov_b32 s4, exec_lo v_cmpx_ne_u32_e32 0, v1 s_xor_b32 s4, exec_lo, s4 s_cbranch_execz .LBB0_3 s_load_b64 s[6:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_load_b32 s0, s[0:1], 0x14 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s6, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off offset:-4 s_waitcnt vmcnt(0) v_mul_lo_u32 v2, v2, s0 global_store_b32 v[0:1], v2, off .LBB0_3: s_and_not1_saveexec_b32 s0, s4 s_cbranch_execz .LBB0_5 v_mov_b32_e32 v0, 0 s_waitcnt lgkmcnt(0) global_store_b32 v0, v0, s[2:3] .LBB0_5: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7init_csPiS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 
.amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7init_csPiS_ii, .Lfunc_end0-_Z7init_csPiS_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: 
hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7init_csPiS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7init_csPiS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata