system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void getSortedDegree(int numNodes, int *offset, int *workspace1, int *workspace2, int *workspace3)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<numNodes; i++)
{
// initiate all workspace to 0
workspace1[i] = 0;
workspace2[i] = 0;
workspace3[i] = 0;
// compute each neighlist's length
int neighlistLen=offset[i+1]-offset[i];
// group the nodes by their degree
if(neighlistLen >= 512) workspace1[i] = 1;
else if(neighlistLen > 32) workspace2[i] = 1;
else if(neighlistLen >0 && neighlistLen <= 32) workspace3[i] = 1;
}
}
__global__ void filter(int *predicateArray, int* scanArray, int *newPlace, int sizeScan)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<sizeScan; i++)
{
if(predicateArray[i] ==1)
newPlace[scanArray[i]-1] = i;
}
} | .file "tmpxft_00132615_00000000-6_getSortedDegree.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_
.type _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_, @function
_Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 44(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15getSortedDegreeiPiS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_, .-_Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_
.globl _Z15getSortedDegreeiPiS_S_S_
.type _Z15getSortedDegreeiPiS_S_S_, @function
_Z15getSortedDegreeiPiS_S_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15getSortedDegreeiPiS_S_S_, .-_Z15getSortedDegreeiPiS_S_S_
.globl _Z30__device_stub__Z6filterPiS_S_iPiS_S_i
.type _Z30__device_stub__Z6filterPiS_S_iPiS_S_i, @function
_Z30__device_stub__Z6filterPiS_S_iPiS_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6filterPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z30__device_stub__Z6filterPiS_S_iPiS_S_i, .-_Z30__device_stub__Z6filterPiS_S_iPiS_S_i
.globl _Z6filterPiS_S_i
.type _Z6filterPiS_S_i, @function
_Z6filterPiS_S_i:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6filterPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z6filterPiS_S_i, .-_Z6filterPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6filterPiS_S_i"
.LC1:
.string "_Z15getSortedDegreeiPiS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6filterPiS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z15getSortedDegreeiPiS_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void getSortedDegree(int numNodes, int *offset, int *workspace1, int *workspace2, int *workspace3)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<numNodes; i++)
{
// initiate all workspace to 0
workspace1[i] = 0;
workspace2[i] = 0;
workspace3[i] = 0;
// compute each neighlist's length
int neighlistLen=offset[i+1]-offset[i];
// group the nodes by their degree
if(neighlistLen >= 512) workspace1[i] = 1;
else if(neighlistLen > 32) workspace2[i] = 1;
else if(neighlistLen >0 && neighlistLen <= 32) workspace3[i] = 1;
}
}
__global__ void filter(int *predicateArray, int* scanArray, int *newPlace, int sizeScan)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<sizeScan; i++)
{
if(predicateArray[i] ==1)
newPlace[scanArray[i]-1] = i;
}
} | #include <hip/hip_runtime.h>
__global__ void getSortedDegree(int numNodes, int *offset, int *workspace1, int *workspace2, int *workspace3)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<numNodes; i++)
{
// initiate all workspace to 0
workspace1[i] = 0;
workspace2[i] = 0;
workspace3[i] = 0;
// compute each neighlist's length
int neighlistLen=offset[i+1]-offset[i];
// group the nodes by their degree
if(neighlistLen >= 512) workspace1[i] = 1;
else if(neighlistLen > 32) workspace2[i] = 1;
else if(neighlistLen >0 && neighlistLen <= 32) workspace3[i] = 1;
}
}
__global__ void filter(int *predicateArray, int* scanArray, int *newPlace, int sizeScan)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<sizeScan; i++)
{
if(predicateArray[i] ==1)
newPlace[scanArray[i]-1] = i;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void getSortedDegree(int numNodes, int *offset, int *workspace1, int *workspace2, int *workspace3)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<numNodes; i++)
{
// initiate all workspace to 0
workspace1[i] = 0;
workspace2[i] = 0;
workspace3[i] = 0;
// compute each neighlist's length
int neighlistLen=offset[i+1]-offset[i];
// group the nodes by their degree
if(neighlistLen >= 512) workspace1[i] = 1;
else if(neighlistLen > 32) workspace2[i] = 1;
else if(neighlistLen >0 && neighlistLen <= 32) workspace3[i] = 1;
}
}
__global__ void filter(int *predicateArray, int* scanArray, int *newPlace, int sizeScan)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<sizeScan; i++)
{
if(predicateArray[i] ==1)
newPlace[scanArray[i]-1] = i;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15getSortedDegreeiPiS_S_S_
.globl _Z15getSortedDegreeiPiS_S_S_
.p2align 8
.type _Z15getSortedDegreeiPiS_S_S_,@function
_Z15getSortedDegreeiPiS_S_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s8, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_9
s_load_b256 s[0:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v15, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[7:8], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, v7, s0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v10, vcc_lo, s1, v8, vcc_lo
v_add_co_u32 v3, vcc_lo, s2, v7
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v8, vcc_lo
v_add_co_u32 v5, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
v_add_co_u32 v9, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v10, vcc_lo
s_mov_b32 s1, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
v_add_co_u32 v9, vcc_lo, v9, 4
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v10, vcc_lo
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_nc_u32_e32 v1, 1, v1
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
v_add_co_u32 v5, vcc_lo, v5, 4
v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_le_i32_e32 vcc_lo, s8, v1
v_add_co_u32 v7, s0, v7, 4
v_add_co_ci_u32_e64 v8, s0, 0, v8, s0
s_or_b32 s1, vcc_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_9
.LBB0_3:
global_store_b32 v[3:4], v0, off
global_store_b32 v[5:6], v0, off
global_store_b32 v[7:8], v0, off
s_clause 0x1
global_load_b32 v2, v[9:10], off
global_load_b32 v11, v[9:10], off offset:-4
s_mov_b32 s0, -1
s_mov_b32 s2, exec_lo
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v16, v2, v11
v_dual_mov_b32 v12, v4 :: v_dual_mov_b32 v11, v3
s_delay_alu instid0(VALU_DEP_2)
v_cmpx_gt_i32_e32 0x200, v16
s_cbranch_execz .LBB0_7
v_ashrrev_i32_e32 v2, 31, v1
v_cmp_lt_i32_e64 s3, 32, v16
s_mov_b32 s9, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[13:14], 2, v[1:2]
v_add_co_u32 v11, vcc_lo, s4, v13
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v12, vcc_lo, s5, v14, vcc_lo
v_cmpx_gt_i32_e32 33, v16
v_cmp_lt_i32_e32 vcc_lo, 0, v16
v_add_co_u32 v11, s0, s6, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_co_ci_u32_e64 v12, s0, s7, v14, s0
s_and_not1_b32 s0, s3, exec_lo
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s3, s0, s3
s_or_b32 exec_lo, exec_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s0, s3, exec_lo
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s2
s_and_saveexec_b32 s2, s0
s_cbranch_execz .LBB0_2
global_store_b32 v[11:12], v15, off
s_branch .LBB0_2
.LBB0_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15getSortedDegreeiPiS_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 17
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15getSortedDegreeiPiS_S_S_, .Lfunc_end0-_Z15getSortedDegreeiPiS_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z6filterPiS_S_i
.globl _Z6filterPiS_S_i
.p2align 8
.type _Z6filterPiS_S_i,@function
_Z6filterPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, exec_lo
v_cmpx_gt_i32_e64 s2, v1
s_cbranch_execz .LBB1_5
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
s_add_u32 s1, s8, -4
s_addc_u32 s3, s9, -1
s_mov_b32 s4, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v1, 1, v1
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_le_i32_e32 vcc_lo, s2, v1
v_add_co_u32 v4, s0, v4, 4
v_add_co_ci_u32_e64 v5, s0, 0, v5, s0
s_or_b32 s4, vcc_lo, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB1_5
.LBB1_3:
global_load_b32 v0, v[2:3], off
s_mov_b32 s0, exec_lo
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 1, v0
s_cbranch_execz .LBB1_2
global_load_b32 v6, v[4:5], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v6, vcc_lo, s1, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_store_b32 v[6:7], v1, off
s_branch .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6filterPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z6filterPiS_S_i, .Lfunc_end1-_Z6filterPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15getSortedDegreeiPiS_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15getSortedDegreeiPiS_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 17
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6filterPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6filterPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void getSortedDegree(int numNodes, int *offset, int *workspace1, int *workspace2, int *workspace3)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<numNodes; i++)
{
// initiate all workspace to 0
workspace1[i] = 0;
workspace2[i] = 0;
workspace3[i] = 0;
// compute each neighlist's length
int neighlistLen=offset[i+1]-offset[i];
// group the nodes by their degree
if(neighlistLen >= 512) workspace1[i] = 1;
else if(neighlistLen > 32) workspace2[i] = 1;
else if(neighlistLen >0 && neighlistLen <= 32) workspace3[i] = 1;
}
}
__global__ void filter(int *predicateArray, int* scanArray, int *newPlace, int sizeScan)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<sizeScan; i++)
{
if(predicateArray[i] ==1)
newPlace[scanArray[i]-1] = i;
}
} | .text
.file "getSortedDegree.hip"
.globl _Z30__device_stub__getSortedDegreeiPiS_S_S_ # -- Begin function _Z30__device_stub__getSortedDegreeiPiS_S_S_
.p2align 4, 0x90
.type _Z30__device_stub__getSortedDegreeiPiS_S_S_,@function
_Z30__device_stub__getSortedDegreeiPiS_S_S_: # @_Z30__device_stub__getSortedDegreeiPiS_S_S_
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 12(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 80(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15getSortedDegreeiPiS_S_S_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z30__device_stub__getSortedDegreeiPiS_S_S_, .Lfunc_end0-_Z30__device_stub__getSortedDegreeiPiS_S_S_
.cfi_endproc
# -- End function
.globl _Z21__device_stub__filterPiS_S_i # -- Begin function _Z21__device_stub__filterPiS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__filterPiS_S_i,@function
_Z21__device_stub__filterPiS_S_i: # @_Z21__device_stub__filterPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6filterPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z21__device_stub__filterPiS_S_i, .Lfunc_end1-_Z21__device_stub__filterPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15getSortedDegreeiPiS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6filterPiS_S_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15getSortedDegreeiPiS_S_S_,@object # @_Z15getSortedDegreeiPiS_S_S_
.section .rodata,"a",@progbits
.globl _Z15getSortedDegreeiPiS_S_S_
.p2align 3, 0x0
_Z15getSortedDegreeiPiS_S_S_:
.quad _Z30__device_stub__getSortedDegreeiPiS_S_S_
.size _Z15getSortedDegreeiPiS_S_S_, 8
.type _Z6filterPiS_S_i,@object # @_Z6filterPiS_S_i
.globl _Z6filterPiS_S_i
.p2align 3, 0x0
_Z6filterPiS_S_i:
.quad _Z21__device_stub__filterPiS_S_i
.size _Z6filterPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15getSortedDegreeiPiS_S_S_"
.size .L__unnamed_1, 29
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z6filterPiS_S_i"
.size .L__unnamed_2, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__getSortedDegreeiPiS_S_S_
.addrsig_sym _Z21__device_stub__filterPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15getSortedDegreeiPiS_S_S_
.addrsig_sym _Z6filterPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00132615_00000000-6_getSortedDegree.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_
.type _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_, @function
_Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 44(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15getSortedDegreeiPiS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_, .-_Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_
.globl _Z15getSortedDegreeiPiS_S_S_
.type _Z15getSortedDegreeiPiS_S_S_, @function
_Z15getSortedDegreeiPiS_S_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z15getSortedDegreeiPiS_S_S_iPiS_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15getSortedDegreeiPiS_S_S_, .-_Z15getSortedDegreeiPiS_S_S_
.globl _Z30__device_stub__Z6filterPiS_S_iPiS_S_i
.type _Z30__device_stub__Z6filterPiS_S_iPiS_S_i, @function
_Z30__device_stub__Z6filterPiS_S_iPiS_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6filterPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z30__device_stub__Z6filterPiS_S_iPiS_S_i, .-_Z30__device_stub__Z6filterPiS_S_iPiS_S_i
.globl _Z6filterPiS_S_i
.type _Z6filterPiS_S_i, @function
_Z6filterPiS_S_i:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6filterPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z6filterPiS_S_i, .-_Z6filterPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6filterPiS_S_i"
.LC1:
.string "_Z15getSortedDegreeiPiS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6filterPiS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z15getSortedDegreeiPiS_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "getSortedDegree.hip"
.globl _Z30__device_stub__getSortedDegreeiPiS_S_S_ # -- Begin function _Z30__device_stub__getSortedDegreeiPiS_S_S_
.p2align 4, 0x90
.type _Z30__device_stub__getSortedDegreeiPiS_S_S_,@function
_Z30__device_stub__getSortedDegreeiPiS_S_S_: # @_Z30__device_stub__getSortedDegreeiPiS_S_S_
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 12(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 80(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15getSortedDegreeiPiS_S_S_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z30__device_stub__getSortedDegreeiPiS_S_S_, .Lfunc_end0-_Z30__device_stub__getSortedDegreeiPiS_S_S_
.cfi_endproc
# -- End function
.globl _Z21__device_stub__filterPiS_S_i # -- Begin function _Z21__device_stub__filterPiS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__filterPiS_S_i,@function
_Z21__device_stub__filterPiS_S_i: # @_Z21__device_stub__filterPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6filterPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z21__device_stub__filterPiS_S_i, .Lfunc_end1-_Z21__device_stub__filterPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15getSortedDegreeiPiS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6filterPiS_S_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15getSortedDegreeiPiS_S_S_,@object # @_Z15getSortedDegreeiPiS_S_S_
.section .rodata,"a",@progbits
.globl _Z15getSortedDegreeiPiS_S_S_
.p2align 3, 0x0
_Z15getSortedDegreeiPiS_S_S_:
.quad _Z30__device_stub__getSortedDegreeiPiS_S_S_
.size _Z15getSortedDegreeiPiS_S_S_, 8
.type _Z6filterPiS_S_i,@object # @_Z6filterPiS_S_i
.globl _Z6filterPiS_S_i
.p2align 3, 0x0
_Z6filterPiS_S_i:
.quad _Z21__device_stub__filterPiS_S_i
.size _Z6filterPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15getSortedDegreeiPiS_S_S_"
.size .L__unnamed_1, 29
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z6filterPiS_S_i"
.size .L__unnamed_2, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__getSortedDegreeiPiS_S_S_
.addrsig_sym _Z21__device_stub__filterPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15getSortedDegreeiPiS_S_S_
.addrsig_sym _Z6filterPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
// Device GPU add c[i] = a[i] + b[i]
__device__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Device GPU div c[i] = a[i] / b[i]
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Device GPU mod c[i] = a[i] % b[i]
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Executes all 5 shared math operations
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA to sharedB and store in mutlDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract a from b and store in subDest
subtract(a, b, subDest);
// Multiply a to b and store in mutlDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 constant math operations
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract a from b and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply a to b and store in mutlDest
multDest[tid] = VAL_A * VAL_B;
// Divide a by b and store in divDest
divDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
// Mod a by b and store in modDest
modDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
// Host (Cpu) divide c[i] = a[i] / b[i]
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
if (b[i] != 0)
{
c[i] = a[i] / b[i];
}
else
{
c[i] = 0;
}
}
}
// Host (Cpu) mod c[i] = a[i] % b[i]
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers that uses the same seed as the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
}
// Executes each of the global memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeGlobalTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
executeGlobalMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
// Executes each of the shared memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data after creating shared memory. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeSharedTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeSharedMathOperations<<<numBlocks, blockSize, 3 * totalThreads * sizeof(int)>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
// Executes each of the consnt memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data using constant values. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeConstantTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeConstantMathOperations<<<numBlocks, blockSize>>>(gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
// Entry point: parses optional [totalThreads] [blockSize] arguments, then
// times and runs the host, global-memory, shared-memory, and constant-memory
// variants of the math-operation tests.
int main(int argc, char** argv)
{
    // read command line arguments
    int totalThreads = 256;
    int blockSize = 256;
    if (argc >= 2) {
        totalThreads = atoi(argv[1]);
    }
    if (argc >= 3) {
        blockSize = atoi(argv[2]);
    }
    // Guard against non-numeric, zero, or negative arguments: atoi returns 0
    // on parse failure, and blockSize == 0 would divide by zero below.
    if (totalThreads <= 0) {
        totalThreads = 256;
        printf("Warning: invalid total thread count; defaulting to %d\n", totalThreads);
    }
    if (blockSize <= 0) {
        blockSize = 256;
        printf("Warning: invalid block size; defaulting to %d\n", blockSize);
    }
    int numBlocks = totalThreads/blockSize;
    // validate command line arguments
    if (totalThreads % blockSize != 0) {
        ++numBlocks;
        totalThreads = numBlocks*blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }
    auto startTime = std::chrono::system_clock::now();
    executeHostTest(totalThreads, blockSize, numBlocks);
    auto endTime = std::chrono::system_clock::now();
    std::chrono::duration<double> totalTime = endTime-startTime;
    std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
    startTime = std::chrono::system_clock::now();
    executeGlobalTest(totalThreads, blockSize, numBlocks);
    endTime = std::chrono::system_clock::now();
    totalTime = endTime-startTime;
    std::cout << "Global Memory execution took: " << totalTime.count() << " seconds." << std::endl;
    startTime = std::chrono::system_clock::now();
    executeSharedTest(totalThreads, blockSize, numBlocks);
    endTime = std::chrono::system_clock::now();
    totalTime = endTime-startTime;
    std::cout << "Shared Memory execution took: " << totalTime.count() << " seconds." << std::endl;
    startTime = std::chrono::system_clock::now();
    executeConstantTest(totalThreads, blockSize, numBlocks);
    endTime = std::chrono::system_clock::now();
    totalTime = endTime-startTime;
    std::cout << "Constant Memory execution took: " << totalTime.count() << " seconds." << std::endl;
    return 0;
}
Function : _Z29executeConstantMathOperationsPiS_S_S_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */
/* 0x000fe200000001ff */
/*0030*/ MOV R13, 0xfffffffe ; /* 0xfffffffe000d7802 */
/* 0x000fe20000000f00 */
/*0040*/ HFMA2.MMA R15, -RZ, RZ, 0, 1.78813934326171875e-07 ; /* 0x00000003ff0f7435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0070*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */
/* 0x001fca00078e0203 */
/*0080*/ IMAD.WIDE R2, R8, R11, c[0x0][0x160] ; /* 0x0000580008027625 */
/* 0x000fc800078e020b */
/*0090*/ IMAD.WIDE R4, R8.reuse, R11.reuse, c[0x0][0x168] ; /* 0x00005a0008047625 */
/* 0x0c0fe200078e020b */
/*00a0*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x000fe6000c101904 */
/*00b0*/ IMAD.WIDE R6, R8.reuse, R11.reuse, c[0x0][0x170] ; /* 0x00005c0008067625 */
/* 0x0c0fe200078e020b */
/*00c0*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */
/* 0x000fe6000c101904 */
/*00d0*/ IMAD.WIDE R16, R8.reuse, R11.reuse, c[0x0][0x178] ; /* 0x00005e0008107625 */
/* 0x0c0fe200078e020b */
/*00e0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x000fe6000c101904 */
/*00f0*/ IMAD.WIDE R8, R8, R11, c[0x0][0x180] ; /* 0x0000600008087625 */
/* 0x000fe200078e020b */
/*0100*/ STG.E [R16.64], RZ ; /* 0x000000ff10007986 */
/* 0x000fe8000c101904 */
/*0110*/ STG.E [R8.64], RZ ; /* 0x000000ff08007986 */
/* 0x000fe2000c101904 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R0, RZ, RZ, 0x4 ; /* 0x00000004ff007424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R7, R7, c[0x0][0x0], R2 ; /* 0x0000000007077a24 */
/* 0x001fc800078e0202 */
/*0060*/ IMAD.WIDE.U32 R2, R7, R0, c[0x0][0x160] ; /* 0x0000580007027625 */
/* 0x000fc800078e0000 */
/*0070*/ IMAD.WIDE.U32 R4, R7.reuse, R0.reuse, c[0x0][0x168] ; /* 0x00005a0007047625 */
/* 0x0c0fe200078e0000 */
/*0080*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE.U32 R8, R7, R0, c[0x0][0x170] ; /* 0x00005c0007087625 */
/* 0x000fe200078e0000 */
/*00b0*/ IADD3 R15, R6, R11, RZ ; /* 0x0000000b060f7210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R8.64], R15 ; /* 0x0000000f08007986 */
/* 0x0001e8000c101904 */
/*00d0*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea8000c1e1900 */
/*00e0*/ LDG.E R13, [R4.64] ; /* 0x00000004040d7981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.WIDE.U32 R10, R7, R0, c[0x0][0x178] ; /* 0x00005e00070a7625 */
/* 0x000fc800078e0000 */
/*0100*/ IMAD.IADD R17, R6, 0x1, -R13 ; /* 0x0000000106117824 */
/* 0x004fca00078e0a0d */
/*0110*/ STG.E [R10.64], R17 ; /* 0x000000110a007986 */
/* 0x000fe8000c101904 */
/*0120*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea8000c1e1900 */
/*0130*/ LDG.E R19, [R4.64] ; /* 0x0000000404137981 */
/* 0x000ea2000c1e1900 */
/*0140*/ IMAD.WIDE.U32 R12, R7, R0, c[0x0][0x180] ; /* 0x00006000070c7625 */
/* 0x000fc800078e0000 */
/*0150*/ IMAD R19, R6, R19, RZ ; /* 0x0000001306137224 */
/* 0x004fca00078e02ff */
/*0160*/ STG.E [R12.64], R19 ; /* 0x000000130c007986 */
/* 0x0003e8000c101904 */
/*0170*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea8000c1e1900 */
/*0180*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */
/* 0x001ee2000c1e1900 */
/*0190*/ IABS R21, R6.reuse ; /* 0x0000000600157213 */
/* 0x084fe40000000000 */
/*01a0*/ IABS R12, R6 ; /* 0x00000006000c7213 */
/* 0x002fc40000000000 */
/*01b0*/ I2F.RP R14, R21 ; /* 0x00000015000e7306 */
/* 0x000e260000209400 */
/*01c0*/ IMAD.MOV R12, RZ, RZ, -R12 ; /* 0x000000ffff0c7224 */
/* 0x000fca00078e0a0c */
/*01d0*/ MUFU.RCP R14, R14 ; /* 0x0000000e000e7308 */
/* 0x001e240000001000 */
/*01e0*/ IADD3 R8, R14, 0xffffffe, RZ ; /* 0x0ffffffe0e087810 */
/* 0x001fcc0007ffe0ff */
/*01f0*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0200*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */
/* 0x001fe200000001ff */
/*0210*/ IADD3 R10, RZ, -R9, RZ ; /* 0x80000009ff0a7210 */
/* 0x002fca0007ffe0ff */
/*0220*/ IMAD R11, R10, R21, RZ ; /* 0x000000150a0b7224 */
/* 0x000fe200078e02ff */
/*0230*/ IABS R10, R15 ; /* 0x0000000f000a7213 */
/* 0x008fe40000000000 */
/*0240*/ LOP3.LUT R15, R15, R6, RZ, 0x3c, !PT ; /* 0x000000060f0f7212 */
/* 0x000fe200078e3cff */
/*0250*/ IMAD.HI.U32 R9, R9, R11, R8 ; /* 0x0000000b09097227 */
/* 0x000fc600078e0008 */
/*0260*/ ISETP.GE.AND P1, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */
/* 0x000fe20003f26270 */
/*0270*/ IMAD.MOV.U32 R11, RZ, RZ, R12 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e000c */
/*0280*/ IMAD.HI.U32 R9, R9, R10, RZ ; /* 0x0000000a09097227 */
/* 0x000fc800078e00ff */
/*0290*/ IMAD R8, R9, R11, R10 ; /* 0x0000000b09087224 */
/* 0x000fca00078e020a */
/*02a0*/ ISETP.GT.U32.AND P2, PT, R21, R8, PT ; /* 0x000000081500720c */
/* 0x000fda0003f44070 */
/*02b0*/ @!P2 IADD3 R8, R8, -R21.reuse, RZ ; /* 0x800000150808a210 */
/* 0x080fe40007ffe0ff */
/*02c0*/ @!P2 IADD3 R9, R9, 0x1, RZ ; /* 0x000000010909a810 */
/* 0x000fe40007ffe0ff */
/*02d0*/ ISETP.GE.U32.AND P0, PT, R8, R21, PT ; /* 0x000000150800720c */
/* 0x000fe40003f06070 */
/*02e0*/ ISETP.NE.AND P2, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fd60003f45270 */
/*02f0*/ @P0 IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109090810 */
/* 0x000fca0007ffe0ff */
/*0300*/ IMAD.MOV.U32 R13, RZ, RZ, R9 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e0009 */
/*0310*/ IMAD.WIDE.U32 R8, R7, R0, c[0x0][0x188] ; /* 0x0000620007087625 */
/* 0x000fc600078e0000 */
/*0320*/ @!P1 IADD3 R13, -R13, RZ, RZ ; /* 0x000000ff0d0d9210 */
/* 0x000fe40007ffe1ff */
/*0330*/ @!P2 LOP3.LUT R13, RZ, R6, RZ, 0x33, !PT ; /* 0x00000006ff0da212 */
/* 0x000fca00078e33ff */
/*0340*/ STG.E [R8.64], R13 ; /* 0x0000000d08007986 */
/* 0x0001e8000c101904 */
/*0350*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0360*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000222000c1e1900 */
/*0370*/ IABS R12, R4.reuse ; /* 0x00000004000c7213 */
/* 0x084fe40000000000 */
/*0380*/ IABS R3, R4 ; /* 0x0000000400037213 */
/* 0x002fc40000000000 */
/*0390*/ I2F.RP R6, R12 ; /* 0x0000000c00067306 */
/* 0x000e620000209400 */
/*03a0*/ IABS R8, R2 ; /* 0x0000000200087213 */
/* 0x001fe40000000000 */
/*03b0*/ IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff037224 */
/* 0x000fe200078e0a03 */
/*03c0*/ ISETP.GE.AND P2, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fc80003f46270 */
/*03d0*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x002e240000001000 */
/*03e0*/ IADD3 R10, R6, 0xffffffe, RZ ; /* 0x0ffffffe060a7810 */
/* 0x001fcc0007ffe0ff */
/*03f0*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */
/* 0x000064000021f000 */
/*0400*/ MOV R10, RZ ; /* 0x000000ff000a7202 */
/* 0x001fe20000000f00 */
/*0410*/ IMAD.MOV R15, RZ, RZ, -R11 ; /* 0x000000ffff0f7224 */
/* 0x002fc800078e0a0b */
/*0420*/ IMAD R5, R15, R12, RZ ; /* 0x0000000c0f057224 */
/* 0x000fc800078e02ff */
/*0430*/ IMAD.HI.U32 R11, R11, R5, R10 ; /* 0x000000050b0b7227 */
/* 0x000fcc00078e000a */
/*0440*/ IMAD.HI.U32 R11, R11, R8, RZ ; /* 0x000000080b0b7227 */
/* 0x000fc800078e00ff */
/*0450*/ IMAD R11, R11, R3, R8 ; /* 0x000000030b0b7224 */
/* 0x000fe400078e0208 */
/*0460*/ IMAD.WIDE.U32 R2, R7, R0, c[0x0][0x190] ; /* 0x0000640007027625 */
/* 0x000fc600078e0000 */
/*0470*/ ISETP.GT.U32.AND P0, PT, R12, R11, PT ; /* 0x0000000b0c00720c */
/* 0x000fda0003f04070 */
/*0480*/ @!P0 IADD3 R11, R11, -R12, RZ ; /* 0x8000000c0b0b8210 */
/* 0x000fe40007ffe0ff */
/*0490*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*04a0*/ ISETP.GT.U32.AND P1, PT, R12, R11, PT ; /* 0x0000000b0c00720c */
/* 0x000fda0003f24070 */
/*04b0*/ @!P1 IMAD.IADD R11, R11, 0x1, -R12 ; /* 0x000000010b0b9824 */
/* 0x000fca00078e0a0c */
/*04c0*/ @!P2 IADD3 R11, -R11, RZ, RZ ; /* 0x000000ff0b0ba210 */
/* 0x000fe40007ffe1ff */
/*04d0*/ @!P0 LOP3.LUT R11, RZ, R4, RZ, 0x33, !PT ; /* 0x00000004ff0b8212 */
/* 0x000fca00078e33ff */
/*04e0*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x000fe2000c101904 */
/*04f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0500*/ BRA 0x500; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ IMAD.WIDE R6, R0, R3, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fc800078e0203 */
/*0070*/ IMAD.WIDE R8, R0.reuse, R3.reuse, c[0x0][0x168] ; /* 0x00005a0000087625 */
/* 0x0c0fe200078e0203 */
/*0080*/ LDG.E R11, [R6.64] ; /* 0x00000004060b7981 */
/* 0x000eaa000c1e1900 */
/*0090*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */
/* 0x000ee2000c1e1900 */
/*00a0*/ IMAD.SHL.U32 R4, R0.reuse, 0x4, RZ ; /* 0x0000000400047824 */
/* 0x040fe400078e00ff */
/*00b0*/ IMAD.WIDE R18, R0, R3, c[0x0][0x188] ; /* 0x0000620000127625 */
/* 0x000fc800078e0203 */
/*00c0*/ IMAD R4, R3, c[0x0][0x198], R4 ; /* 0x0000660003047a24 */
/* 0x000fe400078e0204 */
/*00d0*/ IMAD.WIDE R20, R0, R3, c[0x0][0x190] ; /* 0x0000640000147625 */
/* 0x000fc800078e0203 */
/*00e0*/ IMAD R2, R3, c[0x0][0x198], R4 ; /* 0x0000660003027a24 */
/* 0x000fe200078e0204 */
/*00f0*/ STS [R0.X4], R11 ; /* 0x0000000b00007388 */
/* 0x004fe80000004800 */
/*0100*/ STS [R4], R9 ; /* 0x0000000904007388 */
/* 0x008fe80000000800 */
/*0110*/ LDS R10, [R0.X4] ; /* 0x00000000000a7984 */
/* 0x000e240000004800 */
/*0120*/ IADD3 R5, R9, R10, RZ ; /* 0x0000000a09057210 */
/* 0x001fca0007ffe0ff */
/*0130*/ STS [R2], R5 ; /* 0x0000000502007388 */
/* 0x000fe80000000800 */
/*0140*/ LDS R10, [R0.X4] ; /* 0x00000000000a7984 */
/* 0x000fe80000004800 */
/*0150*/ LDS R7, [R4] ; /* 0x0000000004077984 */
/* 0x000e240000000800 */
/*0160*/ IMAD.IADD R7, R10, 0x1, -R7 ; /* 0x000000010a077824 */
/* 0x001fca00078e0a07 */
/*0170*/ STS [R2], R7 ; /* 0x0000000702007388 */
/* 0x000fe80000000800 */
/*0180*/ LDS R6, [R0.X4] ; /* 0x0000000000067984 */
/* 0x000fe80000004800 */
/*0190*/ LDS R11, [R4] ; /* 0x00000000040b7984 */
/* 0x000e240000000800 */
/*01a0*/ IMAD R9, R6, R11, RZ ; /* 0x0000000b06097224 */
/* 0x001fca00078e02ff */
/*01b0*/ STS [R2], R9 ; /* 0x0000000902007388 */
/* 0x000fe80000000800 */
/*01c0*/ LDS R13, [R4] ; /* 0x00000000040d7984 */
/* 0x000e280000000800 */
/*01d0*/ LDS R6, [R0.X4] ; /* 0x0000000000067984 */
/* 0x000e620000004800 */
/*01e0*/ IABS R15, R13.reuse ; /* 0x0000000d000f7213 */
/* 0x081fe40000000000 */
/*01f0*/ IABS R14, R13 ; /* 0x0000000d000e7213 */
/* 0x000fc40000000000 */
/*0200*/ I2F.RP R8, R15 ; /* 0x0000000f00087306 */
/* 0x000e300000209400 */
/*0210*/ MUFU.RCP R8, R8 ; /* 0x0000000800087308 */
/* 0x001e240000001000 */
/*0220*/ IADD3 R10, R8, 0xffffffe, RZ ; /* 0x0ffffffe080a7810 */
/* 0x001fc40007ffe0ff */
/*0230*/ IABS R8, R6 ; /* 0x0000000600087213 */
/* 0x002fc80000000000 */
/*0240*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */
/* 0x000062000021f000 */
/*0250*/ LOP3.LUT R6, R6, R13, RZ, 0x3c, !PT ; /* 0x0000000d06067212 */
/* 0x000fc800078e3cff */
/*0260*/ ISETP.GE.AND P1, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f26270 */
/*0270*/ HFMA2.MMA R10, -RZ, RZ, 0, 0 ; /* 0x00000000ff0a7435 */
/* 0x001fe200000001ff */
/*0280*/ IADD3 R12, RZ, -R11, RZ ; /* 0x8000000bff0c7210 */
/* 0x002fca0007ffe0ff */
/*0290*/ IMAD R17, R12, R15, RZ ; /* 0x0000000f0c117224 */
/* 0x000fe400078e02ff */
/*02a0*/ IMAD.MOV R12, RZ, RZ, -R14 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0a0e */
/*02b0*/ IMAD.HI.U32 R11, R11, R17, R10 ; /* 0x000000110b0b7227 */
/* 0x000fc800078e000a */
/*02c0*/ IMAD.WIDE R16, R0, R3, c[0x0][0x180] ; /* 0x0000600000107625 */
/* 0x000fc800078e0203 */
/*02d0*/ IMAD.HI.U32 R11, R11, R8, RZ ; /* 0x000000080b0b7227 */
/* 0x000fc800078e00ff */
/*02e0*/ IMAD R8, R11, R12, R8 ; /* 0x0000000c0b087224 */
/* 0x000fca00078e0208 */
/*02f0*/ ISETP.GT.U32.AND P2, PT, R15, R8, PT ; /* 0x000000080f00720c */
/* 0x000fda0003f44070 */
/*0300*/ @!P2 IMAD.IADD R8, R8, 0x1, -R15 ; /* 0x000000010808a824 */
/* 0x000fe200078e0a0f */
/*0310*/ @!P2 IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0ba810 */
/* 0x000fe40007ffe0ff */
/*0320*/ ISETP.NE.AND P2, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fe40003f45270 */
/*0330*/ ISETP.GE.U32.AND P0, PT, R8, R15, PT ; /* 0x0000000f0800720c */
/* 0x000fda0003f06070 */
/*0340*/ @P0 IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0b0810 */
/* 0x000fc80007ffe0ff */
/*0350*/ @!P1 IADD3 R11, -R11, RZ, RZ ; /* 0x000000ff0b0b9210 */
/* 0x000fe40007ffe1ff */
/*0360*/ @!P2 LOP3.LUT R11, RZ, R13, RZ, 0x33, !PT ; /* 0x0000000dff0ba212 */
/* 0x000fca00078e33ff */
/*0370*/ STS [R2], R11 ; /* 0x0000000b02007388 */
/* 0x000fe80000000800 */
/*0380*/ LDS R4, [R4] ; /* 0x0000000004047984 */
/* 0x000e280000000800 */
/*0390*/ LDS R6, [R0.X4] ; /* 0x0000000000067984 */
/* 0x000e620000004800 */
/*03a0*/ IABS R10, R4.reuse ; /* 0x00000004000a7213 */
/* 0x081fe40000000000 */
/*03b0*/ IABS R14, R4 ; /* 0x00000004000e7213 */
/* 0x000fc40000000000 */
/*03c0*/ I2F.RP R8, R10 ; /* 0x0000000a00087306 */
/* 0x000e220000209400 */
/*03d0*/ ISETP.GE.AND P2, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x002fe40003f46270 */
/*03e0*/ IADD3 R14, RZ, -R14, RZ ; /* 0x8000000eff0e7210 */
/* 0x000fca0007ffe0ff */
/*03f0*/ MUFU.RCP R8, R8 ; /* 0x0000000800087308 */
/* 0x001e240000001000 */
/*0400*/ IADD3 R12, R8, 0xffffffe, RZ ; /* 0x0ffffffe080c7810 */
/* 0x001fe40007ffe0ff */
/*0410*/ IABS R8, R6 ; /* 0x0000000600087213 */
/* 0x000fc80000000000 */
/*0420*/ F2I.FTZ.U32.TRUNC.NTZ R13, R12 ; /* 0x0000000c000d7305 */
/* 0x000064000021f000 */
/*0430*/ IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c7224 */
/* 0x001fe400078e00ff */
/*0440*/ IMAD.MOV R15, RZ, RZ, -R13 ; /* 0x000000ffff0f7224 */
/* 0x002fc800078e0a0d */
/*0450*/ IMAD R15, R15, R10, RZ ; /* 0x0000000a0f0f7224 */
/* 0x000fc800078e02ff */
/*0460*/ IMAD.HI.U32 R13, R13, R15, R12 ; /* 0x0000000f0d0d7227 */
/* 0x000fcc00078e000c */
/*0470*/ IMAD.HI.U32 R13, R13, R8, RZ ; /* 0x000000080d0d7227 */
/* 0x000fc800078e00ff */
/*0480*/ IMAD R13, R13, R14, R8 ; /* 0x0000000e0d0d7224 */
/* 0x000fe400078e0208 */
/*0490*/ IMAD.WIDE R14, R0, R3, c[0x0][0x178] ; /* 0x00005e00000e7625 */
/* 0x000fc600078e0203 */
/*04a0*/ ISETP.GT.U32.AND P0, PT, R10, R13, PT ; /* 0x0000000d0a00720c */
/* 0x000fda0003f04070 */
/*04b0*/ @!P0 IADD3 R13, R13, -R10, RZ ; /* 0x8000000a0d0d8210 */
/* 0x000fe40007ffe0ff */
/*04c0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*04d0*/ ISETP.GT.U32.AND P1, PT, R10, R13, PT ; /* 0x0000000d0a00720c */
/* 0x000fda0003f24070 */
/*04e0*/ @!P1 IMAD.IADD R13, R13, 0x1, -R10 ; /* 0x000000010d0d9824 */
/* 0x000fca00078e0a0a */
/*04f0*/ MOV R23, R13 ; /* 0x0000000d00177202 */
/* 0x000fe20000000f00 */
/*0500*/ IMAD.WIDE R12, R0, R3, c[0x0][0x170] ; /* 0x00005c00000c7625 */
/* 0x000fc800078e0203 */
/*0510*/ @!P2 IMAD.MOV R23, RZ, RZ, -R23 ; /* 0x000000ffff17a224 */
/* 0x000fe200078e0a17 */
/*0520*/ @!P0 LOP3.LUT R23, RZ, R4, RZ, 0x33, !PT ; /* 0x00000004ff178212 */
/* 0x000fe200078e33ff */
/*0530*/ STG.E [R12.64], R5 ; /* 0x000000050c007986 */
/* 0x000fe8000c101904 */
/*0540*/ STG.E [R14.64], R7 ; /* 0x000000070e007986 */
/* 0x000fe8000c101904 */
/*0550*/ STG.E [R16.64], R9 ; /* 0x0000000910007986 */
/* 0x000fe8000c101904 */
/*0560*/ STG.E [R18.64], R11 ; /* 0x0000000b12007986 */
/* 0x000fe8000c101904 */
/*0570*/ STG.E [R20.64], R23 ; /* 0x0000001714007986 */
/* 0x000fe8000c101904 */
/*0580*/ STS [R2], R23 ; /* 0x0000001702007388 */
/* 0x000fe20000000800 */
/*0590*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*05a0*/ BRA 0x5a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0600*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0610*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0620*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0630*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //Based on the work of Andrew Krepps
#include <chrono>
#include <iostream>
#include <random>
#include <vector>
#include <stdio.h>
// Compile-time operands for the constant-memory kernel. VAL_B is non-zero,
// which makes the division in executeConstantMathOperations safe.
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
// Device GPU add c[i] = a[i] + b[i]
// One element per thread; the flat global index selects the element, so the
// launch must cover exactly the valid range of all three buffers.
__device__ void add(int * a, int * b, int * c)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
// One element per thread; the flat global index selects the element, so the
// launch must cover exactly the valid range of all three buffers.
__device__ void subtract(int * a, int * b, int * c)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    c[idx] = a[idx] - b[idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
// One element per thread; the flat global index selects the element, so the
// launch must cover exactly the valid range of all three buffers.
__device__ void mult(int * a, int * b, int * c)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    c[idx] = a[idx] * b[idx];
}
// Device GPU div c[i] = a[i] / b[i]; a zero divisor yields 0.
// Fix: the host fills b from a uniform [0,4] distribution, so b[i] == 0 is
// possible. Integer division by zero is undefined on the device and did not
// match the CPU reference (hostDiv), which defaults the result to 0.
__device__ void div(int *a, int * b, int * c)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    c[thread_idx] = (b[thread_idx] != 0) ? (a[thread_idx] / b[thread_idx]) : 0;
}
// Device GPU mod c[i] = a[i] % b[i]; a zero divisor yields 0.
// Fix: the host fills b from a uniform [0,4] distribution, so b[i] == 0 is
// possible. Modulo by zero is undefined on the device and did not match the
// CPU reference (hostMod), which defaults the result to 0.
__device__ void mod(int * a, int * b, int * c)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    c[thread_idx] = (b[thread_idx] != 0) ? (a[thread_idx] % b[thread_idx]) : 0;
}
// Executes all 5 shared math operations.
// Kernel contract (as launched by executeSharedTest):
//  - Dynamic shared memory must hold at least 3*size ints; the launcher
//    passes 3*totalThreads*sizeof(int).
//  - NOTE(review): shared arrays are indexed by the *global* thread id (via
//    the device helpers), not threadIdx.x, so every block must be given room
//    for all `size` elements. Wasteful, but consistent with the launch
//    configuration above — confirm before changing either side.
//  - No __syncthreads() is needed: each thread only writes and reads its own
//    slot in shared memory, so there is no cross-thread communication.
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Dynamically-sized shared buffer, partitioned into three arrays of `size` ints.
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
// Stage this thread's operands from global into shared memory.
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA by sharedB and store in multDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
// NOTE(review): b may contain zeros (host draws it from uniform [0,4]);
// behavior on a zero divisor depends on the div helper — verify it matches
// the CPU reference.
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations directly on global-memory buffers.
// Each device helper recomputes the flat global thread index internally and
// has no bounds guard, so the launch configuration must satisfy
// gridDim.x * blockDim.x == buffer length. The `size` parameter is unused.
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract b from a and store in subDest
subtract(a, b, subDest);
// Multiply a by b and store in multDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 constant math operations.
// Each thread writes one element per destination array using the
// __constant__-memory operands VAL_A (1) and VAL_B (3). No bounds guard: the
// launch must satisfy gridDim.x * blockDim.x == buffer length; `size` is unused.
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Add VAL_A to VAL_B and store in addDest
    addDest[tid] = VAL_A + VAL_B;
    // Subtract VAL_B from VAL_A and store in subDest
    subDest[tid] = VAL_A - VAL_B;
    // Multiply VAL_A by VAL_B and store in multDest
    multDest[tid] = VAL_A * VAL_B;
    // Divide VAL_A by VAL_B and store in divDest
    divDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
    // Mod VAL_A by VAL_B and store in modDest.
    // Bug fix: this previously computed VAL_A / VAL_B (division) instead of
    // the modulo that the comment and the kernel's purpose require.
    modDest[tid] = VAL_A % VAL_B; // B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
    // Walk the three buffers in lockstep, storing the element-wise sum.
    for (const int * const end = c + size; c != end; ++a, ++b, ++c)
    {
        *c = *a + *b;
    }
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
    // Walk the three buffers in lockstep, storing the element-wise difference.
    for (const int * const end = c + size; c != end; ++a, ++b, ++c)
    {
        *c = *a - *b;
    }
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
    // Walk the three buffers in lockstep, storing the element-wise product.
    for (const int * const end = c + size; c != end; ++a, ++b, ++c)
    {
        *c = *a * *b;
    }
}
// Host (Cpu) divide c[i] = a[i] / b[i]
// A zero divisor yields 0 instead of invoking undefined behavior.
void hostDiv(int * a, int * b, int *c, const int size)
{
    for (int i = 0; i < size; ++i)
    {
        c[i] = (b[i] != 0) ? (a[i] / b[i]) : 0;
    }
}
// Host (Cpu) mod c[i] = a[i] % b[i]
// A zero divisor yields 0 instead of invoking undefined behavior, keeping
// the CPU reference aligned with the GPU tests' default-to-zero convention.
void hostMod(int * a, int * b, int *c, const int size)
{
    for (int i = 0; i < size; ++i)
    {
        c[i] = (b[i] == 0) ? 0 : (a[i] % b[i]);
    }
}
// Executes each of the host (cpu) tests by filling buffers with the same
// seeded random data as the GPU tests and running all 5 math operations.
//   totalThreads - number of elements to process
//   blockSize/numBlocks - unused on the host; kept for signature parity with the GPU tests
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
    // Heap-allocated buffers: the original C-style VLAs are a non-standard
    // compiler extension and overflow the stack for large thread counts.
    std::vector<int> a(totalThreads), b(totalThreads), c(totalThreads);
    // Create a random generator that will generate random numbers from 0 to 4.
    // Use a set seed so output is deterministic.
    unsigned seed = 12345;
    std::default_random_engine gen(seed);
    std::uniform_int_distribution<int> dist(0,4);
    for (int i = 0; i < totalThreads; ++i)
    {
        a[i] = i;
        b[i] = dist(gen);
    }
    // Add all of the numbers c[i] = a[i] + b[i];
    hostAdd(a.data(), b.data(), c.data(), totalThreads);
    // Subtract all of the numbers c[i] = a[i] - b[i];
    hostSub(a.data(), b.data(), c.data(), totalThreads);
    // Multiply all of the numbers c[i] = a[i] * b[i];
    hostMult(a.data(), b.data(), c.data(), totalThreads);
    // Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
    hostDiv(a.data(), b.data(), c.data(), totalThreads);
    // Mod all of the numbers c[i] = a[i] % b[i];
    hostMod(a.data(), b.data(), c.data(), totalThreads);
}
// Executes the global-memory GPU test: allocates host and device buffers,
// copies seeded random inputs to the device, runs all 5 math operations in
// one kernel, and copies the results back. Uses the same RNG seed as the
// CPU tests so results are comparable.
void executeGlobalTest(const int totalThreads, const int blockSize, const int numBlocks)
{
    // Heap-allocated host buffers: the original C-style VLAs are a
    // non-standard compiler extension and overflow the stack for large counts.
    std::vector<int> a(totalThreads), b(totalThreads), add_dest(totalThreads),
                     sub_dest(totalThreads), mult_dest(totalThreads),
                     div_dest(totalThreads), mod_dest(totalThreads);
    int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
    cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
    // Create a random generator that will generate random numbers from 0 to 4.
    // Use a set seed so output is deterministic.
    unsigned seed = 12345;
    std::default_random_engine gen(seed);
    std::uniform_int_distribution<int> dist(0,4);
    for (int i = 0; i < totalThreads; ++i)
    {
        a[i] = i;
        b[i] = dist(gen);
    }
    cudaMemcpy(gpu_a, a.data(), totalThreads * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b.data(), totalThreads * sizeof(int), cudaMemcpyHostToDevice);
    executeGlobalMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
    // Kernel launches fail silently; surface configuration errors here.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
    {
        fprintf(stderr, "executeGlobalMathOperations launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    cudaMemcpy(add_dest.data(), gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(sub_dest.data(), gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(mult_dest.data(), gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(div_dest.data(), gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(mod_dest.data(), gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_add_dest);
    cudaFree(gpu_sub_dest);
    cudaFree(gpu_mult_dest);
    cudaFree(gpu_div_dest);
    cudaFree(gpu_mod_dest);
}
// Executes the shared-memory GPU test: allocates host and device buffers,
// copies seeded random inputs to the device, runs all 5 math operations via
// a kernel that stages operands in dynamic shared memory, and copies the
// results back. Uses the same RNG seed as the CPU tests.
void executeSharedTest(const int totalThreads, const int blockSize, const int numBlocks)
{
    // Heap-allocated host buffers: the original C-style VLAs are a
    // non-standard compiler extension and overflow the stack for large counts.
    std::vector<int> a(totalThreads), b(totalThreads), add_dest(totalThreads),
                     sub_dest(totalThreads), mult_dest(totalThreads),
                     div_dest(totalThreads), mod_dest(totalThreads);
    int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
    cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
    // Create a random generator that will generate random numbers from 0 to 4.
    // Use a set seed so output is deterministic.
    unsigned seed = 12345;
    std::default_random_engine gen(seed);
    std::uniform_int_distribution<int> dist(0,4);
    for (int i = 0; i < totalThreads; ++i)
    {
        a[i] = i;
        b[i] = dist(gen);
    }
    cudaMemcpy(gpu_a, a.data(), totalThreads * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b.data(), totalThreads * sizeof(int), cudaMemcpyHostToDevice);
    // The third launch parameter is the dynamic shared memory size per block.
    // We multiply by 3 because the kernel stages A, B, and the result array.
    // NOTE(review): 3*totalThreads*sizeof(int) per block exceeds the default
    // 48KB shared-memory limit once totalThreads > 4096 — the launch will
    // fail silently in that case; confirm the intended maximum size.
    executeSharedMathOperations<<<numBlocks, blockSize, 3 * totalThreads * sizeof(int)>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
    // Kernel launches fail silently; surface configuration errors here.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
    {
        fprintf(stderr, "executeSharedMathOperations launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    cudaMemcpy(add_dest.data(), gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(sub_dest.data(), gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(mult_dest.data(), gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(div_dest.data(), gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(mod_dest.data(), gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_add_dest);
    cudaFree(gpu_sub_dest);
    cudaFree(gpu_mult_dest);
    cudaFree(gpu_div_dest);
    cudaFree(gpu_mod_dest);
}
// Executes the constant-memory GPU test: allocates device output buffers,
// runs all 5 math operations on the __constant__ operands VAL_A/VAL_B, and
// copies the results back to host memory.
void executeConstantTest(const int totalThreads, const int blockSize, const int numBlocks)
{
    // Heap-allocated host buffers: the original C-style VLAs are a
    // non-standard compiler extension and overflow the stack for large counts.
    std::vector<int> add_dest(totalThreads), sub_dest(totalThreads),
                     mult_dest(totalThreads), div_dest(totalThreads),
                     mod_dest(totalThreads);
    int *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
    cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
    executeConstantMathOperations<<<numBlocks, blockSize>>>(gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
    // Kernel launches fail silently; surface configuration errors here.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
    {
        fprintf(stderr, "executeConstantMathOperations launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    cudaMemcpy(add_dest.data(), gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(sub_dest.data(), gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(mult_dest.data(), gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(div_dest.data(), gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(mod_dest.data(), gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(gpu_add_dest);
    cudaFree(gpu_sub_dest);
    cudaFree(gpu_mult_dest);
    cudaFree(gpu_div_dest);
    cudaFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeGlobalTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Global Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeSharedTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Shared Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeConstantTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Constant Memory execution took: " << totalTime.count() << " seconds." << std::endl;
return 0;
} | //Based on the work of Andrew Krepps
#include <hip/hip_runtime.h>
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
// Device GPU add c[i] = a[i] + b[i]
__device__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Device GPU div c[i] = a[i] / b[i]
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Device GPU mod c[i] = a[i] % b[i]
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Executes all 5 shared math operations
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA to sharedB and store in mutlDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract a from b and store in subDest
subtract(a, b, subDest);
// Multiply a to b and store in mutlDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 constant math operations
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract a from b and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply a to b and store in mutlDest
multDest[tid] = VAL_A * VAL_B;
// Divide a by b and store in divDest
divDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
// Mod a by b and store in modDest
modDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
// Host (Cpu) divide c[i] = a[i] / b[i]
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
if (b[i] != 0)
{
c[i] = a[i] / b[i];
}
else
{
c[i] = 0;
}
}
}
// Host (Cpu) mod c[i] = a[i] % b[i]
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers that uses the same seed as the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
}
// Executes each of the global memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeGlobalTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
hipMemcpy(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice);
executeGlobalMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
// Executes each of the shared memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data after creating shared memory. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeSharedTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
hipMemcpy(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice);
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeSharedMathOperations<<<numBlocks, blockSize, 3 * totalThreads * sizeof(int)>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
// Executes each of the consnt memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data using constant values. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeConstantTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeConstantMathOperations<<<numBlocks, blockSize>>>(gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeGlobalTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Global Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeSharedTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Shared Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeConstantTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Constant Memory execution took: " << totalTime.count() << " seconds." << std::endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //Based on the work of Andrew Krepps
#include <hip/hip_runtime.h>
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
// Device GPU add c[i] = a[i] + b[i]
__device__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Device GPU div c[i] = a[i] / b[i]
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Device GPU mod c[i] = a[i] % b[i]
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Executes all 5 shared math operations
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA to sharedB and store in mutlDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract a from b and store in subDest
subtract(a, b, subDest);
// Multiply a to b and store in mutlDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 constant math operations
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract a from b and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply a to b and store in mutlDest
multDest[tid] = VAL_A * VAL_B;
// Divide a by b and store in divDest
divDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
// Mod a by b and store in modDest
modDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
// Host (Cpu) divide c[i] = a[i] / b[i]
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
if (b[i] != 0)
{
c[i] = a[i] / b[i];
}
else
{
c[i] = 0;
}
}
}
// Host (Cpu) mod c[i] = a[i] % b[i]
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers that uses the same seed as the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
}
// Executes each of the global memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeGlobalTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
hipMemcpy(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice);
executeGlobalMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
// Executes each of the shared memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data after creating shared memory. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeSharedTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
hipMemcpy(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice);
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeSharedMathOperations<<<numBlocks, blockSize, 3 * totalThreads * sizeof(int)>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
// Executes each of the consnt memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data using constant values. The data is filled with random numbers that uses the same seed as the CPU tests.
void executeConstantTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeConstantMathOperations<<<numBlocks, blockSize>>>(gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
// Entry point: parses an optional total-thread count (argv[1]) and
// block size (argv[2]), rounds the grid up to cover every thread, then
// times the host, global-, shared-, and constant-memory variants of the
// math-operation benchmark with wall-clock timestamps.
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
// Fix: atoi returns 0 for non-numeric input, and the division below
// ran before any validation — a zero/negative block size was an
// immediate division by zero (undefined behavior). Fall back to the
// default instead.
if (blockSize <= 0) {
printf("Warning: invalid block size, falling back to 256\n");
blockSize = 256;
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments: round the grid up so every
// requested thread is covered when the division is not exact
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeGlobalTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Global Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeSharedTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Shared Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeConstantTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Constant Memory execution took: " << totalTime.count() << " seconds." << std::endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i
.globl _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i
.p2align 8
.type _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i,@function
_Z27executeSharedMathOperationsPiS_S_S_S_S_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x4c
s_load_b256 s[4:11], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_load_b32 s2, s[0:1], 0x38
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_lshlrev_b32_e32 v1, 2, v1
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
s_load_b128 s[4:7], s[0:1], 0x20
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
s_waitcnt lgkmcnt(0)
s_lshl_b32 s3, s2, 2
v_add_nc_u32_e32 v5, 0, v1
v_add3_u32 v6, 0, s3, v1
s_lshl_b32 s2, s2, 3
s_load_b64 s[0:1], s[0:1], 0x30
v_add3_u32 v10, 0, s2, v1
s_waitcnt vmcnt(1)
ds_store_b32 v5, v0
s_waitcnt vmcnt(0)
ds_store_b32 v6, v4
ds_load_b32 v0, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v11, v0, v4
ds_store_b32 v10, v11
ds_load_b32 v0, v5
ds_load_b32 v1, v6
s_waitcnt lgkmcnt(0)
v_sub_nc_u32_e32 v12, v0, v1
ds_store_b32 v10, v12
ds_load_b32 v0, v5
ds_load_b32 v1, v6
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v13, v1, v0
ds_store_b32 v10, v13
ds_load_b32 v0, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(1)
v_ashrrev_i32_e32 v1, 31, v0
s_waitcnt lgkmcnt(0)
v_ashrrev_i32_e32 v9, 31, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v0, v0, v1
v_add_nc_u32_e32 v7, v7, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_xor_b32_e32 v0, v0, v1
v_xor_b32_e32 v7, v7, v9
v_xor_b32_e32 v1, v9, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_u32_e32 v4, v0
v_sub_nc_u32_e32 v8, 0, v0
v_rcp_iflag_f32_e32 v4, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v4, v4
v_mul_lo_u32 v8, v8, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v8, v4, v8
v_add_nc_u32_e32 v4, v4, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v7, v4
v_mul_lo_u32 v8, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v7, v7, v8
v_add_nc_u32_e32 v8, 1, v4
v_sub_nc_u32_e32 v14, v7, v0
v_cmp_ge_u32_e32 vcc_lo, v7, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v4, v4, v8 :: v_dual_cndmask_b32 v7, v7, v14
v_add_nc_u32_e32 v8, 1, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ge_u32_e32 vcc_lo, v7, v0
v_cndmask_b32_e32 v0, v4, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v0, v0, v1
v_sub_nc_u32_e32 v14, v0, v1
ds_store_b32 v10, v14
ds_load_b32 v0, v6
s_waitcnt lgkmcnt(0)
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v0, v1
v_xor_b32_e32 v8, v0, v1
ds_load_b32 v1, v5
v_cvt_f32_u32_e32 v0, v8
v_sub_nc_u32_e32 v4, 0, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt lgkmcnt(0)
v_ashrrev_i32_e32 v15, 31, v1
s_waitcnt_depctr 0xfff
v_dual_mul_f32 v0, 0x4f7ffffe, v0 :: v_dual_add_nc_u32 v1, v1, v15
v_cvt_u32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_xor_b32_e32 v1, v1, v15
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v0, v4
v_add_nc_u32_e32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v0, v1, v0
v_mul_lo_u32 v0, v0, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v4, v1, v0
v_add_co_u32 v0, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v3, vcc_lo
v_sub_nc_u32_e32 v5, v4, v8
v_cmp_ge_u32_e32 vcc_lo, v4, v8
s_delay_alu instid0(VALU_DEP_2)
v_cndmask_b32_e32 v9, v4, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v3, vcc_lo
v_sub_nc_u32_e32 v16, v9, v8
v_cmp_ge_u32_e32 vcc_lo, v9, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v16, v9, v16, vcc_lo
v_add_co_u32 v8, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v3, vcc_lo
v_xor_b32_e32 v16, v16, v15
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_sub_nc_u32_e32 v15, v16, v15
global_store_b32 v[0:1], v11, off
global_store_b32 v[4:5], v12, off
global_store_b32 v[6:7], v13, off
global_store_b32 v[8:9], v14, off
ds_store_b32 v10, v15
global_store_b32 v[2:3], v15, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 17
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i, .Lfunc_end0-_Z27executeSharedMathOperationsPiS_S_S_S_S_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i
.globl _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i
.p2align 8
.type _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i,@function
_Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x4c
s_load_b256 s[4:11], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mov_b32_e32 v2, 0
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v0
global_load_b32 v8, v[2:3], off
global_load_b32 v9, v[4:5], off
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
global_store_b32 v[6:7], v8, off
global_load_b32 v8, v[2:3], off
global_load_b32 v9, v[4:5], off
v_add_co_u32 v6, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s11, v1, vcc_lo
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v8, v8, v9
global_store_b32 v[6:7], v8, off
global_load_b32 v6, v[2:3], off
global_load_b32 v7, v[4:5], off
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x20
s_load_b64 s[0:1], s[0:1], 0x30
s_waitcnt vmcnt(0)
v_mul_lo_u32 v8, v7, v6
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v1, vcc_lo
global_store_b32 v[6:7], v8, off
global_load_b32 v6, v[4:5], off
global_load_b32 v7, v[2:3], off
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v8, 31, v6
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v11, 31, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v6, v6, v8
v_add_nc_u32_e32 v7, v7, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_xor_b32_e32 v6, v6, v8
v_xor_b32_e32 v7, v7, v11
v_xor_b32_e32 v8, v11, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_u32_e32 v9, v6
v_sub_nc_u32_e32 v10, 0, v6
v_rcp_iflag_f32_e32 v9, v9
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v9, 0x4f7ffffe, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v9, v9
v_mul_lo_u32 v10, v10, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v10, v9, v10
v_add_nc_u32_e32 v9, v9, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v9, v7, v9
v_mul_lo_u32 v10, v9, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v7, v7, v10
v_sub_nc_u32_e32 v12, v7, v6
v_cmp_ge_u32_e32 vcc_lo, v7, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v7, v7, v12 :: v_dual_add_nc_u32 v10, 1, v9
v_cndmask_b32_e32 v9, v9, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ge_u32_e32 vcc_lo, v7, v6
v_add_nc_u32_e32 v10, 1, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v6, v9, v10, vcc_lo
v_xor_b32_e32 v9, v6, v8
v_add_co_u32 v6, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_sub_nc_u32_e32 v8, v9, v8
global_store_b32 v[6:7], v8, off
global_load_b32 v4, v[4:5], off
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v3, 31, v4
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v6, 31, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, v4, v3
v_add_nc_u32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_xor_b32_e32 v3, v4, v3
v_xor_b32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_u32_e32 v4, v3
v_sub_nc_u32_e32 v5, 0, v3
v_rcp_iflag_f32_e32 v4, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v4, v4
v_mul_lo_u32 v5, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v5, v4, v5
v_add_nc_u32_e32 v4, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v2, v4
v_mul_lo_u32 v4, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v2, v4
v_sub_nc_u32_e32 v4, v2, v3
v_cmp_ge_u32_e32 vcc_lo, v2, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_sub_nc_u32_e32 v4, v2, v3
v_cmp_ge_u32_e32 vcc_lo, v2, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
v_xor_b32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v2, v6
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i, .Lfunc_end1-_Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z29executeConstantMathOperationsPiS_S_S_S_i
.globl _Z29executeConstantMathOperationsPiS_S_S_S_i
.p2align 8
.type _Z29executeConstantMathOperationsPiS_S_S_S_i,@function
_Z29executeConstantMathOperationsPiS_S_S_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x3c
s_load_b256 s[4:11], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x20
v_dual_mov_b32 v10, 4 :: v_dual_mov_b32 v11, -2
v_dual_mov_b32 v12, 3 :: v_dual_mov_b32 v13, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v8, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v9, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[2:3], v10, off
global_store_b32 v[4:5], v11, off
global_store_b32 v[6:7], v12, off
global_store_b32 v[8:9], v13, off
global_store_b32 v[0:1], v13, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z29executeConstantMathOperationsPiS_S_S_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z29executeConstantMathOperationsPiS_S_S_S_i, .Lfunc_end2-_Z29executeConstantMathOperationsPiS_S_S_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .offset: 56
.size: 4
.value_kind: by_value
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
- .offset: 184
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27executeSharedMathOperationsPiS_S_S_S_S_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 17
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .offset: 56
.size: 4
.value_kind: by_value
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27executeGlobalMathOperationsPiS_S_S_S_S_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z29executeConstantMathOperationsPiS_S_S_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z29executeConstantMathOperationsPiS_S_S_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point fuzz kernel (launched <<<1,1>>>): threads
// conditionally fold 43 inputs into `comp` through nested comparisons and
// libdevice math calls, then print the result with full precision.
// NOTE(review): expression order and literal signs are significant for the
// IEEE-exact output this generated test checks — do not reassociate.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41,float var_42,float var_43) {
// Outer guards: comp is compared against expressions of the inputs; the
// exact-equality compares (==) are intentional in this generated test.
if (comp <= (+1.2816E-37f / powf(-1.0187E-35f, var_1 - -1.9877E-37f - var_2))) {
if (comp >= -1.8770E-44f - var_3) {
if (comp == var_4 / (+1.3481E-35f - +0.0f * var_5)) {
comp += (var_6 - var_7 - -1.7504E-42f + (var_8 - (var_9 / +1.5357E35f)));
float tmp_1 = powf(-1.3450E-37f * (var_10 + fabsf(+1.1372E26f)), (+1.2480E-36f + var_11));
comp = tmp_1 / +1.5369E-35f - log10f((var_12 / var_13 + var_14 * var_15));
if (comp >= powf(+1.5375E34f, var_16 * (-1.3830E-5f / expf((-1.1300E-35f - (+1.2449E35f * atan2f(-1.6221E36f, (+0.0f / -1.7653E25f / ldexpf((+1.3912E-43f * (-1.7357E-35f + -1.7682E-42f - var_17 * expf((var_18 / ceilf(+1.2437E-42f))))), 2))))))))) {
comp += atanf((-1.9104E-41f * +1.5842E-43f + -1.1733E24f * log10f(sinf(-1.5217E2f * var_19 - (-1.5927E35f + -1.9048E-35f)))));
comp += (var_20 * log10f((+0.0f * (+1.4060E34f * +1.6696E-37f))));
comp += +1.3113E-35f * +1.9907E-37f / var_21;
comp += (var_22 - (+0.0f - asinf(var_23 / fmodf(+1.2304E-43f, var_24 / logf((+1.2814E-37f - (var_25 / (-1.0064E-42f * -1.0711E-41f))))))));
}
if (comp == (var_26 + +1.0784E-15f / (var_27 - -1.0220E34f / (+0.0f + +1.9677E34f)))) {
comp = var_28 + (+1.4433E36f * var_29);
comp = (var_30 / +1.4266E21f - var_31 * +1.7675E-36f);
comp += (var_32 / var_33 + var_34 * +1.6759E-37f);
}
if (comp < (-1.5166E-41f - var_35)) {
comp = atanf(var_36 + (var_37 + -1.0570E-42f));
float tmp_2 = var_38 - asinf((var_39 + coshf(-1.5107E-37f * +1.6173E-43f * (var_40 - var_41 - +0.0f))));
comp = tmp_2 * -0.0f / (var_42 * var_43);
}
}
}
}
// Device-side printf: result is serialized to the host at the next sync.
printf("%.17g\n", comp);
}
/* Allocate a 10-element float buffer on the host and fill every slot with
 * the value v. The caller owns the returned buffer and must free() it. */
float* initPointer(float v) {
float *buffer = (float*) malloc(sizeof(float)*10);
int slot = 0;
while (slot < 10) {
buffer[slot] = v;
++slot;
}
return buffer;
}
// Entry point for the generated test: parses 44 float arguments from the
// command line, launches the single-thread `compute` kernel with them, and
// waits for the device-side printf to flush.
int main(int argc, char** argv) {
/* Program variables: the kernel expects exactly 44 float inputs. */
const int kNumArgs = 44;
// Fix: the original dereferenced argv[1]..argv[44] unconditionally and
// crashed (null deref / out-of-bounds) when fewer arguments were given.
if (argc < kNumArgs + 1) {
fprintf(stderr, "usage: %s v1 ... v%d\n", argv[0], kNumArgs);
return 1;
}
float tmp[kNumArgs];
// atof returns 0.0 for non-numeric input, matching the original per-arg
// conversions; the 44 duplicated lines are collapsed into one loop.
for (int i = 0; i < kNumArgs; ++i)
tmp[i] = atof(argv[i + 1]);
compute<<<1,1>>>(tmp[0],tmp[1],tmp[2],tmp[3],tmp[4],tmp[5],tmp[6],tmp[7],tmp[8],tmp[9],tmp[10],tmp[11],tmp[12],tmp[13],tmp[14],tmp[15],tmp[16],tmp[17],tmp[18],tmp[19],tmp[20],tmp[21],tmp[22],tmp[23],tmp[24],tmp[25],tmp[26],tmp[27],tmp[28],tmp[29],tmp[30],tmp[31],tmp[32],tmp[33],tmp[34],tmp[35],tmp[36],tmp[37],tmp[38],tmp[39],tmp[40],tmp[41],tmp[42],tmp[43]);
// Block until the kernel finishes so its printf output is delivered
// before the process exits.
cudaDeviceSynchronize();
return 0;
} | .file "tmpxft_0018b930_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initPointerf
.type _Z11initPointerf, @function
_Z11initPointerf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movd %xmm0, %ebx
movl $40, %edi
call malloc@PLT
movq %rax, %rdx
leaq 40(%rax), %rcx
.L4:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L4
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11initPointerf, .-_Z11initPointerf
.globl _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
.type _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, @function
_Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff:
.LFB2083:
.cfi_startproc
endbr64
subq $472, %rsp
.cfi_def_cfa_offset 480
movss %xmm0, 28(%rsp)
movss %xmm1, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
movq %fs:40, %rax
movq %rax, 456(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 480(%rsp), %rax
movq %rax, 160(%rsp)
leaq 488(%rsp), %rax
movq %rax, 168(%rsp)
leaq 496(%rsp), %rax
movq %rax, 176(%rsp)
leaq 504(%rsp), %rax
movq %rax, 184(%rsp)
leaq 512(%rsp), %rax
movq %rax, 192(%rsp)
leaq 520(%rsp), %rax
movq %rax, 200(%rsp)
leaq 528(%rsp), %rax
movq %rax, 208(%rsp)
leaq 536(%rsp), %rax
movq %rax, 216(%rsp)
leaq 544(%rsp), %rax
movq %rax, 224(%rsp)
leaq 552(%rsp), %rax
movq %rax, 232(%rsp)
leaq 560(%rsp), %rax
movq %rax, 240(%rsp)
leaq 568(%rsp), %rax
movq %rax, 248(%rsp)
leaq 576(%rsp), %rax
movq %rax, 256(%rsp)
leaq 584(%rsp), %rax
movq %rax, 264(%rsp)
leaq 592(%rsp), %rax
movq %rax, 272(%rsp)
leaq 600(%rsp), %rax
movq %rax, 280(%rsp)
leaq 608(%rsp), %rax
movq %rax, 288(%rsp)
leaq 616(%rsp), %rax
movq %rax, 296(%rsp)
leaq 624(%rsp), %rax
movq %rax, 304(%rsp)
leaq 632(%rsp), %rax
movq %rax, 312(%rsp)
leaq 640(%rsp), %rax
movq %rax, 320(%rsp)
leaq 648(%rsp), %rax
movq %rax, 328(%rsp)
leaq 656(%rsp), %rax
movq %rax, 336(%rsp)
leaq 664(%rsp), %rax
movq %rax, 344(%rsp)
leaq 672(%rsp), %rax
movq %rax, 352(%rsp)
leaq 680(%rsp), %rax
movq %rax, 360(%rsp)
leaq 688(%rsp), %rax
movq %rax, 368(%rsp)
leaq 696(%rsp), %rax
movq %rax, 376(%rsp)
leaq 704(%rsp), %rax
movq %rax, 384(%rsp)
leaq 712(%rsp), %rax
movq %rax, 392(%rsp)
leaq 720(%rsp), %rax
movq %rax, 400(%rsp)
leaq 728(%rsp), %rax
movq %rax, 408(%rsp)
leaq 736(%rsp), %rax
movq %rax, 416(%rsp)
leaq 744(%rsp), %rax
movq %rax, 424(%rsp)
leaq 752(%rsp), %rax
movq %rax, 432(%rsp)
leaq 760(%rsp), %rax
movq %rax, 440(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 456(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $472, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 488
pushq 40(%rsp)
.cfi_def_cfa_offset 496
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7computeffffffffffffffffffffffffffffffffffffffffffff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 480
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, .-_Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
.globl _Z7computeffffffffffffffffffffffffffffffffffffffffffff
.type _Z7computeffffffffffffffffffffffffffffffffffffffffffff, @function
_Z7computeffffffffffffffffffffffffffffffffffffffffffff:
.LFB2084:
.cfi_startproc
endbr64
subq $296, %rsp
.cfi_def_cfa_offset 304
movss 584(%rsp), %xmm8
movss %xmm8, 280(%rsp)
movss 576(%rsp), %xmm8
movss %xmm8, 272(%rsp)
movss 568(%rsp), %xmm8
movss %xmm8, 264(%rsp)
movss 560(%rsp), %xmm8
movss %xmm8, 256(%rsp)
movss 552(%rsp), %xmm8
movss %xmm8, 248(%rsp)
movss 544(%rsp), %xmm8
movss %xmm8, 240(%rsp)
movss 536(%rsp), %xmm8
movss %xmm8, 232(%rsp)
movss 528(%rsp), %xmm8
movss %xmm8, 224(%rsp)
movss 520(%rsp), %xmm8
movss %xmm8, 216(%rsp)
movss 512(%rsp), %xmm8
movss %xmm8, 208(%rsp)
movss 504(%rsp), %xmm8
movss %xmm8, 200(%rsp)
movss 496(%rsp), %xmm8
movss %xmm8, 192(%rsp)
movss 488(%rsp), %xmm8
movss %xmm8, 184(%rsp)
movss 480(%rsp), %xmm8
movss %xmm8, 176(%rsp)
movss 472(%rsp), %xmm8
movss %xmm8, 168(%rsp)
movss 464(%rsp), %xmm8
movss %xmm8, 160(%rsp)
movss 456(%rsp), %xmm8
movss %xmm8, 152(%rsp)
movss 448(%rsp), %xmm8
movss %xmm8, 144(%rsp)
movss 440(%rsp), %xmm8
movss %xmm8, 136(%rsp)
movss 432(%rsp), %xmm8
movss %xmm8, 128(%rsp)
movss 424(%rsp), %xmm8
movss %xmm8, 120(%rsp)
movss 416(%rsp), %xmm8
movss %xmm8, 112(%rsp)
movss 408(%rsp), %xmm8
movss %xmm8, 104(%rsp)
movss 400(%rsp), %xmm8
movss %xmm8, 96(%rsp)
movss 392(%rsp), %xmm8
movss %xmm8, 88(%rsp)
movss 384(%rsp), %xmm8
movss %xmm8, 80(%rsp)
movss 376(%rsp), %xmm8
movss %xmm8, 72(%rsp)
movss 368(%rsp), %xmm8
movss %xmm8, 64(%rsp)
movss 360(%rsp), %xmm8
movss %xmm8, 56(%rsp)
movss 352(%rsp), %xmm8
movss %xmm8, 48(%rsp)
movss 344(%rsp), %xmm8
movss %xmm8, 40(%rsp)
movss 336(%rsp), %xmm8
movss %xmm8, 32(%rsp)
movss 328(%rsp), %xmm8
movss %xmm8, 24(%rsp)
movss 320(%rsp), %xmm8
movss %xmm8, 16(%rsp)
movss 312(%rsp), %xmm8
movss %xmm8, 8(%rsp)
movss 304(%rsp), %xmm8
movss %xmm8, (%rsp)
call _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
addq $296, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7computeffffffffffffffffffffffffffffffffffffffffffff, .-_Z7computeffffffffffffffffffffffffffffffffffffffffffff
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $384, %rsp
.cfi_def_cfa_offset 400
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 344(%rsp)
movq 16(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 336(%rsp)
movq 24(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 328(%rsp)
movq 32(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 320(%rsp)
movq 40(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 312(%rsp)
movq 48(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 304(%rsp)
movq 56(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 296(%rsp)
movq 64(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 288(%rsp)
movq 72(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 280(%rsp)
movq 80(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 272(%rsp)
movq 88(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 264(%rsp)
movq 96(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 256(%rsp)
movq 104(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 248(%rsp)
movq 112(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 240(%rsp)
movq 120(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 232(%rsp)
movq 128(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 224(%rsp)
movq 136(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 216(%rsp)
movq 144(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 208(%rsp)
movq 152(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 200(%rsp)
movq 160(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 192(%rsp)
movq 168(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 184(%rsp)
movq 176(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 176(%rsp)
movq 184(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 168(%rsp)
movq 192(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 160(%rsp)
movq 200(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 152(%rsp)
movq 208(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 144(%rsp)
movq 216(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 136(%rsp)
movq 224(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 128(%rsp)
movq 232(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 120(%rsp)
movq 240(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 112(%rsp)
movq 248(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 104(%rsp)
movq 256(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 96(%rsp)
movq 264(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 88(%rsp)
movq 272(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 80(%rsp)
movq 280(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 72(%rsp)
movq 288(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 64(%rsp)
movq 296(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 56(%rsp)
movq 304(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 48(%rsp)
movq 312(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 40(%rsp)
movq 320(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 32(%rsp)
movq 328(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 24(%rsp)
movq 336(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 16(%rsp)
movq 344(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 8(%rsp)
movq 352(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, (%rsp)
movl $1, 372(%rsp)
movl $1, 376(%rsp)
movl $1, 360(%rsp)
movl $1, 364(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 372(%rsp), %rdx
movl $1, %ecx
movq 360(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $384, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pxor %xmm0, %xmm0
cvtsd2ss 344(%rsp), %xmm0
pxor %xmm1, %xmm1
cvtsd2ss (%rsp), %xmm1
leaq -288(%rsp), %rsp
.cfi_def_cfa_offset 688
movss %xmm1, 280(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 296(%rsp), %xmm1
movss %xmm1, 272(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 304(%rsp), %xmm1
movss %xmm1, 264(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 312(%rsp), %xmm1
movss %xmm1, 256(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 320(%rsp), %xmm1
movss %xmm1, 248(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 328(%rsp), %xmm1
movss %xmm1, 240(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 336(%rsp), %xmm1
movss %xmm1, 232(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 344(%rsp), %xmm1
movss %xmm1, 224(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 352(%rsp), %xmm1
movss %xmm1, 216(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 360(%rsp), %xmm1
movss %xmm1, 208(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 368(%rsp), %xmm1
movss %xmm1, 200(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 376(%rsp), %xmm1
movss %xmm1, 192(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 384(%rsp), %xmm1
movss %xmm1, 184(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 392(%rsp), %xmm1
movss %xmm1, 176(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 400(%rsp), %xmm1
movss %xmm1, 168(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 408(%rsp), %xmm1
movss %xmm1, 160(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 416(%rsp), %xmm1
movss %xmm1, 152(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 424(%rsp), %xmm1
movss %xmm1, 144(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 432(%rsp), %xmm1
movss %xmm1, 136(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 440(%rsp), %xmm1
movss %xmm1, 128(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 448(%rsp), %xmm1
movss %xmm1, 120(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 456(%rsp), %xmm1
movss %xmm1, 112(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 464(%rsp), %xmm1
movss %xmm1, 104(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 472(%rsp), %xmm1
movss %xmm1, 96(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 480(%rsp), %xmm1
movss %xmm1, 88(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 488(%rsp), %xmm1
movss %xmm1, 80(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 496(%rsp), %xmm1
movss %xmm1, 72(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 504(%rsp), %xmm1
movss %xmm1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 512(%rsp), %xmm1
movss %xmm1, 56(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 520(%rsp), %xmm1
movss %xmm1, 48(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 528(%rsp), %xmm1
movss %xmm1, 40(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 536(%rsp), %xmm1
movss %xmm1, 32(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 544(%rsp), %xmm1
movss %xmm1, 24(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 552(%rsp), %xmm1
movss %xmm1, 16(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 560(%rsp), %xmm1
movss %xmm1, 8(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 568(%rsp), %xmm1
movss %xmm1, (%rsp)
pxor %xmm7, %xmm7
cvtsd2ss 576(%rsp), %xmm7
pxor %xmm6, %xmm6
cvtsd2ss 584(%rsp), %xmm6
pxor %xmm5, %xmm5
cvtsd2ss 592(%rsp), %xmm5
pxor %xmm4, %xmm4
cvtsd2ss 600(%rsp), %xmm4
pxor %xmm3, %xmm3
cvtsd2ss 608(%rsp), %xmm3
pxor %xmm2, %xmm2
cvtsd2ss 616(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtsd2ss 624(%rsp), %xmm1
call _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
addq $288, %rsp
.cfi_def_cfa_offset 400
jmp .L16
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z7computeffffffffffffffffffffffffffffffffffffffffffff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7computeffffffffffffffffffffffffffffffffffffffffffff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41,float var_42,float var_43) {
if (comp <= (+1.2816E-37f / powf(-1.0187E-35f, var_1 - -1.9877E-37f - var_2))) {
if (comp >= -1.8770E-44f - var_3) {
if (comp == var_4 / (+1.3481E-35f - +0.0f * var_5)) {
comp += (var_6 - var_7 - -1.7504E-42f + (var_8 - (var_9 / +1.5357E35f)));
float tmp_1 = powf(-1.3450E-37f * (var_10 + fabsf(+1.1372E26f)), (+1.2480E-36f + var_11));
comp = tmp_1 / +1.5369E-35f - log10f((var_12 / var_13 + var_14 * var_15));
if (comp >= powf(+1.5375E34f, var_16 * (-1.3830E-5f / expf((-1.1300E-35f - (+1.2449E35f * atan2f(-1.6221E36f, (+0.0f / -1.7653E25f / ldexpf((+1.3912E-43f * (-1.7357E-35f + -1.7682E-42f - var_17 * expf((var_18 / ceilf(+1.2437E-42f))))), 2))))))))) {
comp += atanf((-1.9104E-41f * +1.5842E-43f + -1.1733E24f * log10f(sinf(-1.5217E2f * var_19 - (-1.5927E35f + -1.9048E-35f)))));
comp += (var_20 * log10f((+0.0f * (+1.4060E34f * +1.6696E-37f))));
comp += +1.3113E-35f * +1.9907E-37f / var_21;
comp += (var_22 - (+0.0f - asinf(var_23 / fmodf(+1.2304E-43f, var_24 / logf((+1.2814E-37f - (var_25 / (-1.0064E-42f * -1.0711E-41f))))))));
}
if (comp == (var_26 + +1.0784E-15f / (var_27 - -1.0220E34f / (+0.0f + +1.9677E34f)))) {
comp = var_28 + (+1.4433E36f * var_29);
comp = (var_30 / +1.4266E21f - var_31 * +1.7675E-36f);
comp += (var_32 / var_33 + var_34 * +1.6759E-37f);
}
if (comp < (-1.5166E-41f - var_35)) {
comp = atanf(var_36 + (var_37 + -1.0570E-42f));
float tmp_2 = var_38 - asinf((var_39 + coshf(-1.5107E-37f * +1.6173E-43f * (var_40 - var_41 - +0.0f))));
comp = tmp_2 * -0.0f / (var_42 * var_43);
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
float tmp_40 = atof(argv[40]);
float tmp_41 = atof(argv[41]);
float tmp_42 = atof(argv[42]);
float tmp_43 = atof(argv[43]);
float tmp_44 = atof(argv[44]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40,tmp_41,tmp_42,tmp_43,tmp_44);
cudaDeviceSynchronize();
return 0;
} | /* This is a automatically generated test. Do not modify */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41,float var_42,float var_43) {
if (comp <= (+1.2816E-37f / powf(-1.0187E-35f, var_1 - -1.9877E-37f - var_2))) {
if (comp >= -1.8770E-44f - var_3) {
if (comp == var_4 / (+1.3481E-35f - +0.0f * var_5)) {
comp += (var_6 - var_7 - -1.7504E-42f + (var_8 - (var_9 / +1.5357E35f)));
float tmp_1 = powf(-1.3450E-37f * (var_10 + fabsf(+1.1372E26f)), (+1.2480E-36f + var_11));
comp = tmp_1 / +1.5369E-35f - log10f((var_12 / var_13 + var_14 * var_15));
if (comp >= powf(+1.5375E34f, var_16 * (-1.3830E-5f / expf((-1.1300E-35f - (+1.2449E35f * atan2f(-1.6221E36f, (+0.0f / -1.7653E25f / ldexpf((+1.3912E-43f * (-1.7357E-35f + -1.7682E-42f - var_17 * expf((var_18 / ceilf(+1.2437E-42f))))), 2))))))))) {
comp += atanf((-1.9104E-41f * +1.5842E-43f + -1.1733E24f * log10f(sinf(-1.5217E2f * var_19 - (-1.5927E35f + -1.9048E-35f)))));
comp += (var_20 * log10f((+0.0f * (+1.4060E34f * +1.6696E-37f))));
comp += +1.3113E-35f * +1.9907E-37f / var_21;
comp += (var_22 - (+0.0f - asinf(var_23 / fmodf(+1.2304E-43f, var_24 / logf((+1.2814E-37f - (var_25 / (-1.0064E-42f * -1.0711E-41f))))))));
}
if (comp == (var_26 + +1.0784E-15f / (var_27 - -1.0220E34f / (+0.0f + +1.9677E34f)))) {
comp = var_28 + (+1.4433E36f * var_29);
comp = (var_30 / +1.4266E21f - var_31 * +1.7675E-36f);
comp += (var_32 / var_33 + var_34 * +1.6759E-37f);
}
if (comp < (-1.5166E-41f - var_35)) {
comp = atanf(var_36 + (var_37 + -1.0570E-42f));
float tmp_2 = var_38 - asinf((var_39 + coshf(-1.5107E-37f * +1.6173E-43f * (var_40 - var_41 - +0.0f))));
comp = tmp_2 * -0.0f / (var_42 * var_43);
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
float tmp_40 = atof(argv[40]);
float tmp_41 = atof(argv[41]);
float tmp_42 = atof(argv[42]);
float tmp_43 = atof(argv[43]);
float tmp_44 = atof(argv[44]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40,tmp_41,tmp_42,tmp_43,tmp_44);
hipDeviceSynchronize();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* This is a automatically generated test. Do not modify */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41,float var_42,float var_43) {
if (comp <= (+1.2816E-37f / powf(-1.0187E-35f, var_1 - -1.9877E-37f - var_2))) {
if (comp >= -1.8770E-44f - var_3) {
if (comp == var_4 / (+1.3481E-35f - +0.0f * var_5)) {
comp += (var_6 - var_7 - -1.7504E-42f + (var_8 - (var_9 / +1.5357E35f)));
float tmp_1 = powf(-1.3450E-37f * (var_10 + fabsf(+1.1372E26f)), (+1.2480E-36f + var_11));
comp = tmp_1 / +1.5369E-35f - log10f((var_12 / var_13 + var_14 * var_15));
if (comp >= powf(+1.5375E34f, var_16 * (-1.3830E-5f / expf((-1.1300E-35f - (+1.2449E35f * atan2f(-1.6221E36f, (+0.0f / -1.7653E25f / ldexpf((+1.3912E-43f * (-1.7357E-35f + -1.7682E-42f - var_17 * expf((var_18 / ceilf(+1.2437E-42f))))), 2))))))))) {
comp += atanf((-1.9104E-41f * +1.5842E-43f + -1.1733E24f * log10f(sinf(-1.5217E2f * var_19 - (-1.5927E35f + -1.9048E-35f)))));
comp += (var_20 * log10f((+0.0f * (+1.4060E34f * +1.6696E-37f))));
comp += +1.3113E-35f * +1.9907E-37f / var_21;
comp += (var_22 - (+0.0f - asinf(var_23 / fmodf(+1.2304E-43f, var_24 / logf((+1.2814E-37f - (var_25 / (-1.0064E-42f * -1.0711E-41f))))))));
}
if (comp == (var_26 + +1.0784E-15f / (var_27 - -1.0220E34f / (+0.0f + +1.9677E34f)))) {
comp = var_28 + (+1.4433E36f * var_29);
comp = (var_30 / +1.4266E21f - var_31 * +1.7675E-36f);
comp += (var_32 / var_33 + var_34 * +1.6759E-37f);
}
if (comp < (-1.5166E-41f - var_35)) {
comp = atanf(var_36 + (var_37 + -1.0570E-42f));
float tmp_2 = var_38 - asinf((var_39 + coshf(-1.5107E-37f * +1.6173E-43f * (var_40 - var_41 - +0.0f))));
comp = tmp_2 * -0.0f / (var_42 * var_43);
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
float tmp_40 = atof(argv[40]);
float tmp_41 = atof(argv[41]);
float tmp_42 = atof(argv[42]);
float tmp_43 = atof(argv[43]);
float tmp_44 = atof(argv[44]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40,tmp_41,tmp_42,tmp_43,tmp_44);
hipDeviceSynchronize();
return 0;
} | .text
.file "test.hip"
.globl _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff # -- Begin function _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.p2align 4, 0x90
.type _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff,@function
_Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff: # @_Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.cfi_startproc
# %bb.0:
subq $440, %rsp # imm = 0x1B8
.cfi_def_cfa_offset 448
movss %xmm0, 28(%rsp)
movss %xmm1, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 448(%rsp), %rax
movq %rax, 144(%rsp)
leaq 456(%rsp), %rax
movq %rax, 152(%rsp)
leaq 464(%rsp), %rax
movq %rax, 160(%rsp)
leaq 472(%rsp), %rax
movq %rax, 168(%rsp)
leaq 480(%rsp), %rax
movq %rax, 176(%rsp)
leaq 488(%rsp), %rax
movq %rax, 184(%rsp)
leaq 496(%rsp), %rax
movq %rax, 192(%rsp)
leaq 504(%rsp), %rax
movq %rax, 200(%rsp)
leaq 512(%rsp), %rax
movq %rax, 208(%rsp)
leaq 520(%rsp), %rax
movq %rax, 216(%rsp)
leaq 528(%rsp), %rax
movq %rax, 224(%rsp)
leaq 536(%rsp), %rax
movq %rax, 232(%rsp)
leaq 544(%rsp), %rax
movq %rax, 240(%rsp)
leaq 552(%rsp), %rax
movq %rax, 248(%rsp)
leaq 560(%rsp), %rax
movq %rax, 256(%rsp)
leaq 568(%rsp), %rax
movq %rax, 264(%rsp)
leaq 576(%rsp), %rax
movq %rax, 272(%rsp)
leaq 584(%rsp), %rax
movq %rax, 280(%rsp)
leaq 592(%rsp), %rax
movq %rax, 288(%rsp)
leaq 600(%rsp), %rax
movq %rax, 296(%rsp)
leaq 608(%rsp), %rax
movq %rax, 304(%rsp)
leaq 616(%rsp), %rax
movq %rax, 312(%rsp)
leaq 624(%rsp), %rax
movq %rax, 320(%rsp)
leaq 632(%rsp), %rax
movq %rax, 328(%rsp)
leaq 640(%rsp), %rax
movq %rax, 336(%rsp)
leaq 648(%rsp), %rax
movq %rax, 344(%rsp)
leaq 656(%rsp), %rax
movq %rax, 352(%rsp)
leaq 664(%rsp), %rax
movq %rax, 360(%rsp)
leaq 672(%rsp), %rax
movq %rax, 368(%rsp)
leaq 680(%rsp), %rax
movq %rax, 376(%rsp)
leaq 688(%rsp), %rax
movq %rax, 384(%rsp)
leaq 696(%rsp), %rax
movq %rax, 392(%rsp)
leaq 704(%rsp), %rax
movq %rax, 400(%rsp)
leaq 712(%rsp), %rax
movq %rax, 408(%rsp)
leaq 720(%rsp), %rax
movq %rax, 416(%rsp)
leaq 728(%rsp), %rax
movq %rax, 424(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7computeffffffffffffffffffffffffffffffffffffffffffff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $456, %rsp # imm = 0x1C8
.cfi_adjust_cfa_offset -456
retq
.Lfunc_end0:
.size _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff, .Lfunc_end0-_Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.cfi_endproc
# -- End function
.globl _Z11initPointerf # -- Begin function _Z11initPointerf
.p2align 4, 0x90
.type _Z11initPointerf,@function
_Z11initPointerf: # @_Z11initPointerf
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl $40, %edi
callq malloc
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq $10, %rcx
jne .LBB1_1
# %bb.2:
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $640, %rsp # imm = 0x280
.cfi_def_cfa_offset 656
.cfi_offset %rbx, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 632(%rsp) # 8-byte Spill
movq 16(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 624(%rsp) # 8-byte Spill
movq 24(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 616(%rsp) # 8-byte Spill
movq 32(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 608(%rsp) # 8-byte Spill
movq 40(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 504(%rsp) # 8-byte Spill
movq 48(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 496(%rsp) # 8-byte Spill
movq 56(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 488(%rsp) # 8-byte Spill
movq 64(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 480(%rsp) # 8-byte Spill
movq 72(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 472(%rsp) # 8-byte Spill
movq 80(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 464(%rsp) # 8-byte Spill
movq 88(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 456(%rsp) # 8-byte Spill
movq 96(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 448(%rsp) # 8-byte Spill
movq 104(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 440(%rsp) # 8-byte Spill
movq 112(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 432(%rsp) # 8-byte Spill
movq 120(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 424(%rsp) # 8-byte Spill
movq 128(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 416(%rsp) # 8-byte Spill
movq 136(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 408(%rsp) # 8-byte Spill
movq 144(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 400(%rsp) # 8-byte Spill
movq 152(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 392(%rsp) # 8-byte Spill
movq 160(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 384(%rsp) # 8-byte Spill
movq 168(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 376(%rsp) # 8-byte Spill
movq 176(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 368(%rsp) # 8-byte Spill
movq 184(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 360(%rsp) # 8-byte Spill
movq 192(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 352(%rsp) # 8-byte Spill
movq 200(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 344(%rsp) # 8-byte Spill
movq 208(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 336(%rsp) # 8-byte Spill
movq 216(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 328(%rsp) # 8-byte Spill
movq 224(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 320(%rsp) # 8-byte Spill
movq 232(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 312(%rsp) # 8-byte Spill
movq 240(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 304(%rsp) # 8-byte Spill
movq 248(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 296(%rsp) # 8-byte Spill
movq 256(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 288(%rsp) # 8-byte Spill
movq 264(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 600(%rsp) # 8-byte Spill
movq 272(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 592(%rsp) # 8-byte Spill
movq 280(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 584(%rsp) # 8-byte Spill
movq 288(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 576(%rsp) # 8-byte Spill
movq 296(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 568(%rsp) # 8-byte Spill
movq 304(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 560(%rsp) # 8-byte Spill
movq 312(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 552(%rsp) # 8-byte Spill
movq 320(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 544(%rsp) # 8-byte Spill
movq 328(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 536(%rsp) # 8-byte Spill
movq 336(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 528(%rsp) # 8-byte Spill
movq 344(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 520(%rsp) # 8-byte Spill
movq 352(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 512(%rsp) # 8-byte Spill
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movsd 512(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm8
movsd 520(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm9
movsd 528(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm10
movsd 536(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm11
movsd 544(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm12
movsd 552(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm13
movsd 560(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm14
movsd 568(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm15
movsd 576(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm4
movsd 584(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm5
movsd 592(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm6
movsd 600(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm7
movsd 288(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 288(%rsp) # 4-byte Spill
movsd 296(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 296(%rsp) # 4-byte Spill
movsd 304(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 304(%rsp) # 4-byte Spill
movsd 312(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 312(%rsp) # 4-byte Spill
movsd 320(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 320(%rsp) # 4-byte Spill
movsd 328(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 328(%rsp) # 4-byte Spill
movsd 336(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 336(%rsp) # 4-byte Spill
movsd 344(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 344(%rsp) # 4-byte Spill
movsd 352(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 352(%rsp) # 4-byte Spill
movsd 360(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 360(%rsp) # 4-byte Spill
movsd 368(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 368(%rsp) # 4-byte Spill
movsd 376(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 376(%rsp) # 4-byte Spill
movsd 384(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 384(%rsp) # 4-byte Spill
movsd 392(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 392(%rsp) # 4-byte Spill
movsd 400(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 400(%rsp) # 4-byte Spill
movsd 408(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 408(%rsp) # 4-byte Spill
movsd 416(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 416(%rsp) # 4-byte Spill
movsd 424(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 424(%rsp) # 4-byte Spill
movsd 432(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 432(%rsp) # 4-byte Spill
movsd 440(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 440(%rsp) # 4-byte Spill
movsd 448(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 448(%rsp) # 4-byte Spill
movsd 456(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 456(%rsp) # 4-byte Spill
movsd 464(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 464(%rsp) # 4-byte Spill
movsd 472(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 472(%rsp) # 4-byte Spill
movsd 480(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 480(%rsp) # 4-byte Spill
movsd 488(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 488(%rsp) # 4-byte Spill
movsd 496(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 496(%rsp) # 4-byte Spill
movsd 504(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 504(%rsp) # 4-byte Spill
movsd 608(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm3
movsd 616(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm2
movsd 624(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm1
movsd 632(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm8, 280(%rsp)
movss %xmm9, 272(%rsp)
movss %xmm10, 264(%rsp)
movss %xmm11, 256(%rsp)
movss %xmm12, 248(%rsp)
movss %xmm13, 240(%rsp)
movss %xmm14, 232(%rsp)
movss %xmm15, 224(%rsp)
movss %xmm4, 216(%rsp)
movss %xmm5, 208(%rsp)
movss %xmm6, 200(%rsp)
movss %xmm7, 192(%rsp)
movss 288(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 184(%rsp)
movss 296(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 176(%rsp)
movss 304(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 168(%rsp)
movss 312(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 160(%rsp)
movss 320(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 152(%rsp)
movss 328(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 144(%rsp)
movss 336(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 136(%rsp)
movss 344(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 128(%rsp)
movss 352(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 120(%rsp)
movss 360(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 112(%rsp)
movss 368(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 104(%rsp)
movss 376(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 96(%rsp)
movss 384(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 88(%rsp)
movss 392(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 80(%rsp)
movss 400(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 72(%rsp)
movss 408(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 64(%rsp)
movss 416(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 56(%rsp)
movss 424(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 48(%rsp)
movss 432(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 40(%rsp)
movss 440(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 32(%rsp)
movss 448(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 24(%rsp)
movss 456(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 16(%rsp)
movss 464(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 8(%rsp)
movss 472(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, (%rsp)
movss 504(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss 496(%rsp), %xmm5 # 4-byte Reload
# xmm5 = mem[0],zero,zero,zero
movss 488(%rsp), %xmm6 # 4-byte Reload
# xmm6 = mem[0],zero,zero,zero
movss 480(%rsp), %xmm7 # 4-byte Reload
# xmm7 = mem[0],zero,zero,zero
callq _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.LBB2_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $640, %rsp # imm = 0x280
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7computeffffffffffffffffffffffffffffffffffffffffffff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7computeffffffffffffffffffffffffffffffffffffffffffff,@object # @_Z7computeffffffffffffffffffffffffffffffffffffffffffff
.section .rodata,"a",@progbits
.globl _Z7computeffffffffffffffffffffffffffffffffffffffffffff
.p2align 3, 0x0
_Z7computeffffffffffffffffffffffffffffffffffffffffffff:
.quad _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.size _Z7computeffffffffffffffffffffffffffffffffffffffffffff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7computeffffffffffffffffffffffffffffffffffffffffffff"
.size .L__unnamed_1, 55
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7computeffffffffffffffffffffffffffffffffffffffffffff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0018b930_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initPointerf
.type _Z11initPointerf, @function
_Z11initPointerf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movd %xmm0, %ebx
movl $40, %edi
call malloc@PLT
movq %rax, %rdx
leaq 40(%rax), %rcx
.L4:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L4
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11initPointerf, .-_Z11initPointerf
.globl _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
.type _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, @function
_Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff:
.LFB2083:
.cfi_startproc
endbr64
subq $472, %rsp
.cfi_def_cfa_offset 480
movss %xmm0, 28(%rsp)
movss %xmm1, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
movq %fs:40, %rax
movq %rax, 456(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 480(%rsp), %rax
movq %rax, 160(%rsp)
leaq 488(%rsp), %rax
movq %rax, 168(%rsp)
leaq 496(%rsp), %rax
movq %rax, 176(%rsp)
leaq 504(%rsp), %rax
movq %rax, 184(%rsp)
leaq 512(%rsp), %rax
movq %rax, 192(%rsp)
leaq 520(%rsp), %rax
movq %rax, 200(%rsp)
leaq 528(%rsp), %rax
movq %rax, 208(%rsp)
leaq 536(%rsp), %rax
movq %rax, 216(%rsp)
leaq 544(%rsp), %rax
movq %rax, 224(%rsp)
leaq 552(%rsp), %rax
movq %rax, 232(%rsp)
leaq 560(%rsp), %rax
movq %rax, 240(%rsp)
leaq 568(%rsp), %rax
movq %rax, 248(%rsp)
leaq 576(%rsp), %rax
movq %rax, 256(%rsp)
leaq 584(%rsp), %rax
movq %rax, 264(%rsp)
leaq 592(%rsp), %rax
movq %rax, 272(%rsp)
leaq 600(%rsp), %rax
movq %rax, 280(%rsp)
leaq 608(%rsp), %rax
movq %rax, 288(%rsp)
leaq 616(%rsp), %rax
movq %rax, 296(%rsp)
leaq 624(%rsp), %rax
movq %rax, 304(%rsp)
leaq 632(%rsp), %rax
movq %rax, 312(%rsp)
leaq 640(%rsp), %rax
movq %rax, 320(%rsp)
leaq 648(%rsp), %rax
movq %rax, 328(%rsp)
leaq 656(%rsp), %rax
movq %rax, 336(%rsp)
leaq 664(%rsp), %rax
movq %rax, 344(%rsp)
leaq 672(%rsp), %rax
movq %rax, 352(%rsp)
leaq 680(%rsp), %rax
movq %rax, 360(%rsp)
leaq 688(%rsp), %rax
movq %rax, 368(%rsp)
leaq 696(%rsp), %rax
movq %rax, 376(%rsp)
leaq 704(%rsp), %rax
movq %rax, 384(%rsp)
leaq 712(%rsp), %rax
movq %rax, 392(%rsp)
leaq 720(%rsp), %rax
movq %rax, 400(%rsp)
leaq 728(%rsp), %rax
movq %rax, 408(%rsp)
leaq 736(%rsp), %rax
movq %rax, 416(%rsp)
leaq 744(%rsp), %rax
movq %rax, 424(%rsp)
leaq 752(%rsp), %rax
movq %rax, 432(%rsp)
leaq 760(%rsp), %rax
movq %rax, 440(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 456(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $472, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 488
pushq 40(%rsp)
.cfi_def_cfa_offset 496
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7computeffffffffffffffffffffffffffffffffffffffffffff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 480
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, .-_Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
.globl _Z7computeffffffffffffffffffffffffffffffffffffffffffff
.type _Z7computeffffffffffffffffffffffffffffffffffffffffffff, @function
_Z7computeffffffffffffffffffffffffffffffffffffffffffff:
.LFB2084:
.cfi_startproc
endbr64
subq $296, %rsp
.cfi_def_cfa_offset 304
movss 584(%rsp), %xmm8
movss %xmm8, 280(%rsp)
movss 576(%rsp), %xmm8
movss %xmm8, 272(%rsp)
movss 568(%rsp), %xmm8
movss %xmm8, 264(%rsp)
movss 560(%rsp), %xmm8
movss %xmm8, 256(%rsp)
movss 552(%rsp), %xmm8
movss %xmm8, 248(%rsp)
movss 544(%rsp), %xmm8
movss %xmm8, 240(%rsp)
movss 536(%rsp), %xmm8
movss %xmm8, 232(%rsp)
movss 528(%rsp), %xmm8
movss %xmm8, 224(%rsp)
movss 520(%rsp), %xmm8
movss %xmm8, 216(%rsp)
movss 512(%rsp), %xmm8
movss %xmm8, 208(%rsp)
movss 504(%rsp), %xmm8
movss %xmm8, 200(%rsp)
movss 496(%rsp), %xmm8
movss %xmm8, 192(%rsp)
movss 488(%rsp), %xmm8
movss %xmm8, 184(%rsp)
movss 480(%rsp), %xmm8
movss %xmm8, 176(%rsp)
movss 472(%rsp), %xmm8
movss %xmm8, 168(%rsp)
movss 464(%rsp), %xmm8
movss %xmm8, 160(%rsp)
movss 456(%rsp), %xmm8
movss %xmm8, 152(%rsp)
movss 448(%rsp), %xmm8
movss %xmm8, 144(%rsp)
movss 440(%rsp), %xmm8
movss %xmm8, 136(%rsp)
movss 432(%rsp), %xmm8
movss %xmm8, 128(%rsp)
movss 424(%rsp), %xmm8
movss %xmm8, 120(%rsp)
movss 416(%rsp), %xmm8
movss %xmm8, 112(%rsp)
movss 408(%rsp), %xmm8
movss %xmm8, 104(%rsp)
movss 400(%rsp), %xmm8
movss %xmm8, 96(%rsp)
movss 392(%rsp), %xmm8
movss %xmm8, 88(%rsp)
movss 384(%rsp), %xmm8
movss %xmm8, 80(%rsp)
movss 376(%rsp), %xmm8
movss %xmm8, 72(%rsp)
movss 368(%rsp), %xmm8
movss %xmm8, 64(%rsp)
movss 360(%rsp), %xmm8
movss %xmm8, 56(%rsp)
movss 352(%rsp), %xmm8
movss %xmm8, 48(%rsp)
movss 344(%rsp), %xmm8
movss %xmm8, 40(%rsp)
movss 336(%rsp), %xmm8
movss %xmm8, 32(%rsp)
movss 328(%rsp), %xmm8
movss %xmm8, 24(%rsp)
movss 320(%rsp), %xmm8
movss %xmm8, 16(%rsp)
movss 312(%rsp), %xmm8
movss %xmm8, 8(%rsp)
movss 304(%rsp), %xmm8
movss %xmm8, (%rsp)
call _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
addq $296, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7computeffffffffffffffffffffffffffffffffffffffffffff, .-_Z7computeffffffffffffffffffffffffffffffffffffffffffff
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $384, %rsp
.cfi_def_cfa_offset 400
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 344(%rsp)
movq 16(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 336(%rsp)
movq 24(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 328(%rsp)
movq 32(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 320(%rsp)
movq 40(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 312(%rsp)
movq 48(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 304(%rsp)
movq 56(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 296(%rsp)
movq 64(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 288(%rsp)
movq 72(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 280(%rsp)
movq 80(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 272(%rsp)
movq 88(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 264(%rsp)
movq 96(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 256(%rsp)
movq 104(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 248(%rsp)
movq 112(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 240(%rsp)
movq 120(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 232(%rsp)
movq 128(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 224(%rsp)
movq 136(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 216(%rsp)
movq 144(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 208(%rsp)
movq 152(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 200(%rsp)
movq 160(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 192(%rsp)
movq 168(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 184(%rsp)
movq 176(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 176(%rsp)
movq 184(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 168(%rsp)
movq 192(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 160(%rsp)
movq 200(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 152(%rsp)
movq 208(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 144(%rsp)
movq 216(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 136(%rsp)
movq 224(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 128(%rsp)
movq 232(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 120(%rsp)
movq 240(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 112(%rsp)
movq 248(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 104(%rsp)
movq 256(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 96(%rsp)
movq 264(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 88(%rsp)
movq 272(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 80(%rsp)
movq 280(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 72(%rsp)
movq 288(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 64(%rsp)
movq 296(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 56(%rsp)
movq 304(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 48(%rsp)
movq 312(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 40(%rsp)
movq 320(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 32(%rsp)
movq 328(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 24(%rsp)
movq 336(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 16(%rsp)
movq 344(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 8(%rsp)
movq 352(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, (%rsp)
movl $1, 372(%rsp)
movl $1, 376(%rsp)
movl $1, 360(%rsp)
movl $1, 364(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 372(%rsp), %rdx
movl $1, %ecx
movq 360(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $384, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pxor %xmm0, %xmm0
cvtsd2ss 344(%rsp), %xmm0
pxor %xmm1, %xmm1
cvtsd2ss (%rsp), %xmm1
leaq -288(%rsp), %rsp
.cfi_def_cfa_offset 688
movss %xmm1, 280(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 296(%rsp), %xmm1
movss %xmm1, 272(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 304(%rsp), %xmm1
movss %xmm1, 264(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 312(%rsp), %xmm1
movss %xmm1, 256(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 320(%rsp), %xmm1
movss %xmm1, 248(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 328(%rsp), %xmm1
movss %xmm1, 240(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 336(%rsp), %xmm1
movss %xmm1, 232(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 344(%rsp), %xmm1
movss %xmm1, 224(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 352(%rsp), %xmm1
movss %xmm1, 216(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 360(%rsp), %xmm1
movss %xmm1, 208(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 368(%rsp), %xmm1
movss %xmm1, 200(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 376(%rsp), %xmm1
movss %xmm1, 192(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 384(%rsp), %xmm1
movss %xmm1, 184(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 392(%rsp), %xmm1
movss %xmm1, 176(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 400(%rsp), %xmm1
movss %xmm1, 168(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 408(%rsp), %xmm1
movss %xmm1, 160(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 416(%rsp), %xmm1
movss %xmm1, 152(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 424(%rsp), %xmm1
movss %xmm1, 144(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 432(%rsp), %xmm1
movss %xmm1, 136(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 440(%rsp), %xmm1
movss %xmm1, 128(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 448(%rsp), %xmm1
movss %xmm1, 120(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 456(%rsp), %xmm1
movss %xmm1, 112(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 464(%rsp), %xmm1
movss %xmm1, 104(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 472(%rsp), %xmm1
movss %xmm1, 96(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 480(%rsp), %xmm1
movss %xmm1, 88(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 488(%rsp), %xmm1
movss %xmm1, 80(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 496(%rsp), %xmm1
movss %xmm1, 72(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 504(%rsp), %xmm1
movss %xmm1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 512(%rsp), %xmm1
movss %xmm1, 56(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 520(%rsp), %xmm1
movss %xmm1, 48(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 528(%rsp), %xmm1
movss %xmm1, 40(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 536(%rsp), %xmm1
movss %xmm1, 32(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 544(%rsp), %xmm1
movss %xmm1, 24(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 552(%rsp), %xmm1
movss %xmm1, 16(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 560(%rsp), %xmm1
movss %xmm1, 8(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 568(%rsp), %xmm1
movss %xmm1, (%rsp)
pxor %xmm7, %xmm7
cvtsd2ss 576(%rsp), %xmm7
pxor %xmm6, %xmm6
cvtsd2ss 584(%rsp), %xmm6
pxor %xmm5, %xmm5
cvtsd2ss 592(%rsp), %xmm5
pxor %xmm4, %xmm4
cvtsd2ss 600(%rsp), %xmm4
pxor %xmm3, %xmm3
cvtsd2ss 608(%rsp), %xmm3
pxor %xmm2, %xmm2
cvtsd2ss 616(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtsd2ss 624(%rsp), %xmm1
call _Z68__device_stub__Z7computeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
addq $288, %rsp
.cfi_def_cfa_offset 400
jmp .L16
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z7computeffffffffffffffffffffffffffffffffffffffffffff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7computeffffffffffffffffffffffffffffffffffffffffffff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test.hip"
.globl _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff # -- Begin function _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.p2align 4, 0x90
.type _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff,@function
_Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff: # @_Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.cfi_startproc
# %bb.0:
subq $440, %rsp # imm = 0x1B8
.cfi_def_cfa_offset 448
movss %xmm0, 28(%rsp)
movss %xmm1, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 448(%rsp), %rax
movq %rax, 144(%rsp)
leaq 456(%rsp), %rax
movq %rax, 152(%rsp)
leaq 464(%rsp), %rax
movq %rax, 160(%rsp)
leaq 472(%rsp), %rax
movq %rax, 168(%rsp)
leaq 480(%rsp), %rax
movq %rax, 176(%rsp)
leaq 488(%rsp), %rax
movq %rax, 184(%rsp)
leaq 496(%rsp), %rax
movq %rax, 192(%rsp)
leaq 504(%rsp), %rax
movq %rax, 200(%rsp)
leaq 512(%rsp), %rax
movq %rax, 208(%rsp)
leaq 520(%rsp), %rax
movq %rax, 216(%rsp)
leaq 528(%rsp), %rax
movq %rax, 224(%rsp)
leaq 536(%rsp), %rax
movq %rax, 232(%rsp)
leaq 544(%rsp), %rax
movq %rax, 240(%rsp)
leaq 552(%rsp), %rax
movq %rax, 248(%rsp)
leaq 560(%rsp), %rax
movq %rax, 256(%rsp)
leaq 568(%rsp), %rax
movq %rax, 264(%rsp)
leaq 576(%rsp), %rax
movq %rax, 272(%rsp)
leaq 584(%rsp), %rax
movq %rax, 280(%rsp)
leaq 592(%rsp), %rax
movq %rax, 288(%rsp)
leaq 600(%rsp), %rax
movq %rax, 296(%rsp)
leaq 608(%rsp), %rax
movq %rax, 304(%rsp)
leaq 616(%rsp), %rax
movq %rax, 312(%rsp)
leaq 624(%rsp), %rax
movq %rax, 320(%rsp)
leaq 632(%rsp), %rax
movq %rax, 328(%rsp)
leaq 640(%rsp), %rax
movq %rax, 336(%rsp)
leaq 648(%rsp), %rax
movq %rax, 344(%rsp)
leaq 656(%rsp), %rax
movq %rax, 352(%rsp)
leaq 664(%rsp), %rax
movq %rax, 360(%rsp)
leaq 672(%rsp), %rax
movq %rax, 368(%rsp)
leaq 680(%rsp), %rax
movq %rax, 376(%rsp)
leaq 688(%rsp), %rax
movq %rax, 384(%rsp)
leaq 696(%rsp), %rax
movq %rax, 392(%rsp)
leaq 704(%rsp), %rax
movq %rax, 400(%rsp)
leaq 712(%rsp), %rax
movq %rax, 408(%rsp)
leaq 720(%rsp), %rax
movq %rax, 416(%rsp)
leaq 728(%rsp), %rax
movq %rax, 424(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7computeffffffffffffffffffffffffffffffffffffffffffff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $456, %rsp # imm = 0x1C8
.cfi_adjust_cfa_offset -456
retq
.Lfunc_end0:
.size _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff, .Lfunc_end0-_Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.cfi_endproc
# -- End function
.globl _Z11initPointerf # -- Begin function _Z11initPointerf
.p2align 4, 0x90
.type _Z11initPointerf,@function
_Z11initPointerf: # @_Z11initPointerf
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl $40, %edi
callq malloc
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq $10, %rcx
jne .LBB1_1
# %bb.2:
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $640, %rsp # imm = 0x280
.cfi_def_cfa_offset 656
.cfi_offset %rbx, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 632(%rsp) # 8-byte Spill
movq 16(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 624(%rsp) # 8-byte Spill
movq 24(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 616(%rsp) # 8-byte Spill
movq 32(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 608(%rsp) # 8-byte Spill
movq 40(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 504(%rsp) # 8-byte Spill
movq 48(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 496(%rsp) # 8-byte Spill
movq 56(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 488(%rsp) # 8-byte Spill
movq 64(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 480(%rsp) # 8-byte Spill
movq 72(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 472(%rsp) # 8-byte Spill
movq 80(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 464(%rsp) # 8-byte Spill
movq 88(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 456(%rsp) # 8-byte Spill
movq 96(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 448(%rsp) # 8-byte Spill
movq 104(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 440(%rsp) # 8-byte Spill
movq 112(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 432(%rsp) # 8-byte Spill
movq 120(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 424(%rsp) # 8-byte Spill
movq 128(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 416(%rsp) # 8-byte Spill
movq 136(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 408(%rsp) # 8-byte Spill
movq 144(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 400(%rsp) # 8-byte Spill
movq 152(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 392(%rsp) # 8-byte Spill
movq 160(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 384(%rsp) # 8-byte Spill
movq 168(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 376(%rsp) # 8-byte Spill
movq 176(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 368(%rsp) # 8-byte Spill
movq 184(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 360(%rsp) # 8-byte Spill
movq 192(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 352(%rsp) # 8-byte Spill
movq 200(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 344(%rsp) # 8-byte Spill
movq 208(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 336(%rsp) # 8-byte Spill
movq 216(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 328(%rsp) # 8-byte Spill
movq 224(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 320(%rsp) # 8-byte Spill
movq 232(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 312(%rsp) # 8-byte Spill
movq 240(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 304(%rsp) # 8-byte Spill
movq 248(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 296(%rsp) # 8-byte Spill
movq 256(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 288(%rsp) # 8-byte Spill
movq 264(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 600(%rsp) # 8-byte Spill
movq 272(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 592(%rsp) # 8-byte Spill
movq 280(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 584(%rsp) # 8-byte Spill
movq 288(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 576(%rsp) # 8-byte Spill
movq 296(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 568(%rsp) # 8-byte Spill
movq 304(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 560(%rsp) # 8-byte Spill
movq 312(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 552(%rsp) # 8-byte Spill
movq 320(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 544(%rsp) # 8-byte Spill
movq 328(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 536(%rsp) # 8-byte Spill
movq 336(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 528(%rsp) # 8-byte Spill
movq 344(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 520(%rsp) # 8-byte Spill
movq 352(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 512(%rsp) # 8-byte Spill
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movsd 512(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm8
movsd 520(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm9
movsd 528(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm10
movsd 536(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm11
movsd 544(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm12
movsd 552(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm13
movsd 560(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm14
movsd 568(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm15
movsd 576(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm4
movsd 584(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm5
movsd 592(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm6
movsd 600(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm7
movsd 288(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 288(%rsp) # 4-byte Spill
movsd 296(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 296(%rsp) # 4-byte Spill
movsd 304(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 304(%rsp) # 4-byte Spill
movsd 312(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 312(%rsp) # 4-byte Spill
movsd 320(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 320(%rsp) # 4-byte Spill
movsd 328(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 328(%rsp) # 4-byte Spill
movsd 336(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 336(%rsp) # 4-byte Spill
movsd 344(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 344(%rsp) # 4-byte Spill
movsd 352(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 352(%rsp) # 4-byte Spill
movsd 360(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 360(%rsp) # 4-byte Spill
movsd 368(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 368(%rsp) # 4-byte Spill
movsd 376(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 376(%rsp) # 4-byte Spill
movsd 384(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 384(%rsp) # 4-byte Spill
movsd 392(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 392(%rsp) # 4-byte Spill
movsd 400(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 400(%rsp) # 4-byte Spill
movsd 408(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 408(%rsp) # 4-byte Spill
movsd 416(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 416(%rsp) # 4-byte Spill
movsd 424(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 424(%rsp) # 4-byte Spill
movsd 432(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 432(%rsp) # 4-byte Spill
movsd 440(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 440(%rsp) # 4-byte Spill
movsd 448(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 448(%rsp) # 4-byte Spill
movsd 456(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 456(%rsp) # 4-byte Spill
movsd 464(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 464(%rsp) # 4-byte Spill
movsd 472(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 472(%rsp) # 4-byte Spill
movsd 480(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 480(%rsp) # 4-byte Spill
movsd 488(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 488(%rsp) # 4-byte Spill
movsd 496(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 496(%rsp) # 4-byte Spill
movsd 504(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 504(%rsp) # 4-byte Spill
movsd 608(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm3
movsd 616(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm2
movsd 624(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm1
movsd 632(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm8, 280(%rsp)
movss %xmm9, 272(%rsp)
movss %xmm10, 264(%rsp)
movss %xmm11, 256(%rsp)
movss %xmm12, 248(%rsp)
movss %xmm13, 240(%rsp)
movss %xmm14, 232(%rsp)
movss %xmm15, 224(%rsp)
movss %xmm4, 216(%rsp)
movss %xmm5, 208(%rsp)
movss %xmm6, 200(%rsp)
movss %xmm7, 192(%rsp)
movss 288(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 184(%rsp)
movss 296(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 176(%rsp)
movss 304(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 168(%rsp)
movss 312(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 160(%rsp)
movss 320(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 152(%rsp)
movss 328(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 144(%rsp)
movss 336(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 136(%rsp)
movss 344(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 128(%rsp)
movss 352(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 120(%rsp)
movss 360(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 112(%rsp)
movss 368(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 104(%rsp)
movss 376(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 96(%rsp)
movss 384(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 88(%rsp)
movss 392(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 80(%rsp)
movss 400(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 72(%rsp)
movss 408(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 64(%rsp)
movss 416(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 56(%rsp)
movss 424(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 48(%rsp)
movss 432(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 40(%rsp)
movss 440(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 32(%rsp)
movss 448(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 24(%rsp)
movss 456(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 16(%rsp)
movss 464(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, 8(%rsp)
movss 472(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss %xmm4, (%rsp)
movss 504(%rsp), %xmm4 # 4-byte Reload
# xmm4 = mem[0],zero,zero,zero
movss 496(%rsp), %xmm5 # 4-byte Reload
# xmm5 = mem[0],zero,zero,zero
movss 488(%rsp), %xmm6 # 4-byte Reload
# xmm6 = mem[0],zero,zero,zero
movss 480(%rsp), %xmm7 # 4-byte Reload
# xmm7 = mem[0],zero,zero,zero
callq _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.LBB2_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $640, %rsp # imm = 0x280
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7computeffffffffffffffffffffffffffffffffffffffffffff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7computeffffffffffffffffffffffffffffffffffffffffffff,@object # @_Z7computeffffffffffffffffffffffffffffffffffffffffffff
.section .rodata,"a",@progbits
.globl _Z7computeffffffffffffffffffffffffffffffffffffffffffff
.p2align 3, 0x0
_Z7computeffffffffffffffffffffffffffffffffffffffffffff:
.quad _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.size _Z7computeffffffffffffffffffffffffffffffffffffffffffff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7computeffffffffffffffffffffffffffffffffffffffffffff"
.size .L__unnamed_1, 55
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__computeffffffffffffffffffffffffffffffffffffffffffff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7computeffffffffffffffffffffffffffffffffffffffffffff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #pragma once
#include <vector>
#include <string>
#include <cassert>
#include "Vector3.cuh.cu"
namespace RayTracing
{
class Image
{
private:
int m_width;
int m_height;
cudaResourceDesc m_cudaTextureResourceDesc;
cudaTextureDesc m_cudaTextureDesc;
cudaArray *m_buffer_d = nullptr;
public:
std::vector<Color> buffer;
cudaTextureObject_t cudaTexture;
public:
Image(const std::string &fileName);
template<bool isGPU>
void Init()
{
assert(("Not implemented", false));
}
template<bool isGPU, typename T>
T GetResource()
{
assert(("Not implemented", false));
}
Color GetColor(const float u, const float v) const;
void Deinit();
};
}; | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #pragma once
#include <vector>
#include <string>
#include <cassert>
#include "Vector3.cuh.cu"
namespace RayTracing
{
class Image
{
private:
int m_width;
int m_height;
cudaResourceDesc m_cudaTextureResourceDesc;
cudaTextureDesc m_cudaTextureDesc;
cudaArray *m_buffer_d = nullptr;
public:
std::vector<Color> buffer;
cudaTextureObject_t cudaTexture;
public:
Image(const std::string &fileName);
template<bool isGPU>
void Init()
{
assert(("Not implemented", false));
}
template<bool isGPU, typename T>
T GetResource()
{
assert(("Not implemented", false));
}
Color GetColor(const float u, const float v) const;
void Deinit();
};
}; | .file "tmpxft_0002215a_00000000-6_Image.cuh.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4045:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4045:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4068:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4068:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #pragma once
#include <vector>
#include <string>
#include <cassert>
#include "Vector3.cuh.cu"
namespace RayTracing
{
class Image
{
private:
int m_width;
int m_height;
cudaResourceDesc m_cudaTextureResourceDesc;
cudaTextureDesc m_cudaTextureDesc;
cudaArray *m_buffer_d = nullptr;
public:
std::vector<Color> buffer;
cudaTextureObject_t cudaTexture;
public:
Image(const std::string &fileName);
template<bool isGPU>
void Init()
{
assert(("Not implemented", false));
}
template<bool isGPU, typename T>
T GetResource()
{
assert(("Not implemented", false));
}
Color GetColor(const float u, const float v) const;
void Deinit();
};
}; | #pragma once
#include <hip/hip_runtime.h>
#include <vector>
#include <string>
#include <cassert>
#include "Vector3.cuh.cu"
namespace RayTracing
{
class Image
{
private:
int m_width;
int m_height;
hipResourceDesc m_cudaTextureResourceDesc;
hipTextureDesc m_cudaTextureDesc;
hipArray *m_buffer_d = nullptr;
public:
std::vector<Color> buffer;
hipTextureObject_t cudaTexture;
public:
Image(const std::string &fileName);
template<bool isGPU>
void Init()
{
assert(("Not implemented", false));
}
template<bool isGPU, typename T>
T GetResource()
{
assert(("Not implemented", false));
}
Color GetColor(const float u, const float v) const;
void Deinit();
};
}; |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #pragma once
#include <hip/hip_runtime.h>
#include <vector>
#include <string>
#include <cassert>
#include "Vector3.cuh.cu"
namespace RayTracing
{
class Image
{
private:
int m_width;
int m_height;
hipResourceDesc m_cudaTextureResourceDesc;
hipTextureDesc m_cudaTextureDesc;
hipArray *m_buffer_d = nullptr;
public:
std::vector<Color> buffer;
hipTextureObject_t cudaTexture;
public:
Image(const std::string &fileName);
template<bool isGPU>
void Init()
{
assert(("Not implemented", false));
}
template<bool isGPU, typename T>
T GetResource()
{
assert(("Not implemented", false));
}
Color GetColor(const float u, const float v) const;
void Deinit();
};
}; | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #pragma once
#include <hip/hip_runtime.h>
#include <vector>
#include <string>
#include <cassert>
#include "Vector3.cuh.cu"
namespace RayTracing
{
class Image
{
private:
int m_width;
int m_height;
hipResourceDesc m_cudaTextureResourceDesc;
hipTextureDesc m_cudaTextureDesc;
hipArray *m_buffer_d = nullptr;
public:
std::vector<Color> buffer;
hipTextureObject_t cudaTexture;
public:
Image(const std::string &fileName);
template<bool isGPU>
void Init()
{
assert(("Not implemented", false));
}
template<bool isGPU, typename T>
T GetResource()
{
assert(("Not implemented", false));
}
Color GetColor(const float u, const float v) const;
void Deinit();
};
}; | .text
.file "Image.cuh.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002215a_00000000-6_Image.cuh.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4045:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4045:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4068:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4068:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Image.cuh.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <time.h>
int main(void) {
time_t t;
time(&t);
printf("%ld\n", t);
printf(ctime(&t));
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <time.h>
int main(void) {
time_t t;
time(&t);
printf("%ld\n", t);
printf(ctime(&t));
} | .file "tmpxft_0011f843_00000000-6_File.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%ld\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rbx
movq %rbx, %rdi
call time@PLT
movq (%rsp), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call ctime@PLT
movq %rax, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <time.h>
int main(void) {
time_t t;
time(&t);
printf("%ld\n", t);
printf(ctime(&t));
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
int main(void) {
time_t t;
time(&t);
printf("%ld\n", t);
printf(ctime(&t));
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
int main(void) {
time_t t;
time(&t);
printf("%ld\n", t);
printf(ctime(&t));
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
int main(void) {
time_t t;
time(&t);
printf("%ld\n", t);
printf(ctime(&t));
} | .text
.file "File.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
leaq 8(%rsp), %rbx
movq %rbx, %rdi
callq time
movq 8(%rsp), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq %rbx, %rdi
callq ctime
movq %rax, %rdi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%ld\n"
.size .L.str, 5
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0011f843_00000000-6_File.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%ld\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rbx
movq %rbx, %rdi
call time@PLT
movq (%rsp), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call ctime@PLT
movq %rax, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "File.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
leaq 8(%rsp), %rbx
movq %rbx, %rdi
callq time
movq 8(%rsp), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq %rbx, %rdi
callq ctime
movq %rax, %rdi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%ld\n"
.size .L.str, 5
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
int main()
{
int devCount;
cudaGetDeviceCount(&devCount);
printf("device count: %d\n", devCount);
for (int i = 0; i < devCount; ++i) {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printf("ver: %d.%d\n", devProp.major, devProp.minor);
printf("max threads per block: %d\n", (int)devProp.maxThreadsPerBlock);
printf("number of SMs: %d\n", (int)devProp.multiProcessorCount);
printf("warp size: %d\n", (int)devProp.warpSize);
printf("max warps per SM: %d\n", devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
printf("registers available per SM: %d\n", (int)devProp.regsPerBlock);
printf("shared memory available per SM: %d\n", (int)devProp.sharedMemPerBlock);
printf("clock frequency: %d\n", (int)devProp.clockRate);
printf("total const memory: %d\n", (int)devProp.totalConstMem);
printf("max threads dimBlock.x: %d\n", (int)devProp.maxThreadsDim[0]);
printf("max threads dimBlock.y: %d\n", (int)devProp.maxThreadsDim[1]);
printf("max threads dimBlock.z: %d\n", (int)devProp.maxThreadsDim[2]);
printf("max blocks dimGrid.x: %d\n", (int)devProp.maxGridSize[0]);
printf("max blocks dimGrid.y: %d\n", (int)devProp.maxGridSize[1]);
printf("max blocks dimGrid.z: %d\n", (int)devProp.maxGridSize[2]);
printf("memory clock rate: %d\n", (int)devProp.memoryClockRate);
printf("memory bus width: %d\n", (int)devProp.memoryBusWidth);
printf("memory pitch: %lld\n", devProp.memoryBusWidth);
printf("\n");
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
int main()
{
int devCount;
cudaGetDeviceCount(&devCount);
printf("device count: %d\n", devCount);
for (int i = 0; i < devCount; ++i) {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printf("ver: %d.%d\n", devProp.major, devProp.minor);
printf("max threads per block: %d\n", (int)devProp.maxThreadsPerBlock);
printf("number of SMs: %d\n", (int)devProp.multiProcessorCount);
printf("warp size: %d\n", (int)devProp.warpSize);
printf("max warps per SM: %d\n", devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
printf("registers available per SM: %d\n", (int)devProp.regsPerBlock);
printf("shared memory available per SM: %d\n", (int)devProp.sharedMemPerBlock);
printf("clock frequency: %d\n", (int)devProp.clockRate);
printf("total const memory: %d\n", (int)devProp.totalConstMem);
printf("max threads dimBlock.x: %d\n", (int)devProp.maxThreadsDim[0]);
printf("max threads dimBlock.y: %d\n", (int)devProp.maxThreadsDim[1]);
printf("max threads dimBlock.z: %d\n", (int)devProp.maxThreadsDim[2]);
printf("max blocks dimGrid.x: %d\n", (int)devProp.maxGridSize[0]);
printf("max blocks dimGrid.y: %d\n", (int)devProp.maxGridSize[1]);
printf("max blocks dimGrid.z: %d\n", (int)devProp.maxGridSize[2]);
printf("memory clock rate: %d\n", (int)devProp.memoryClockRate);
printf("memory bus width: %d\n", (int)devProp.memoryBusWidth);
printf("memory pitch: %lld\n", devProp.memoryBusWidth);
printf("\n");
}
} | .file "tmpxft_00098a65_00000000-6_info.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "device count: %d\n"
.LC1:
.string "ver: %d.%d\n"
.LC2:
.string "max threads per block: %d\n"
.LC3:
.string "number of SMs: %d\n"
.LC4:
.string "warp size: %d\n"
.LC5:
.string "max warps per SM: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "registers available per SM: %d\n"
.align 8
.LC7:
.string "shared memory available per SM: %d\n"
.section .rodata.str1.1
.LC8:
.string "clock frequency: %d\n"
.LC9:
.string "total const memory: %d\n"
.LC10:
.string "max threads dimBlock.x: %d\n"
.LC11:
.string "max threads dimBlock.y: %d\n"
.LC12:
.string "max threads dimBlock.z: %d\n"
.LC13:
.string "max blocks dimGrid.x: %d\n"
.LC14:
.string "max blocks dimGrid.y: %d\n"
.LC15:
.string "max blocks dimGrid.z: %d\n"
.LC16:
.string "memory clock rate: %d\n"
.LC17:
.string "memory bus width: %d\n"
.LC18:
.string "memory pitch: %lld\n"
.LC19:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1056, %rsp
.cfi_def_cfa_offset 1104
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC1(%rip), %r14
leaq .LC2(%rip), %r13
leaq .LC3(%rip), %r12
leaq .LC4(%rip), %rbp
.L5:
leaq 16(%rsp), %rdi
movl %ebx, %esi
call cudaGetDeviceProperties_v2@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 640(%rsp), %eax
cltd
idivl 324(%rsp)
movl %eax, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 312(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 364(%rsp), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 368(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 340(%rsp), %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 344(%rsp), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 348(%rsp), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 352(%rsp), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 356(%rsp), %edx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 360(%rsp), %edx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 624(%rsp), %edx
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
int main()
{
int devCount;
cudaGetDeviceCount(&devCount);
printf("device count: %d\n", devCount);
for (int i = 0; i < devCount; ++i) {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printf("ver: %d.%d\n", devProp.major, devProp.minor);
printf("max threads per block: %d\n", (int)devProp.maxThreadsPerBlock);
printf("number of SMs: %d\n", (int)devProp.multiProcessorCount);
printf("warp size: %d\n", (int)devProp.warpSize);
printf("max warps per SM: %d\n", devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
printf("registers available per SM: %d\n", (int)devProp.regsPerBlock);
printf("shared memory available per SM: %d\n", (int)devProp.sharedMemPerBlock);
printf("clock frequency: %d\n", (int)devProp.clockRate);
printf("total const memory: %d\n", (int)devProp.totalConstMem);
printf("max threads dimBlock.x: %d\n", (int)devProp.maxThreadsDim[0]);
printf("max threads dimBlock.y: %d\n", (int)devProp.maxThreadsDim[1]);
printf("max threads dimBlock.z: %d\n", (int)devProp.maxThreadsDim[2]);
printf("max blocks dimGrid.x: %d\n", (int)devProp.maxGridSize[0]);
printf("max blocks dimGrid.y: %d\n", (int)devProp.maxGridSize[1]);
printf("max blocks dimGrid.z: %d\n", (int)devProp.maxGridSize[2]);
printf("memory clock rate: %d\n", (int)devProp.memoryClockRate);
printf("memory bus width: %d\n", (int)devProp.memoryBusWidth);
printf("memory pitch: %lld\n", devProp.memoryBusWidth);
printf("\n");
}
} | #include <hip/hip_runtime.h>
#include <stdio.h>
int main()
{
int devCount;
hipGetDeviceCount(&devCount);
printf("device count: %d\n", devCount);
for (int i = 0; i < devCount; ++i) {
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printf("ver: %d.%d\n", devProp.major, devProp.minor);
printf("max threads per block: %d\n", (int)devProp.maxThreadsPerBlock);
printf("number of SMs: %d\n", (int)devProp.multiProcessorCount);
printf("warp size: %d\n", (int)devProp.warpSize);
printf("max warps per SM: %d\n", devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
printf("registers available per SM: %d\n", (int)devProp.regsPerBlock);
printf("shared memory available per SM: %d\n", (int)devProp.sharedMemPerBlock);
printf("clock frequency: %d\n", (int)devProp.clockRate);
printf("total const memory: %d\n", (int)devProp.totalConstMem);
printf("max threads dimBlock.x: %d\n", (int)devProp.maxThreadsDim[0]);
printf("max threads dimBlock.y: %d\n", (int)devProp.maxThreadsDim[1]);
printf("max threads dimBlock.z: %d\n", (int)devProp.maxThreadsDim[2]);
printf("max blocks dimGrid.x: %d\n", (int)devProp.maxGridSize[0]);
printf("max blocks dimGrid.y: %d\n", (int)devProp.maxGridSize[1]);
printf("max blocks dimGrid.z: %d\n", (int)devProp.maxGridSize[2]);
printf("memory clock rate: %d\n", (int)devProp.memoryClockRate);
printf("memory bus width: %d\n", (int)devProp.memoryBusWidth);
printf("memory pitch: %lld\n", devProp.memoryBusWidth);
printf("\n");
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main()
{
int devCount;
hipGetDeviceCount(&devCount);
printf("device count: %d\n", devCount);
for (int i = 0; i < devCount; ++i) {
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printf("ver: %d.%d\n", devProp.major, devProp.minor);
printf("max threads per block: %d\n", (int)devProp.maxThreadsPerBlock);
printf("number of SMs: %d\n", (int)devProp.multiProcessorCount);
printf("warp size: %d\n", (int)devProp.warpSize);
printf("max warps per SM: %d\n", devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
printf("registers available per SM: %d\n", (int)devProp.regsPerBlock);
printf("shared memory available per SM: %d\n", (int)devProp.sharedMemPerBlock);
printf("clock frequency: %d\n", (int)devProp.clockRate);
printf("total const memory: %d\n", (int)devProp.totalConstMem);
printf("max threads dimBlock.x: %d\n", (int)devProp.maxThreadsDim[0]);
printf("max threads dimBlock.y: %d\n", (int)devProp.maxThreadsDim[1]);
printf("max threads dimBlock.z: %d\n", (int)devProp.maxThreadsDim[2]);
printf("max blocks dimGrid.x: %d\n", (int)devProp.maxGridSize[0]);
printf("max blocks dimGrid.y: %d\n", (int)devProp.maxGridSize[1]);
printf("max blocks dimGrid.z: %d\n", (int)devProp.maxGridSize[2]);
printf("memory clock rate: %d\n", (int)devProp.memoryClockRate);
printf("memory bus width: %d\n", (int)devProp.memoryBusWidth);
printf("memory pitch: %lld\n", devProp.memoryBusWidth);
printf("\n");
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main()
{
int devCount;
hipGetDeviceCount(&devCount);
printf("device count: %d\n", devCount);
for (int i = 0; i < devCount; ++i) {
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printf("ver: %d.%d\n", devProp.major, devProp.minor);
printf("max threads per block: %d\n", (int)devProp.maxThreadsPerBlock);
printf("number of SMs: %d\n", (int)devProp.multiProcessorCount);
printf("warp size: %d\n", (int)devProp.warpSize);
printf("max warps per SM: %d\n", devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
printf("registers available per SM: %d\n", (int)devProp.regsPerBlock);
printf("shared memory available per SM: %d\n", (int)devProp.sharedMemPerBlock);
printf("clock frequency: %d\n", (int)devProp.clockRate);
printf("total const memory: %d\n", (int)devProp.totalConstMem);
printf("max threads dimBlock.x: %d\n", (int)devProp.maxThreadsDim[0]);
printf("max threads dimBlock.y: %d\n", (int)devProp.maxThreadsDim[1]);
printf("max threads dimBlock.z: %d\n", (int)devProp.maxThreadsDim[2]);
printf("max blocks dimGrid.x: %d\n", (int)devProp.maxGridSize[0]);
printf("max blocks dimGrid.y: %d\n", (int)devProp.maxGridSize[1]);
printf("max blocks dimGrid.z: %d\n", (int)devProp.maxGridSize[2]);
printf("memory clock rate: %d\n", (int)devProp.memoryClockRate);
printf("memory bus width: %d\n", (int)devProp.memoryBusWidth);
printf("memory pitch: %lld\n", devProp.memoryBusWidth);
printf("\n");
}
} | .text
.file "info.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
movl 4(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 8(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl 368(%rsp), %esi
movl 372(%rsp), %edx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 396(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 316(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 632(%rsp), %eax
cltd
idivl 316(%rsp)
movl $.L.str.5, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
movl 312(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 304(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 356(%rsp), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movl 360(%rsp), %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 332(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl 340(%rsp), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movl 344(%rsp), %esi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movl 348(%rsp), %esi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movl 352(%rsp), %esi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
movl 616(%rsp), %esi
movl $.L.str.16, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.17, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.18, %edi
xorl %eax, %eax
callq printf
movl $10, %edi
callq putchar@PLT
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "device count: %d\n"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "ver: %d.%d\n"
.size .L.str.1, 12
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "max threads per block: %d\n"
.size .L.str.2, 27
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "number of SMs: %d\n"
.size .L.str.3, 19
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "warp size: %d\n"
.size .L.str.4, 15
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "max warps per SM: %d\n"
.size .L.str.5, 22
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "registers available per SM: %d\n"
.size .L.str.6, 32
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "shared memory available per SM: %d\n"
.size .L.str.7, 36
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "clock frequency: %d\n"
.size .L.str.8, 21
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "total const memory: %d\n"
.size .L.str.9, 24
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "max threads dimBlock.x: %d\n"
.size .L.str.10, 28
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "max threads dimBlock.y: %d\n"
.size .L.str.11, 28
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "max threads dimBlock.z: %d\n"
.size .L.str.12, 28
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "max blocks dimGrid.x: %d\n"
.size .L.str.13, 26
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "max blocks dimGrid.y: %d\n"
.size .L.str.14, 26
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "max blocks dimGrid.z: %d\n"
.size .L.str.15, 26
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz "memory clock rate: %d\n"
.size .L.str.16, 23
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "memory bus width: %d\n"
.size .L.str.17, 22
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "memory pitch: %lld\n"
.size .L.str.18, 20
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00098a65_00000000-6_info.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# atexit cleanup helper emitted by nvcc: unregisters the module's fat binary.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# Pass the handle saved at registration time (static _ZL20__cudaFatCubinHandle).
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "device count: %d\n"
.LC1:
.string "ver: %d.%d\n"
.LC2:
.string "max threads per block: %d\n"
.LC3:
.string "number of SMs: %d\n"
.LC4:
.string "warp size: %d\n"
.LC5:
.string "max warps per SM: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "registers available per SM: %d\n"
.align 8
.LC7:
.string "shared memory available per SM: %d\n"
.section .rodata.str1.1
.LC8:
.string "clock frequency: %d\n"
.LC9:
.string "total const memory: %d\n"
.LC10:
.string "max threads dimBlock.x: %d\n"
.LC11:
.string "max threads dimBlock.y: %d\n"
.LC12:
.string "max threads dimBlock.z: %d\n"
.LC13:
.string "max blocks dimGrid.x: %d\n"
.LC14:
.string "max blocks dimGrid.y: %d\n"
.LC15:
.string "max blocks dimGrid.z: %d\n"
.LC16:
.string "memory clock rate: %d\n"
.LC17:
.string "memory bus width: %d\n"
.LC18:
.string "memory pitch: %lld\n"
.LC19:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1056, %rsp
.cfi_def_cfa_offset 1104
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC1(%rip), %r14
leaq .LC2(%rip), %r13
leaq .LC3(%rip), %r12
leaq .LC4(%rip), %rbp
.L5:
leaq 16(%rsp), %rdi
movl %ebx, %esi
call cudaGetDeviceProperties_v2@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 640(%rsp), %eax
cltd
idivl 324(%rsp)
movl %eax, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 312(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 364(%rsp), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 368(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 340(%rsp), %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 344(%rsp), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 348(%rsp), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 352(%rsp), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 356(%rsp), %edx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 360(%rsp), %edx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 624(%rsp), %edx
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
# Static initializer emitted by nvcc (run via .init_array before main):
# registers the embedded fat binary and arranges its unregistration at exit.
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
# Save the returned handle for the atexit unregister helper below.
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "info.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
movl 4(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 8(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl 368(%rsp), %esi
movl 372(%rsp), %edx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 396(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 316(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 632(%rsp), %eax
cltd
idivl 316(%rsp)
movl $.L.str.5, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
movl 312(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 304(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 356(%rsp), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movl 360(%rsp), %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 332(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl 340(%rsp), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movl 344(%rsp), %esi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movl 348(%rsp), %esi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movl 352(%rsp), %esi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
movl 616(%rsp), %esi
movl $.L.str.16, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.17, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.18, %edi
xorl %eax, %eax
callq printf
movl $10, %edi
callq putchar@PLT
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "device count: %d\n"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "ver: %d.%d\n"
.size .L.str.1, 12
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "max threads per block: %d\n"
.size .L.str.2, 27
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "number of SMs: %d\n"
.size .L.str.3, 19
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "warp size: %d\n"
.size .L.str.4, 15
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "max warps per SM: %d\n"
.size .L.str.5, 22
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "registers available per SM: %d\n"
.size .L.str.6, 32
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "shared memory available per SM: %d\n"
.size .L.str.7, 36
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "clock frequency: %d\n"
.size .L.str.8, 21
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "total const memory: %d\n"
.size .L.str.9, 24
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "max threads dimBlock.x: %d\n"
.size .L.str.10, 28
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "max threads dimBlock.y: %d\n"
.size .L.str.11, 28
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "max threads dimBlock.z: %d\n"
.size .L.str.12, 28
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "max blocks dimGrid.x: %d\n"
.size .L.str.13, 26
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "max blocks dimGrid.y: %d\n"
.size .L.str.14, 26
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "max blocks dimGrid.z: %d\n"
.size .L.str.15, 26
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz "memory clock rate: %d\n"
.size .L.str.16, 23
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "memory bus width: %d\n"
.size .L.str.17, 22
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "memory pitch: %lld\n"
.size .L.str.18, 20
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<time.h>
#include<sys/time.h>
#define SQUARE(x) ((x)*(x))
#define PI 3.14
#define BLOCK_SIZE 16
#define N 4096
__device__ __constant__ float pi=3.14;
__device__ __constant__ int sobelX[9]={1,0,-1,2,0,-2,1,0,-1};
__device__ __constant__ int sobelY[9]={1,2,1,0,0,0,-1,-2,-1};
// Linearly rescales each double pixel from [min, max] to an unsigned char
// in [0, 255]. One thread per pixel over a flat width*height array; a 1D
// launch must cover at least width*height threads (extra threads exit).
// FIX: the original divided by (max - min) unconditionally; a constant
// image (max == min) produced a divide-by-zero (NaN cast to uchar is UB).
__global__ void NormalizeGrayGPU(double input[], int width, int height, unsigned char output[], double min, double max)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < width*height)
    {
        double range = max - min;
        // A flat image has no dynamic range; map every pixel to 0.
        output[index] = (range != 0.0)
            ? (unsigned char)((input[index] - min)*255/range)
            : (unsigned char)0;
    }
}
/* Returns the smallest element of a width*height image stored as a flat
 * array. Assumes width*height >= 1 (input[0] is read unconditionally). */
double FindMin(double input[], int width, int height)
{
	int count = width*height;
	double smallest = input[0];
	for (int idx = 1; idx < count; idx++)
	{
		if (input[idx] < smallest)
			smallest = input[idx];
	}
	return smallest;
}
/* Returns the largest element of a width*height image stored as a flat
 * array. Assumes width*height >= 1 (input[0] is read unconditionally). */
double FindMax(double input[], int width, int height)
{
	int count = width*height;
	double largest = input[0];
	for (int idx = 1; idx < count; idx++)
	{
		if (input[idx] > largest)
			largest = input[idx];
	}
	return largest;
}
// Applies the 3x3 Sobel operators (__constant__ sobelX / sobelY) to image A
// and writes the horizontal gradient, vertical gradient, and gradient
// magnitude for each pixel. Expects a 2D launch covering width x height;
// out-of-range threads exit immediately.
// FIX: the original indexed A[(row+r2)*width + (col+c2)] with r2,c2 in
// {-1,0,1} even at the image border (row 0/height-1, col 0/width-1),
// reading out of bounds. Border pixels now get a defined gradient of 0.
__global__ void SobelFilter_gpu(unsigned char* A, double *gradImageX, double *gradImageY, double *gradMag, int width, int height)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= height || col >= width)
        return;
    int out = row*width + col;
    // Pixels without a full 3x3 neighborhood: define gradient as zero.
    if (row == 0 || col == 0 || row == height - 1 || col == width - 1)
    {
        gradImageX[out] = 0;
        gradImageY[out] = 0;
        gradMag[out] = 0;
        return;
    }
    double tempx = 0;
    double tempy = 0;
    for (int r2 = -1; r2 <= 1; r2++)
    {
        for (int c2 = -1; c2 <= 1; c2++)
        {
            unsigned char pix = A[(row + r2)*width + (col + c2)];
            tempx += pix * sobelX[(r2 + 1)*3 + c2 + 1];
            tempy += pix * sobelY[(r2 + 1)*3 + c2 + 1];
        }
    }
    gradImageX[out] = tempx;
    gradImageY[out] = tempy;
    gradMag[out] = sqrt(tempx*tempx + tempy*tempy);
}
// Per-pixel gradient orientation in degrees from the Y/X Sobel gradients.
// One thread per pixel over a flat width*height array; the file-scope
// __constant__ 'pi' (3.14f) supplies the radians-to-degrees scale factor.
__global__ void theta_gpu(double *gradImageY, double *gradImageX, double *gradPhase, int width, int height){
    int pixel = blockIdx.x*blockDim.x + threadIdx.x;
    if (pixel >= width*height)
        return;
    float degrees = atan2(gradImageY[pixel], gradImageX[pixel]);
    degrees = degrees*180/pi;
    gradPhase[pixel] = degrees;
}
// Driver: loads two PGM images (argv[1] = input, argv[2] = test), computes
// Sobel gradients, magnitudes and orientations for both on the GPU, builds
// a 9-bin histogram of gradient orientations for each, reports the
// histogram dissimilarity, writes the normalized magnitude images to disk,
// and prints GPU (event) and CPU (gettimeofday) timings for three phases.
int main(int argc, char *argv[])
{
FILE *fptr;
// BUG(review): inputHeader/testHeader are uninitialized char* pointers, yet
// fscanf below is given &inputHeader for a "%s" conversion — the header
// string is written over the pointer variable (and past it). These should
// be char arrays (e.g. like Header_1/Header_2).
char *inputHeader, *testHeader;
int inputCols, inputRows, inputBytes;
int testCols, testRows, testBytes;
char Header_1[320], Header_2[320];
unsigned char *inputImage, *testImage;
unsigned char *normalGradMag, *normalGrad_x, *normalGrad_y, *normalGradPhase;
unsigned char *normaltestMag, *normaltest_x, *normaltest_y, *normaltestPhase;
double *gradPhase, *gradMag;
double *testgradPhase, *testgradMag;
double max=0;
double min=0;
float gpu_time_1 = 0;
float gpu_time_2 = 0;
float gpu_time_3 = 0;
//GPU variables
double *d_gradImageX, *d_gradImageY, *d_gradPhase, *d_gradMag;
unsigned char *d_inputImage, *d_normalGradMag, *d_normalGradX, *d_normalGradY, *d_normalGradPhase;
unsigned char *d_testImage;
double *d_testgradImageX, *d_testgradImageY, *d_testgradMag, *d_testgradPhase;
unsigned char *d_testnormalGradMag, *d_testnormalGradX, *d_testnormalGradY, *d_testnormalGradPhase;
cudaError_t err;
struct timeval cstart1, cstart2, cstart3, cend1, cend2, cend3;
cudaEvent_t start1, start2, start3, stop1, stop2, stop3;
printf("Initialization done!\n");
// Phase 1 (CPU): file loading.
gettimeofday(&cstart1, NULL);
if ((fptr=fopen(argv[1],"r"))==NULL)
{
printf("Unable to open input file for reading\n");
exit(0);
}
//Open and load input image
// NOTE(review): the FILE* from the existence check above is leaked — the
// file is reopened here without closing the first handle.
fptr = fopen(argv[1], "r");
fscanf(fptr,"%s %d %d %d",&inputHeader, &inputCols, &inputRows, &inputBytes);
Header_1[0]=fgetc(fptr); /* read white-space character that separates header */
inputImage = (unsigned char*)calloc(inputCols*inputRows,sizeof(unsigned char));
fread(inputImage, 1, inputCols*inputRows, fptr);
fclose(fptr);
printf("Input file opened!\n");
if ((fptr = fopen(argv[2], "r")) == NULL)
{
printf("Unable to open test file for reading\n");
exit(0);
}
//Open and load test image
// NOTE(review): same double-fopen leak as above for the test image.
fptr = fopen(argv[2], "rb");
fscanf(fptr, "%s %d %d %d", &testHeader, &testCols, &testRows, &testBytes);
Header_2[0] = fgetc(fptr); /* read white-space character that separates header */
testImage = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
fread(testImage, 1, testCols*testRows, fptr);
fclose(fptr);
printf("Test file opened!\n");
gettimeofday(&cend1, NULL);
// GPU phase 1: device allocations, timed with start1/stop1.
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventRecord(start1);
cudaEventSynchronize(start1);
//cudaMalloc for Input image
// NOTE(review): the "/n" in every error message below is a typo for "\n";
// left as-is here since these are runtime strings.
err=cudaMalloc(&d_inputImage,(inputRows*inputCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_inputImage");
err=cudaMalloc(&d_gradImageX,(inputRows*inputCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_gradImageX");
err=cudaMalloc(&d_gradImageY,(inputRows*inputCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_gradImageY");
err=cudaMalloc(&d_gradPhase,(inputRows*inputCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_gradPhase");
err=cudaMalloc(&d_gradMag,(inputRows*inputCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_gradMag");
err=cudaMalloc(&d_normalGradMag,(inputRows*inputCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_normalGradMag");
err=cudaMalloc(&d_normalGradX,(inputRows*inputCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_normalGradX");
err=cudaMalloc(&d_normalGradY,(inputRows*inputCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_normalGradY");
err=cudaMalloc(&d_normalGradPhase,(inputRows*inputCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_normalGradPhase");
//cudaMalloc for test image
err=cudaMalloc(&d_testImage,(testRows*testCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testImage");
err=cudaMalloc(&d_testgradImageX,(testRows*testCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testgradImageX");
err=cudaMalloc(&d_testgradImageY,(testRows*testCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testgradImageY");
err=cudaMalloc(&d_testgradPhase,(testRows*testCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testgradPhase");
err=cudaMalloc(&d_testgradMag,(testRows*testCols*sizeof(double)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testgradMag");
err=cudaMalloc(&d_testnormalGradMag,(testRows*testCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testnormalGradMag");
err=cudaMalloc(&d_testnormalGradX,(testRows*testCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testnormalGradX");
err=cudaMalloc(&d_testnormalGradY,(testRows*testCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testnormalGradY");
err=cudaMalloc(&d_testnormalGradPhase,(testRows*testCols*sizeof(unsigned char)));
if(err != cudaSuccess) printf("/n Error in cudaMalloc d_testnormalGradPhase");
// BUG(review): cudaEventSynchronize(stop1) runs BEFORE cudaEventRecord(stop1)
// — syncing an unrecorded event is a no-op, so gpu_time_1 may not cover the
// completion of the allocations. The two calls should be swapped.
cudaEventSynchronize(stop1);
cudaEventRecord(stop1);
// Phase 2 (CPU): host-side buffer allocation.
gettimeofday(&cstart2, NULL);
//Normalized input gradient images
normalGradMag = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
normalGrad_x = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
normalGrad_y = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
normalGradPhase = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
gradPhase = (double*)calloc(inputCols*inputRows, sizeof(double));
gradMag = (double*)calloc(inputCols*inputRows, sizeof(double));
//Normalized test gradient images
normaltestMag = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
normaltest_x = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
normaltest_y = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
normaltestPhase = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
testgradPhase = (double*)calloc(testCols*testRows, sizeof(double));
testgradMag = (double*)calloc(testCols*testRows, sizeof(double));
gettimeofday(&cend2, NULL);
// GPU phase 2: transfers + kernels, timed with start2/stop2.
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cudaEventRecord(start2);
cudaEventSynchronize(start2);
//Compute gradients and phase for input image
err=cudaMemcpy(d_inputImage, inputImage, (inputRows*inputCols*sizeof(unsigned char)), cudaMemcpyHostToDevice);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of d_inputImage");
/* Launch Kernel*/
// Grid sized for the compile-time N (4096), not the actual image; the
// kernels' bounds checks discard the excess threads.
dim3 dimGrid(ceil((float)(N+2)/BLOCK_SIZE), ceil((float)(N+2)/BLOCK_SIZE),1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
SobelFilter_gpu<<<dimGrid,dimBlock>>>(d_inputImage, d_gradImageX, d_gradImageY, d_gradMag, inputCols, inputRows);
cudaDeviceSynchronize();
dim3 BlockDim = dim3(1024,1,1);
dim3 GridDim = dim3(10000,1,1);
theta_gpu<<<GridDim,BlockDim>>>(d_gradImageY,d_gradImageX, d_gradPhase, inputCols, inputRows);
//Compute gradients and phase for test image
err=cudaMemcpy(d_testImage, testImage, (testRows*testCols*sizeof(unsigned char)), cudaMemcpyHostToDevice);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of d_testImage");
/* Launch Kernel*/
SobelFilter_gpu<<<dimGrid,dimBlock>>>(d_testImage, d_testgradImageX, d_testgradImageY, d_testgradMag, testCols, testRows);
cudaDeviceSynchronize();
theta_gpu<<<GridDim,BlockDim>>>(d_testgradImageY,d_testgradImageX, d_testgradPhase, testCols, testRows);
// NOTE(review): from here down, the cudaMemcpy return values are not
// assigned to err — every "if(err != cudaSuccess)" below re-tests the stale
// result of the d_testImage copy above.
cudaMemcpy(gradMag, d_gradMag,(inputCols*inputRows*sizeof(double)),cudaMemcpyDeviceToHost);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of normalGrad_x");
min = FindMin(gradMag, inputCols, inputRows);
max = FindMax(gradMag, inputCols, inputRows);
NormalizeGrayGPU<<<GridDim,BlockDim>>>(d_gradMag, inputCols, inputRows, d_normalGradMag, min, max);
cudaDeviceSynchronize();
// BUG(review): the copy size uses inputCols*inputRows, but testgradMag and
// d_testgradMag were allocated with testCols*testRows elements — this
// overflows both buffers whenever the test image is smaller than the input.
cudaMemcpy(testgradMag, d_testgradMag,(inputCols*inputRows*sizeof(double)),cudaMemcpyDeviceToHost);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of normalGrad_x");
min = FindMin(testgradMag, testCols, testRows);
max = FindMax(testgradMag, testCols, testRows);
NormalizeGrayGPU<<<GridDim,BlockDim>>>(d_testgradMag, testCols, testRows, d_testnormalGradMag, min, max);
cudaDeviceSynchronize();
cudaMemcpy(gradPhase, d_gradPhase,(inputCols*inputRows*sizeof(double)),cudaMemcpyDeviceToHost);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of gradPhase");
cudaMemcpy(testgradPhase, d_testgradPhase,(testCols*testRows*sizeof(double)),cudaMemcpyDeviceToHost);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of testgradPhase");
cudaMemcpy(normalGradMag, d_normalGradMag,(inputCols*inputRows*sizeof(unsigned char)),cudaMemcpyDeviceToHost);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of normalGradMag");
cudaMemcpy(normaltestMag, d_testnormalGradMag,(testCols*testRows*sizeof(unsigned char)),cudaMemcpyDeviceToHost);
if(err != cudaSuccess) printf("/n Error in cudaMemcpy of normaltestMag");
cudaEventRecord(stop2);
cudaEventSynchronize(stop2);
// Phase 3 (CPU): histograms, comparison, output files.
gettimeofday(&cstart3, NULL);
int histo[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int testhisto[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int difference[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
//Compute histogram of gradient orientations of input image
// Only pixels with normalized magnitude > 25 contribute; 9 bins of ~20
// degrees over |angle|. NOTE(review): exact boundary angles (0, 21, 41,
// ..., 161) match no strict-inequality range and fall into histo[8].
double angle = 0;
for (int i = 0; i < inputRows*inputCols; i++)
{
if (normalGradMag[i] > 25)
{
angle = fabs(gradPhase[i]);
if (angle > 0 && angle < 21) histo[0]++;
else if (angle > 21 && angle < 41) histo[1]++;
else if (angle > 41 && angle < 61) histo[2]++;
else if (angle > 61 && angle < 81) histo[3]++;
else if (angle > 81 && angle < 101) histo[4]++;
else if (angle > 101 && angle < 121) histo[5]++;
else if (angle > 121 && angle < 141) histo[6]++;
else if (angle > 141 && angle < 161) histo[7]++;
else histo[8]++;
}
}
printf("here6\n");
//Compute histogram of gradient orientations of test image
angle = 0;
for (int i = 0; i < testRows*testCols; i++)
{
if (normaltestMag[i] > 25)
{
angle = fabs(testgradPhase[i]);
if (angle > 0 && angle < 21) testhisto[0]++;
else if (angle > 21 && angle < 41) testhisto[1]++;
else if (angle > 41 && angle < 61) testhisto[2]++;
else if (angle > 61 && angle < 81) testhisto[3]++;
else if (angle > 81 && angle < 101) testhisto[4]++;
else if (angle > 101 && angle < 121) testhisto[5]++;
else if (angle > 121 && angle < 141) testhisto[6]++;
else if (angle > 141 && angle < 161) testhisto[7]++;
else testhisto[8]++;
}
}
printf("here7\n");
//Check the dissimilarity in histogram of gradient orientations
// Dissimilarity = sum of absolute per-bin count differences (L1 distance).
int sumDiff = 0;
for (int i = 0; i < 9; i++)
{
difference[i] = abs(histo[i] - testhisto[i]);
printf("diff[%d] = %d\n", i, difference[i]);
sumDiff += difference[i];
}
//float mismatch = (float)sumDiff*100/(testCols*testRows);
printf("HOG mismatch = %d\n", sumDiff);
// Write both normalized gradient-magnitude images as binary PGM (P5).
fptr=fopen("input_grad_mag.pgm","w");
fprintf(fptr,"P5 %d %d 255\n",inputCols,inputRows);
fwrite(normalGradMag,inputCols*inputRows,1,fptr);
fclose(fptr);
fptr=fopen("test_grad_mag.pgm","w");
fprintf(fptr,"P5 %d %d 255\n",testCols,testRows);
fwrite(normaltestMag,testCols*testRows,1,fptr);
fclose(fptr);
//Free allocated memory
// NOTE(review): inputImage, testImage, gradPhase, gradMag, testgradPhase
// and testgradMag are never freed (leaked until process exit).
free(normalGradMag);
free(normalGradPhase);
free(normalGrad_x);
free(normalGrad_y);
free(normaltestMag);
free(normaltestPhase);
free(normaltest_x);
free(normaltest_y);
gettimeofday(&cend3, NULL);
//Free Allocated memory on the device. Don't forget.
// GPU phase 3: device frees, timed with start3/stop3.
cudaEventCreate(&start3);
cudaEventCreate(&stop3);
cudaEventRecord(start3);
cudaEventSynchronize(start3);
cudaFree(d_gradImageX);
cudaFree(d_gradImageY);
cudaFree(d_gradPhase);
cudaFree(d_gradMag);
cudaFree(d_inputImage);
cudaFree(d_normalGradMag);
cudaFree(d_normalGradX);
cudaFree(d_normalGradY);
cudaFree(d_normalGradPhase);
cudaFree(d_testgradImageX);
cudaFree(d_testgradImageY);
cudaFree(d_testgradPhase);
cudaFree(d_testgradMag);
cudaFree(d_testImage);
cudaFree(d_testnormalGradMag);
cudaFree(d_testnormalGradX);
cudaFree(d_testnormalGradY);
cudaFree(d_testnormalGradPhase);
cudaEventRecord(stop3);
cudaEventSynchronize(stop3);
//Calculate time taken
// float gpu_time_1 = 0;
// float gpu_time_2 = 0;
// float gpu_time_3 = 0;
cudaEventElapsedTime(&gpu_time_1, start1, stop1);
cudaEventElapsedTime(&gpu_time_2, start2, stop2);
cudaEventElapsedTime(&gpu_time_3, start3, stop3);
printf("gpu_time_1 = %f\t gpu_time_2 = %f\t gpu_time_3 = %f\n",gpu_time_1, gpu_time_2, gpu_time_3);
printf("Total GPU time = %f\n", gpu_time_1+gpu_time_2+gpu_time_3);
// Convert gettimeofday deltas from microseconds to milliseconds.
float cpu_time_1 = (((cend1.tv_sec * 1000000 + cend1.tv_usec) - (cstart1.tv_sec * 1000000 + cstart1.tv_usec))/1000.0);
float cpu_time_2 = (((cend2.tv_sec * 1000000 + cend2.tv_usec) - (cstart2.tv_sec * 1000000 + cstart2.tv_usec))/1000.0);
float cpu_time_3 = (((cend3.tv_sec * 1000000 + cend3.tv_usec) - (cstart3.tv_sec * 1000000 + cstart3.tv_usec))/1000.0);
printf("cpu_time_1 = %f\t cpu_time_2 = %f\t cpu_time_3 = %f\n",cpu_time_1, cpu_time_2, cpu_time_3);
printf("Total CPU time = %f\n", cpu_time_1+cpu_time_2+cpu_time_3);
printf(" Total time = %f\n", gpu_time_1+gpu_time_2+gpu_time_3+ cpu_time_1+cpu_time_2+cpu_time_3);
return 0;
}
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include<hip/hip_runtime.h>
#include<time.h>
#include<sys/time.h>
#define SQUARE(x) ((x)*(x))
#define PI 3.14
#define BLOCK_SIZE 16
#define N 4096
__device__ __constant__ float pi=3.14;
__device__ __constant__ int sobelX[9]={1,0,-1,2,0,-2,1,0,-1};
__device__ __constant__ int sobelY[9]={1,2,1,0,0,0,-1,-2,-1};
// Linearly rescales each double pixel from [min, max] to an unsigned char
// in [0, 255]. One thread per pixel over a flat width*height array; a 1D
// launch must cover at least width*height threads (extra threads exit).
// FIX: the original divided by (max - min) unconditionally; a constant
// image (max == min) produced a divide-by-zero (NaN cast to uchar is UB).
__global__ void NormalizeGrayGPU(double input[], int width, int height, unsigned char output[], double min, double max)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < width*height)
    {
        double range = max - min;
        // A flat image has no dynamic range; map every pixel to 0.
        output[index] = (range != 0.0)
            ? (unsigned char)((input[index] - min)*255/range)
            : (unsigned char)0;
    }
}
/* Scans a flat width*height image and returns its minimum value.
 * Precondition: width*height >= 1 (the first element seeds the scan). */
double FindMin(double input[], int width, int height)
{
	double best = input[0];
	int total = width * height;
	for (int k = 1; k < total; ++k)
		if (input[k] < best)
			best = input[k];
	return best;
}
/* Scans a flat width*height image and returns its maximum value.
 * Precondition: width*height >= 1 (the first element seeds the scan). */
double FindMax(double input[], int width, int height)
{
	double best = input[0];
	int total = width * height;
	for (int k = 1; k < total; ++k)
		if (input[k] > best)
			best = input[k];
	return best;
}
// Applies the 3x3 Sobel operators (__constant__ sobelX / sobelY) to image A
// and writes the horizontal gradient, vertical gradient, and gradient
// magnitude for each pixel. Expects a 2D launch covering width x height;
// out-of-range threads exit immediately.
// FIX: the original indexed A[(row+r2)*width + (col+c2)] with r2,c2 in
// {-1,0,1} even at the image border (row 0/height-1, col 0/width-1),
// reading out of bounds. Border pixels now get a defined gradient of 0.
__global__ void SobelFilter_gpu(unsigned char* A, double *gradImageX, double *gradImageY, double *gradMag, int width, int height)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= height || col >= width)
        return;
    int out = row*width + col;
    // Pixels without a full 3x3 neighborhood: define gradient as zero.
    if (row == 0 || col == 0 || row == height - 1 || col == width - 1)
    {
        gradImageX[out] = 0;
        gradImageY[out] = 0;
        gradMag[out] = 0;
        return;
    }
    double tempx = 0;
    double tempy = 0;
    for (int r2 = -1; r2 <= 1; r2++)
    {
        for (int c2 = -1; c2 <= 1; c2++)
        {
            unsigned char pix = A[(row + r2)*width + (col + c2)];
            tempx += pix * sobelX[(r2 + 1)*3 + c2 + 1];
            tempy += pix * sobelY[(r2 + 1)*3 + c2 + 1];
        }
    }
    gradImageX[out] = tempx;
    gradImageY[out] = tempy;
    gradMag[out] = sqrt(tempx*tempx + tempy*tempy);
}
// Per-pixel gradient orientation in degrees from the Y/X Sobel gradients.
// One thread per pixel over a flat width*height array; the file-scope
// __constant__ 'pi' (3.14f) supplies the radians-to-degrees scale factor.
__global__ void theta_gpu(double *gradImageY, double *gradImageX, double *gradPhase, int width, int height){
    int pixel = blockIdx.x*blockDim.x + threadIdx.x;
    if (pixel >= width*height)
        return;
    float degrees = atan2(gradImageY[pixel], gradImageX[pixel]);
    degrees = degrees*180/pi;
    gradPhase[pixel] = degrees;
}
int main(int argc, char *argv[])
{
FILE *fptr;
char *inputHeader, *testHeader;
int inputCols, inputRows, inputBytes;
int testCols, testRows, testBytes;
char Header_1[320], Header_2[320];
unsigned char *inputImage, *testImage;
unsigned char *normalGradMag, *normalGrad_x, *normalGrad_y, *normalGradPhase;
unsigned char *normaltestMag, *normaltest_x, *normaltest_y, *normaltestPhase;
double *gradPhase, *gradMag;
double *testgradPhase, *testgradMag;
double max=0;
double min=0;
float gpu_time_1 = 0;
float gpu_time_2 = 0;
float gpu_time_3 = 0;
//GPU variables
double *d_gradImageX, *d_gradImageY, *d_gradPhase, *d_gradMag;
unsigned char *d_inputImage, *d_normalGradMag, *d_normalGradX, *d_normalGradY, *d_normalGradPhase;
unsigned char *d_testImage;
double *d_testgradImageX, *d_testgradImageY, *d_testgradMag, *d_testgradPhase;
unsigned char *d_testnormalGradMag, *d_testnormalGradX, *d_testnormalGradY, *d_testnormalGradPhase;
hipError_t err;
struct timeval cstart1, cstart2, cstart3, cend1, cend2, cend3;
hipEvent_t start1, start2, start3, stop1, stop2, stop3;
printf("Initialization done!\n");
gettimeofday(&cstart1, NULL);
if ((fptr=fopen(argv[1],"r"))==NULL)
{
printf("Unable to open input file for reading\n");
exit(0);
}
//Open and load input image
fptr = fopen(argv[1], "r");
fscanf(fptr,"%s %d %d %d",&inputHeader, &inputCols, &inputRows, &inputBytes);
Header_1[0]=fgetc(fptr); /* read white-space character that separates header */
inputImage = (unsigned char*)calloc(inputCols*inputRows,sizeof(unsigned char));
fread(inputImage, 1, inputCols*inputRows, fptr);
fclose(fptr);
printf("Input file opened!\n");
if ((fptr = fopen(argv[2], "r")) == NULL)
{
printf("Unable to open test file for reading\n");
exit(0);
}
//Open and load test image
fptr = fopen(argv[2], "rb");
fscanf(fptr, "%s %d %d %d", &testHeader, &testCols, &testRows, &testBytes);
Header_2[0] = fgetc(fptr); /* read white-space character that separates header */
testImage = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
fread(testImage, 1, testCols*testRows, fptr);
fclose(fptr);
printf("Test file opened!\n");
gettimeofday(&cend1, NULL);
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipEventRecord(start1);
hipEventSynchronize(start1);
//cudaMalloc for Input image
err=hipMalloc(&d_inputImage,(inputRows*inputCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_inputImage");
err=hipMalloc(&d_gradImageX,(inputRows*inputCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_gradImageX");
err=hipMalloc(&d_gradImageY,(inputRows*inputCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_gradImageY");
err=hipMalloc(&d_gradPhase,(inputRows*inputCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_gradPhase");
err=hipMalloc(&d_gradMag,(inputRows*inputCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_gradMag");
err=hipMalloc(&d_normalGradMag,(inputRows*inputCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_normalGradMag");
err=hipMalloc(&d_normalGradX,(inputRows*inputCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_normalGradX");
err=hipMalloc(&d_normalGradY,(inputRows*inputCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_normalGradY");
err=hipMalloc(&d_normalGradPhase,(inputRows*inputCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_normalGradPhase");
//cudaMalloc for test image
err=hipMalloc(&d_testImage,(testRows*testCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testImage");
err=hipMalloc(&d_testgradImageX,(testRows*testCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testgradImageX");
err=hipMalloc(&d_testgradImageY,(testRows*testCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testgradImageY");
err=hipMalloc(&d_testgradPhase,(testRows*testCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testgradPhase");
err=hipMalloc(&d_testgradMag,(testRows*testCols*sizeof(double)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testgradMag");
err=hipMalloc(&d_testnormalGradMag,(testRows*testCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testnormalGradMag");
err=hipMalloc(&d_testnormalGradX,(testRows*testCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testnormalGradX");
err=hipMalloc(&d_testnormalGradY,(testRows*testCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testnormalGradY");
err=hipMalloc(&d_testnormalGradPhase,(testRows*testCols*sizeof(unsigned char)));
if(err != hipSuccess) printf("/n Error in hipMalloc d_testnormalGradPhase");
hipEventSynchronize(stop1);
hipEventRecord(stop1);
gettimeofday(&cstart2, NULL);
//Normalized input gradient images
normalGradMag = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
normalGrad_x = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
normalGrad_y = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
normalGradPhase = (unsigned char*)calloc(inputCols*inputRows, sizeof(unsigned char));
gradPhase = (double*)calloc(inputCols*inputRows, sizeof(double));
gradMag = (double*)calloc(inputCols*inputRows, sizeof(double));
//Normalized test gradient images
normaltestMag = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
normaltest_x = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
normaltest_y = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
normaltestPhase = (unsigned char*)calloc(testCols*testRows, sizeof(unsigned char));
testgradPhase = (double*)calloc(testCols*testRows, sizeof(double));
testgradMag = (double*)calloc(testCols*testRows, sizeof(double));
gettimeofday(&cend2, NULL);
hipEventCreate(&start2);
hipEventCreate(&stop2);
hipEventRecord(start2);
hipEventSynchronize(start2);
//Compute gradients and phase for input image
err=hipMemcpy(d_inputImage, inputImage, (inputRows*inputCols*sizeof(unsigned char)), hipMemcpyHostToDevice);
if(err != hipSuccess) printf("/n Error in hipMemcpy of d_inputImage");
/* Launch Kernel*/
dim3 dimGrid(ceil((float)(N+2)/BLOCK_SIZE), ceil((float)(N+2)/BLOCK_SIZE),1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
SobelFilter_gpu<<<dimGrid,dimBlock>>>(d_inputImage, d_gradImageX, d_gradImageY, d_gradMag, inputCols, inputRows);
hipDeviceSynchronize();
dim3 BlockDim = dim3(1024,1,1);
dim3 GridDim = dim3(10000,1,1);
theta_gpu<<<GridDim,BlockDim>>>(d_gradImageY,d_gradImageX, d_gradPhase, inputCols, inputRows);
//Compute gradients and phase for test image
err=hipMemcpy(d_testImage, testImage, (testRows*testCols*sizeof(unsigned char)), hipMemcpyHostToDevice);
if(err != hipSuccess) printf("/n Error in hipMemcpy of d_testImage");
/* Launch Kernel*/
SobelFilter_gpu<<<dimGrid,dimBlock>>>(d_testImage, d_testgradImageX, d_testgradImageY, d_testgradMag, testCols, testRows);
hipDeviceSynchronize();
theta_gpu<<<GridDim,BlockDim>>>(d_testgradImageY,d_testgradImageX, d_testgradPhase, testCols, testRows);
hipMemcpy(gradMag, d_gradMag,(inputCols*inputRows*sizeof(double)),hipMemcpyDeviceToHost);
if(err != hipSuccess) printf("/n Error in hipMemcpy of normalGrad_x");
min = FindMin(gradMag, inputCols, inputRows);
max = FindMax(gradMag, inputCols, inputRows);
NormalizeGrayGPU<<<GridDim,BlockDim>>>(d_gradMag, inputCols, inputRows, d_normalGradMag, min, max);
hipDeviceSynchronize();
hipMemcpy(testgradMag, d_testgradMag,(inputCols*inputRows*sizeof(double)),hipMemcpyDeviceToHost);
if(err != hipSuccess) printf("/n Error in hipMemcpy of normalGrad_x");
min = FindMin(testgradMag, testCols, testRows);
max = FindMax(testgradMag, testCols, testRows);
NormalizeGrayGPU<<<GridDim,BlockDim>>>(d_testgradMag, testCols, testRows, d_testnormalGradMag, min, max);
hipDeviceSynchronize();
hipMemcpy(gradPhase, d_gradPhase,(inputCols*inputRows*sizeof(double)),hipMemcpyDeviceToHost);
if(err != hipSuccess) printf("/n Error in hipMemcpy of gradPhase");
hipMemcpy(testgradPhase, d_testgradPhase,(testCols*testRows*sizeof(double)),hipMemcpyDeviceToHost);
if(err != hipSuccess) printf("/n Error in hipMemcpy of testgradPhase");
hipMemcpy(normalGradMag, d_normalGradMag,(inputCols*inputRows*sizeof(unsigned char)),hipMemcpyDeviceToHost);
if(err != hipSuccess) printf("/n Error in hipMemcpy of normalGradMag");
hipMemcpy(normaltestMag, d_testnormalGradMag,(testCols*testRows*sizeof(unsigned char)),hipMemcpyDeviceToHost);
if(err != hipSuccess) printf("/n Error in hipMemcpy of normaltestMag");
hipEventRecord(stop2);
hipEventSynchronize(stop2);
gettimeofday(&cstart3, NULL);
int histo[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int testhisto[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int difference[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
//Compute histogram of gradient orientations of input image
double angle = 0;
for (int i = 0; i < inputRows*inputCols; i++)
{
if (normalGradMag[i] > 25)
{
angle = fabs(gradPhase[i]);
if (angle > 0 && angle < 21) histo[0]++;
else if (angle > 21 && angle < 41) histo[1]++;
else if (angle > 41 && angle < 61) histo[2]++;
else if (angle > 61 && angle < 81) histo[3]++;
else if (angle > 81 && angle < 101) histo[4]++;
else if (angle > 101 && angle < 121) histo[5]++;
else if (angle > 121 && angle < 141) histo[6]++;
else if (angle > 141 && angle < 161) histo[7]++;
else histo[8]++;
}
}
printf("here6\n");
//Compute histogram of gradient orientations of test image
angle = 0;
for (int i = 0; i < testRows*testCols; i++)
{
if (normaltestMag[i] > 25)
{
angle = fabs(testgradPhase[i]);
if (angle > 0 && angle < 21) testhisto[0]++;
else if (angle > 21 && angle < 41) testhisto[1]++;
else if (angle > 41 && angle < 61) testhisto[2]++;
else if (angle > 61 && angle < 81) testhisto[3]++;
else if (angle > 81 && angle < 101) testhisto[4]++;
else if (angle > 101 && angle < 121) testhisto[5]++;
else if (angle > 121 && angle < 141) testhisto[6]++;
else if (angle > 141 && angle < 161) testhisto[7]++;
else testhisto[8]++;
}
}
printf("here7\n");
//Check the dissimilarity in histogram of gradient orientations
int sumDiff = 0;
for (int i = 0; i < 9; i++)
{
difference[i] = abs(histo[i] - testhisto[i]);
printf("diff[%d] = %d\n", i, difference[i]);
sumDiff += difference[i];
}
//float mismatch = (float)sumDiff*100/(testCols*testRows);
printf("HOG mismatch = %d\n", sumDiff);
fptr=fopen("input_grad_mag.pgm","w");
fprintf(fptr,"P5 %d %d 255\n",inputCols,inputRows);
fwrite(normalGradMag,inputCols*inputRows,1,fptr);
fclose(fptr);
fptr=fopen("test_grad_mag.pgm","w");
fprintf(fptr,"P5 %d %d 255\n",testCols,testRows);
fwrite(normaltestMag,testCols*testRows,1,fptr);
fclose(fptr);
//Free allocated memory
free(normalGradMag);
free(normalGradPhase);
free(normalGrad_x);
free(normalGrad_y);
free(normaltestMag);
free(normaltestPhase);
free(normaltest_x);
free(normaltest_y);
gettimeofday(&cend3, NULL);
//Free Allocated memory on the device. Don't forget.
hipEventCreate(&start3);
hipEventCreate(&stop3);
hipEventRecord(start3);
hipEventSynchronize(start3);
hipFree(d_gradImageX);
hipFree(d_gradImageY);
hipFree(d_gradPhase);
hipFree(d_gradMag);
hipFree(d_inputImage);
hipFree(d_normalGradMag);
hipFree(d_normalGradX);
hipFree(d_normalGradY);
hipFree(d_normalGradPhase);
hipFree(d_testgradImageX);
hipFree(d_testgradImageY);
hipFree(d_testgradPhase);
hipFree(d_testgradMag);
hipFree(d_testImage);
hipFree(d_testnormalGradMag);
hipFree(d_testnormalGradX);
hipFree(d_testnormalGradY);
hipFree(d_testnormalGradPhase);
hipEventRecord(stop3);
hipEventSynchronize(stop3);
//Calculate time tiaken
// float gpu_time_1 = 0;
// float gpu_time_2 = 0;
// float gpu_time_3 = 0;
hipEventElapsedTime(&gpu_time_1, start1, stop1);
hipEventElapsedTime(&gpu_time_2, start2, stop2);
hipEventElapsedTime(&gpu_time_3, start3, stop3);
printf("gpu_time_1 = %f\t gpu_time_2 = %f\t gpu_time_3 = %f\n",gpu_time_1, gpu_time_2, gpu_time_3);
printf("Total GPU time = %f\n", gpu_time_1+gpu_time_2+gpu_time_3);
float cpu_time_1 = (((cend1.tv_sec * 1000000 + cend1.tv_usec) - (cstart1.tv_sec * 1000000 + cstart1.tv_usec))/1000.0);
float cpu_time_2 = (((cend2.tv_sec * 1000000 + cend2.tv_usec) - (cstart2.tv_sec * 1000000 + cstart2.tv_usec))/1000.0);
float cpu_time_3 = (((cend3.tv_sec * 1000000 + cend3.tv_usec) - (cstart3.tv_sec * 1000000 + cstart3.tv_usec))/1000.0);
printf("cpu_time_1 = %f\t cpu_time_2 = %f\t cpu_time_3 = %f\n",cpu_time_1, cpu_time_2, cpu_time_3);
printf("Total CPU time = %f\n", cpu_time_1+cpu_time_2+cpu_time_3);
printf(" Total time = %f\n", gpu_time_1+gpu_time_2+gpu_time_3+ cpu_time_1+cpu_time_2+cpu_time_3);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include<hip/hip_runtime.h>
#include<time.h>
#include<sys/time.h>
#define SQUARE(x) ((x)*(x))
#define PI 3.14
#define BLOCK_SIZE 16
#define N 4096
__device__ __constant__ float pi=3.14;  // coarse approximation of pi; used by theta_gpu for radian->degree conversion
__device__ __constant__ int sobelX[9]={1,0,-1,2,0,-2,1,0,-1};  // 3x3 Sobel kernel, horizontal (x) gradient, row-major
__device__ __constant__ int sobelY[9]={1,2,1,0,0,0,-1,-2,-1};  // 3x3 Sobel kernel, vertical (y) gradient, row-major
// Linearly rescale each element of input[] from [min,max] to [0,255] and store
// it as an unsigned byte in output[].  Launched 1-D, one thread per pixel;
// threads past width*height fall through the guard.
//   input     : device array of width*height doubles
//   output    : device array of width*height bytes
//   min, max  : global minimum/maximum of input, precomputed on the host
//
// Fix: the original divided by (max-min) unconditionally; for a flat image
// (max == min) that is a divide-by-zero whose NaN/inf result makes the
// double->unsigned char conversion undefined.  Such pixels now map to 0.
__global__ void NormalizeGrayGPU(double input[], int width, int height, unsigned char output[], double min, double max)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < (width*height))
    {
        double range = max - min;
        if (range > 0.0)
            output[index] = (unsigned char)((input[index] - min) * 255.0 / range);
        else
            output[index] = 0;  // degenerate (constant) image
    }
}
// Return the smallest value in a width*height array of doubles.
// Assumes the array holds at least one element (input[0] seeds the scan).
double FindMin(double input[], int width, int height)
{
    const int count = width * height;
    double smallest = input[0];
    for (int idx = 1; idx < count; idx++)
    {
        if (input[idx] < smallest)
            smallest = input[idx];
    }
    return smallest;
}
// Return the largest value in a width*height array of doubles.
// Assumes the array holds at least one element (input[0] seeds the scan).
double FindMax(double input[], int width, int height)
{
    const int count = width * height;
    double largest = input[0];
    for (int idx = 1; idx < count; idx++)
    {
        if (input[idx] > largest)
            largest = input[idx];
    }
    return largest;
}
// Apply a 3x3 Sobel operator to the grayscale image A (width x height) and
// write, for every pixel, the horizontal gradient, the vertical gradient and
// the gradient magnitude.  Launched on a 2-D grid, one thread per pixel;
// threads outside the image fall through the guard.
//
// Fix: the original unconditionally read A[(row+r2)*width+(col+c2)] for
// r2,c2 in {-1,0,1}, which reads out of bounds on the image border
// (row==0, col==0, row==height-1, col==width-1).  Border pixels now get a
// gradient of 0 instead of touching invalid memory.
__global__ void SobelFilter_gpu(unsigned char* A, double *gradImageX, double *gradImageY, double *gradMag, int width, int height)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= height || col >= width)
        return;
    double tempx = 0;
    double tempy = 0;
    // Only interior pixels have a full 3x3 neighborhood.
    if (row > 0 && row < height-1 && col > 0 && col < width-1)
    {
        for (int r2 = -1; r2 <= 1; r2++) {
            for (int c2 = -1; c2 <= 1; c2++)
            {
                unsigned char pix = A[(row+r2)*width + (col+c2)];
                tempx += pix * sobelX[(r2+1)*3 + c2+1];
                tempy += pix * sobelY[(r2+1)*3 + c2+1];
            }
        }
    }
    gradImageX[(row*width)+col] = tempx;
    gradImageY[(row*width)+col] = tempy;
    gradMag[(row*width)+col] = sqrt((tempx*tempx) + (tempy*tempy));
}
// Convert per-pixel gradient components into an orientation angle in degrees:
// theta = atan2(gy, gx) * 180 / pi, using the device constant pi (3.14).
// Launched 1-D, one thread per pixel; extra threads exit via the early return.
__global__ void theta_gpu(double *gradImageY, double *gradImageX, double *gradPhase, int width, int height){
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= (width*height))
        return;
    float theta = atan2(gradImageY[index], gradImageX[index]);
    theta = theta * 180 / pi;  // radians -> degrees (approximate pi)
    gradPhase[index] = theta;
}
/* ---- private helpers for main() ---------------------------------------- */

// Print a diagnostic when a HIP API call fails.
static void checkHip(hipError_t e, const char *what)
{
    if (e != hipSuccess) printf("\n Error in %s", what);
}

// Load a binary PGM image: parse "P5 <cols> <rows> <maxval>" into *cols/*rows
// and return a calloc()ed pixel buffer, or NULL on any failure.
static unsigned char *loadPGM(const char *path, int *cols, int *rows)
{
    FILE *fp = fopen(path, "rb");
    if (fp == NULL) return NULL;
    char magic[80];
    int maxval = 0;
    if (fscanf(fp, "%79s %d %d %d", magic, cols, rows, &maxval) != 4)
    {
        fclose(fp);
        return NULL;
    }
    fgetc(fp);  /* consume the single whitespace byte separating header and data */
    unsigned char *img = (unsigned char*)calloc((size_t)(*cols) * (*rows), sizeof(unsigned char));
    if (img != NULL) fread(img, 1, (size_t)(*cols) * (*rows), fp);
    fclose(fp);
    return img;
}

// Write a cols x rows grayscale buffer as a binary PGM (P5) file.
static void writePGM(const char *path, const unsigned char *img, int cols, int rows)
{
    FILE *fp = fopen(path, "wb");  /* binary mode: pixel data is raw bytes */
    if (fp == NULL) return;
    fprintf(fp, "P5 %d %d 255\n", cols, rows);
    fwrite(img, (size_t)cols * rows, 1, fp);
    fclose(fp);
}

// Accumulate a 9-bin (20-degree-wide) orientation histogram over the pixels
// whose normalized gradient magnitude exceeds 25.
static void buildOrientationHistogram(const unsigned char *mag, const double *phase, int n, int histo[9])
{
    for (int i = 0; i < n; i++)
    {
        if (mag[i] > 25)
        {
            double angle = fabs(phase[i]);
            if (angle > 0 && angle < 21) histo[0]++;
            else if (angle > 21 && angle < 41) histo[1]++;
            else if (angle > 41 && angle < 61) histo[2]++;
            else if (angle > 61 && angle < 81) histo[3]++;
            else if (angle > 81 && angle < 101) histo[4]++;
            else if (angle > 101 && angle < 121) histo[5]++;
            else if (angle > 121 && angle < 141) histo[6]++;
            else if (angle > 141 && angle < 161) histo[7]++;
            else histo[8]++;  /* >= 161 degrees, plus exact bin-boundary values */
        }
    }
}

// Driver: load two PGM images (argv[1] = input, argv[2] = test), compute
// Sobel gradients / magnitudes / orientations on the GPU, build a 9-bin
// orientation histogram per image, report the histogram mismatch, write the
// normalized gradient-magnitude images, and print GPU/CPU timings.
//
// Fixes vs. the original:
//  - fscanf no longer writes "%s" through the address of an uninitialized
//    char* (stack corruption); headers go into bounded buffers.
//  - each image file is opened exactly once (two FILE handles were leaked).
//  - "/n" typos in error messages corrected to "\n".
//  - the testgradMag device->host copy used the *input* image size and could
//    overflow the host buffer; it now uses the test image size.
//  - hipMemcpy results are checked directly (several checks previously
//    tested a stale err value) and the messages name the buffer copied.
//  - stop1 was synchronized before being recorded; order corrected.
//  - kernel grids are sized from the actual image dimensions instead of a
//    fixed 4096-based / 10000-block launch, so any image size is covered.
//  - all host buffers are freed (the images and double arrays leaked).
int main(int argc, char *argv[])
{
    int inputCols, inputRows;
    int testCols, testRows;
    unsigned char *inputImage, *testImage;
    unsigned char *normalGradMag, *normalGrad_x, *normalGrad_y, *normalGradPhase;
    unsigned char *normaltestMag, *normaltest_x, *normaltest_y, *normaltestPhase;
    double *gradPhase, *gradMag;
    double *testgradPhase, *testgradMag;
    double max = 0;
    double min = 0;
    float gpu_time_1 = 0;
    float gpu_time_2 = 0;
    float gpu_time_3 = 0;
    /* device buffers */
    double *d_gradImageX, *d_gradImageY, *d_gradPhase, *d_gradMag;
    unsigned char *d_inputImage, *d_normalGradMag, *d_normalGradX, *d_normalGradY, *d_normalGradPhase;
    unsigned char *d_testImage;
    double *d_testgradImageX, *d_testgradImageY, *d_testgradMag, *d_testgradPhase;
    unsigned char *d_testnormalGradMag, *d_testnormalGradX, *d_testnormalGradY, *d_testnormalGradPhase;
    struct timeval cstart1, cstart2, cstart3, cend1, cend2, cend3;
    hipEvent_t start1, start2, start3, stop1, stop2, stop3;

    printf("Initialization done!\n");
    if (argc < 3)
    {
        printf("Usage: %s <input.pgm> <test.pgm>\n", argv[0]);
        exit(0);
    }
    gettimeofday(&cstart1, NULL);
    inputImage = loadPGM(argv[1], &inputCols, &inputRows);
    if (inputImage == NULL)
    {
        printf("Unable to open input file for reading\n");
        exit(0);
    }
    printf("Input file opened!\n");
    testImage = loadPGM(argv[2], &testCols, &testRows);
    if (testImage == NULL)
    {
        printf("Unable to open test file for reading\n");
        exit(0);
    }
    printf("Test file opened!\n");
    gettimeofday(&cend1, NULL);

    hipEventCreate(&start1);
    hipEventCreate(&stop1);
    hipEventRecord(start1);
    hipEventSynchronize(start1);
    size_t inputPixels = (size_t)inputRows * inputCols;
    size_t testPixels  = (size_t)testRows  * testCols;
    /* device buffers for the input image */
    checkHip(hipMalloc(&d_inputImage, inputPixels*sizeof(unsigned char)), "hipMalloc d_inputImage");
    checkHip(hipMalloc(&d_gradImageX, inputPixels*sizeof(double)), "hipMalloc d_gradImageX");
    checkHip(hipMalloc(&d_gradImageY, inputPixels*sizeof(double)), "hipMalloc d_gradImageY");
    checkHip(hipMalloc(&d_gradPhase, inputPixels*sizeof(double)), "hipMalloc d_gradPhase");
    checkHip(hipMalloc(&d_gradMag, inputPixels*sizeof(double)), "hipMalloc d_gradMag");
    checkHip(hipMalloc(&d_normalGradMag, inputPixels*sizeof(unsigned char)), "hipMalloc d_normalGradMag");
    checkHip(hipMalloc(&d_normalGradX, inputPixels*sizeof(unsigned char)), "hipMalloc d_normalGradX");
    checkHip(hipMalloc(&d_normalGradY, inputPixels*sizeof(unsigned char)), "hipMalloc d_normalGradY");
    checkHip(hipMalloc(&d_normalGradPhase, inputPixels*sizeof(unsigned char)), "hipMalloc d_normalGradPhase");
    /* device buffers for the test image */
    checkHip(hipMalloc(&d_testImage, testPixels*sizeof(unsigned char)), "hipMalloc d_testImage");
    checkHip(hipMalloc(&d_testgradImageX, testPixels*sizeof(double)), "hipMalloc d_testgradImageX");
    checkHip(hipMalloc(&d_testgradImageY, testPixels*sizeof(double)), "hipMalloc d_testgradImageY");
    checkHip(hipMalloc(&d_testgradPhase, testPixels*sizeof(double)), "hipMalloc d_testgradPhase");
    checkHip(hipMalloc(&d_testgradMag, testPixels*sizeof(double)), "hipMalloc d_testgradMag");
    checkHip(hipMalloc(&d_testnormalGradMag, testPixels*sizeof(unsigned char)), "hipMalloc d_testnormalGradMag");
    checkHip(hipMalloc(&d_testnormalGradX, testPixels*sizeof(unsigned char)), "hipMalloc d_testnormalGradX");
    checkHip(hipMalloc(&d_testnormalGradY, testPixels*sizeof(unsigned char)), "hipMalloc d_testnormalGradY");
    checkHip(hipMalloc(&d_testnormalGradPhase, testPixels*sizeof(unsigned char)), "hipMalloc d_testnormalGradPhase");
    hipEventRecord(stop1);        /* record before waiting on the event */
    hipEventSynchronize(stop1);

    gettimeofday(&cstart2, NULL);
    /* normalized input gradient images */
    normalGradMag   = (unsigned char*)calloc(inputPixels, sizeof(unsigned char));
    normalGrad_x    = (unsigned char*)calloc(inputPixels, sizeof(unsigned char));
    normalGrad_y    = (unsigned char*)calloc(inputPixels, sizeof(unsigned char));
    normalGradPhase = (unsigned char*)calloc(inputPixels, sizeof(unsigned char));
    gradPhase       = (double*)calloc(inputPixels, sizeof(double));
    gradMag         = (double*)calloc(inputPixels, sizeof(double));
    /* normalized test gradient images */
    normaltestMag   = (unsigned char*)calloc(testPixels, sizeof(unsigned char));
    normaltest_x    = (unsigned char*)calloc(testPixels, sizeof(unsigned char));
    normaltest_y    = (unsigned char*)calloc(testPixels, sizeof(unsigned char));
    normaltestPhase = (unsigned char*)calloc(testPixels, sizeof(unsigned char));
    testgradPhase   = (double*)calloc(testPixels, sizeof(double));
    testgradMag     = (double*)calloc(testPixels, sizeof(double));
    gettimeofday(&cend2, NULL);

    hipEventCreate(&start2);
    hipEventCreate(&stop2);
    hipEventRecord(start2);
    hipEventSynchronize(start2);
    /* grids sized from the actual image dimensions (ceil division) */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGridInput((inputCols + BLOCK_SIZE - 1)/BLOCK_SIZE, (inputRows + BLOCK_SIZE - 1)/BLOCK_SIZE, 1);
    dim3 dimGridTest((testCols + BLOCK_SIZE - 1)/BLOCK_SIZE, (testRows + BLOCK_SIZE - 1)/BLOCK_SIZE, 1);
    dim3 BlockDim(1024, 1, 1);
    dim3 GridDimInput((unsigned)((inputPixels + 1023) / 1024), 1, 1);
    dim3 GridDimTest((unsigned)((testPixels + 1023) / 1024), 1, 1);
    /* gradients and phase for the input image */
    checkHip(hipMemcpy(d_inputImage, inputImage, inputPixels*sizeof(unsigned char), hipMemcpyHostToDevice), "hipMemcpy of d_inputImage");
    SobelFilter_gpu<<<dimGridInput,dimBlock>>>(d_inputImage, d_gradImageX, d_gradImageY, d_gradMag, inputCols, inputRows);
    hipDeviceSynchronize();
    theta_gpu<<<GridDimInput,BlockDim>>>(d_gradImageY, d_gradImageX, d_gradPhase, inputCols, inputRows);
    /* gradients and phase for the test image */
    checkHip(hipMemcpy(d_testImage, testImage, testPixels*sizeof(unsigned char), hipMemcpyHostToDevice), "hipMemcpy of d_testImage");
    SobelFilter_gpu<<<dimGridTest,dimBlock>>>(d_testImage, d_testgradImageX, d_testgradImageY, d_testgradMag, testCols, testRows);
    hipDeviceSynchronize();
    theta_gpu<<<GridDimTest,BlockDim>>>(d_testgradImageY, d_testgradImageX, d_testgradPhase, testCols, testRows);
    /* normalize both gradient-magnitude images to [0,255] */
    checkHip(hipMemcpy(gradMag, d_gradMag, inputPixels*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy of gradMag");
    min = FindMin(gradMag, inputCols, inputRows);
    max = FindMax(gradMag, inputCols, inputRows);
    NormalizeGrayGPU<<<GridDimInput,BlockDim>>>(d_gradMag, inputCols, inputRows, d_normalGradMag, min, max);
    hipDeviceSynchronize();
    /* NOTE: this copy was previously sized with the *input* image */
    checkHip(hipMemcpy(testgradMag, d_testgradMag, testPixels*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy of testgradMag");
    min = FindMin(testgradMag, testCols, testRows);
    max = FindMax(testgradMag, testCols, testRows);
    NormalizeGrayGPU<<<GridDimTest,BlockDim>>>(d_testgradMag, testCols, testRows, d_testnormalGradMag, min, max);
    hipDeviceSynchronize();
    checkHip(hipMemcpy(gradPhase, d_gradPhase, inputPixels*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy of gradPhase");
    checkHip(hipMemcpy(testgradPhase, d_testgradPhase, testPixels*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy of testgradPhase");
    checkHip(hipMemcpy(normalGradMag, d_normalGradMag, inputPixels*sizeof(unsigned char), hipMemcpyDeviceToHost), "hipMemcpy of normalGradMag");
    checkHip(hipMemcpy(normaltestMag, d_testnormalGradMag, testPixels*sizeof(unsigned char), hipMemcpyDeviceToHost), "hipMemcpy of normaltestMag");
    hipEventRecord(stop2);
    hipEventSynchronize(stop2);

    gettimeofday(&cstart3, NULL);
    int histo[9]      = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    int testhisto[9]  = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    int difference[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    /* histogram of gradient orientations of the input image */
    buildOrientationHistogram(normalGradMag, gradPhase, inputRows*inputCols, histo);
    printf("here6\n");
    /* histogram of gradient orientations of the test image */
    buildOrientationHistogram(normaltestMag, testgradPhase, testRows*testCols, testhisto);
    printf("here7\n");
    /* dissimilarity between the two orientation histograms */
    int sumDiff = 0;
    for (int i = 0; i < 9; i++)
    {
        difference[i] = abs(histo[i] - testhisto[i]);
        printf("diff[%d] = %d\n", i, difference[i]);
        sumDiff += difference[i];
    }
    printf("HOG mismatch = %d\n", sumDiff);
    writePGM("input_grad_mag.pgm", normalGradMag, inputCols, inputRows);
    writePGM("test_grad_mag.pgm", normaltestMag, testCols, testRows);
    /* free all host memory (images and double arrays previously leaked) */
    free(inputImage);
    free(testImage);
    free(normalGradMag);
    free(normalGradPhase);
    free(normalGrad_x);
    free(normalGrad_y);
    free(normaltestMag);
    free(normaltestPhase);
    free(normaltest_x);
    free(normaltest_y);
    free(gradPhase);
    free(gradMag);
    free(testgradPhase);
    free(testgradMag);
    gettimeofday(&cend3, NULL);

    /* free allocated memory on the device */
    hipEventCreate(&start3);
    hipEventCreate(&stop3);
    hipEventRecord(start3);
    hipEventSynchronize(start3);
    hipFree(d_gradImageX);
    hipFree(d_gradImageY);
    hipFree(d_gradPhase);
    hipFree(d_gradMag);
    hipFree(d_inputImage);
    hipFree(d_normalGradMag);
    hipFree(d_normalGradX);
    hipFree(d_normalGradY);
    hipFree(d_normalGradPhase);
    hipFree(d_testgradImageX);
    hipFree(d_testgradImageY);
    hipFree(d_testgradPhase);
    hipFree(d_testgradMag);
    hipFree(d_testImage);
    hipFree(d_testnormalGradMag);
    hipFree(d_testnormalGradX);
    hipFree(d_testnormalGradY);
    hipFree(d_testnormalGradPhase);
    hipEventRecord(stop3);
    hipEventSynchronize(stop3);

    /* calculate time taken */
    hipEventElapsedTime(&gpu_time_1, start1, stop1);
    hipEventElapsedTime(&gpu_time_2, start2, stop2);
    hipEventElapsedTime(&gpu_time_3, start3, stop3);
    printf("gpu_time_1 = %f\t gpu_time_2 = %f\t gpu_time_3 = %f\n", gpu_time_1, gpu_time_2, gpu_time_3);
    printf("Total GPU time = %f\n", gpu_time_1+gpu_time_2+gpu_time_3);
    float cpu_time_1 = (((cend1.tv_sec * 1000000 + cend1.tv_usec) - (cstart1.tv_sec * 1000000 + cstart1.tv_usec))/1000.0);
    float cpu_time_2 = (((cend2.tv_sec * 1000000 + cend2.tv_usec) - (cstart2.tv_sec * 1000000 + cstart2.tv_usec))/1000.0);
    float cpu_time_3 = (((cend3.tv_sec * 1000000 + cend3.tv_usec) - (cstart3.tv_sec * 1000000 + cstart3.tv_usec))/1000.0);
    printf("cpu_time_1 = %f\t cpu_time_2 = %f\t cpu_time_3 = %f\n", cpu_time_1, cpu_time_2, cpu_time_3);
    printf("Total CPU time = %f\n", cpu_time_1+cpu_time_2+cpu_time_3);
    printf(" Total time = %f\n", gpu_time_1+gpu_time_2+gpu_time_3 + cpu_time_1+cpu_time_2+cpu_time_3);
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16NormalizeGrayGPUPdiiPhdd
.globl _Z16NormalizeGrayGPUPdiiPhdd
.p2align 8
.type _Z16NormalizeGrayGPUPdiiPhdd,@function
_Z16NormalizeGrayGPUPdiiPhdd:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_mul_i32 s2, s3, s2
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b128 s[4:7], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x20
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
v_add_f64 v[5:6], s[0:1], -s[6:7]
global_load_b64 v[3:4], v[3:4], off
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], -s[6:7]
v_mul_f64 v[3:4], v[3:4], 0x406fe000
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[7:8], null, v[5:6], v[5:6], v[3:4]
v_rcp_f64_e32 v[9:10], v[7:8]
s_waitcnt_depctr 0xfff
v_fma_f64 v[11:12], -v[7:8], v[9:10], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[9:10], v[9:10], v[11:12], v[9:10]
v_fma_f64 v[11:12], -v[7:8], v[9:10], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[9:10], v[9:10], v[11:12], v[9:10]
v_div_scale_f64 v[11:12], vcc_lo, v[3:4], v[5:6], v[3:4]
v_mul_f64 v[13:14], v[11:12], v[9:10]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[7:8], -v[7:8], v[13:14], v[11:12]
v_div_fmas_f64 v[7:8], v[7:8], v[9:10], v[13:14]
v_add_co_u32 v0, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[3:4], v[7:8], v[5:6], v[3:4]
v_cvt_i32_f64_e32 v3, v[3:4]
global_store_b8 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16NormalizeGrayGPUPdiiPhdd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16NormalizeGrayGPUPdiiPhdd, .Lfunc_end0-_Z16NormalizeGrayGPUPdiiPhdd
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z15SobelFilter_gpuPhPdS0_S0_ii
.globl _Z15SobelFilter_gpuPhPdS0_S0_ii
.p2align 8
.type _Z15SobelFilter_gpuPhPdS0_S0_ii,@function
_Z15SobelFilter_gpuPhPdS0_S0_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b64 s[4:5], s[0:1], 0x20
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v1, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
s_mul_i32 s15, s15, s3
s_mul_i32 s14, s14, s2
v_add_nc_u32_e32 v5, s15, v2
v_add_nc_u32_e32 v0, s14, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s5, v5
v_cmp_gt_i32_e64 s2, s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_6
s_load_b64 s[2:3], s[0:1], 0x0
v_add3_u32 v2, v2, s15, -1
s_mov_b32 s5, -1
s_getpc_b64 s[6:7]
s_add_u32 s6, s6, sobelX@rel32@lo+4
s_addc_u32 s7, s7, sobelX@rel32@hi+12
s_getpc_b64 s[8:9]
s_add_u32 s8, s8, sobelY@rel32@lo+4
s_addc_u32 s9, s9, sobelY@rel32@hi+12
v_mad_u64_u32 v[3:4], null, s4, v2, v[1:2]
v_mov_b32_e32 v1, 0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v6, v3, s14, -1
v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB1_2:
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v7, v6
s_mov_b64 s[10:11], 0
.p2align 6
.LBB1_3:
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v9, 31, v7
s_waitcnt lgkmcnt(0)
v_add_co_u32 v8, vcc_lo, s2, v7
s_add_u32 s12, s6, s10
s_addc_u32 s13, s7, s11
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v9, vcc_lo
s_add_u32 s14, s8, s10
s_addc_u32 s15, s9, s11
v_add_nc_u32_e32 v7, 1, v7
global_load_u8 v8, v[8:9], off
s_load_b32 s12, s[12:13], 0x0
s_load_b32 s13, s[14:15], 0x0
s_add_u32 s10, s10, 4
s_addc_u32 s11, s11, 0
s_cmp_eq_u32 s10, 12
s_waitcnt vmcnt(0) lgkmcnt(0)
v_mul_lo_u32 v9, s12, v8
v_mul_lo_u32 v10, s13, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f64_i32_e32 v[8:9], v9
v_cvt_f64_i32_e32 v[10:11], v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[3:4], v[3:4], v[8:9]
v_add_f64 v[1:2], v[1:2], v[10:11]
s_cbranch_scc0 .LBB1_3
s_add_i32 s5, s5, 1
s_add_u32 s6, s6, 12
v_add_nc_u32_e32 v6, s4, v6
s_addc_u32 s7, s7, 0
s_add_u32 s8, s8, 12
s_addc_u32 s9, s9, 0
s_cmp_eq_u32 s5, 2
s_cbranch_scc0 .LBB1_2
s_set_inst_prefetch_distance 0x2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mul_f64 v[6:7], v[1:2], v[1:2]
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x8
s_load_b64 s[2:3], s[0:1], 0x18
v_fma_f64 v[6:7], v[3:4], v[3:4], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[6:7]
v_cndmask_b32_e64 v8, 0, 1, vcc_lo
v_lshlrev_b32_e32 v8, 8, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ldexp_f64 v[6:7], v[6:7], v8
v_rsq_f64_e32 v[8:9], v[6:7]
s_waitcnt_depctr 0xfff
v_mul_f64 v[10:11], v[6:7], v[8:9]
v_mul_f64 v[8:9], v[8:9], 0.5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[12:13], -v[8:9], v[10:11], 0.5
v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
v_fma_f64 v[8:9], v[8:9], v[12:13], v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[12:13], -v[10:11], v[10:11], v[6:7]
v_fma_f64 v[10:11], v[12:13], v[8:9], v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[12:13], -v[10:11], v[10:11], v[6:7]
v_fma_f64 v[8:9], v[12:13], v[8:9], v[10:11]
v_cndmask_b32_e64 v10, 0, 0xffffff80, vcc_lo
v_cmp_class_f64_e64 vcc_lo, v[6:7], 0x260
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_ldexp_f64 v[8:9], v[8:9], v10
v_mad_u64_u32 v[10:11], null, v5, s4, v[0:1]
v_ashrrev_i32_e32 v11, 31, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], 3, v[10:11]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v12, s0, s8, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s0, s9, v11, s0
v_add_co_u32 v14, s0, s10, v10
v_add_co_ci_u32_e64 v15, s0, s11, v11, s0
v_dual_cndmask_b32 v7, v9, v7 :: v_dual_cndmask_b32 v6, v8, v6
v_add_co_u32 v8, vcc_lo, s2, v10
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v11, vcc_lo
global_store_b64 v[12:13], v[3:4], off
global_store_b64 v[14:15], v[1:2], off
global_store_b64 v[8:9], v[6:7], off
.LBB1_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15SobelFilter_gpuPhPdS0_S0_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z15SobelFilter_gpuPhPdS0_S0_ii, .Lfunc_end1-_Z15SobelFilter_gpuPhPdS0_S0_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z9theta_gpuPdS_S_ii
.globl _Z9theta_gpuPdS_S_ii
.p2align 8
.type _Z9theta_gpuPdS_S_ii,@function
_Z9theta_gpuPdS_S_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_mul_i32 s2, s3, s2
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB2_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, 0xbf23e260
s_mov_b32 s0, 0xbd3237f4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
s_mov_b32 s5, 0x3eeba404
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
s_mov_b32 s4, 0xb5e68a13
s_waitcnt vmcnt(1)
v_max_f64 v[6:7], |v[2:3]|, |v[2:3]|
s_waitcnt vmcnt(0)
v_max_f64 v[8:9], |v[4:5]|, |v[4:5]|
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_max_f64 v[10:11], v[8:9], v[6:7]
v_min_f64 v[6:7], v[8:9], v[6:7]
v_div_scale_f64 v[8:9], null, v[10:11], v[10:11], v[6:7]
v_div_scale_f64 v[16:17], vcc_lo, v[6:7], v[10:11], v[6:7]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[12:13], v[8:9]
s_waitcnt_depctr 0xfff
v_fma_f64 v[14:15], -v[8:9], v[12:13], 1.0
v_fma_f64 v[12:13], v[12:13], v[14:15], v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[14:15], -v[8:9], v[12:13], 1.0
v_fma_f64 v[12:13], v[12:13], v[14:15], v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[14:15], v[16:17], v[12:13]
v_fma_f64 v[8:9], -v[8:9], v[14:15], v[16:17]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fmas_f64 v[8:9], v[8:9], v[12:13], v[14:15]
v_cmp_lt_f64_e64 vcc_lo, |v[4:5]|, |v[2:3]|
v_div_fixup_f64 v[6:7], v[8:9], v[10:11], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[8:9], v[6:7], v[6:7]
v_fma_f64 v[10:11], v[8:9], s[4:5], s[0:1]
s_mov_b32 s1, 0x3f4b2bb0
s_mov_b32 s0, 0x69efb384
v_cmp_class_f64_e64 s4, v[4:5], 0x204
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbf67952d
s_mov_b32 s0, 0xaf56de9b
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3f7d6d43
s_mov_b32 s0, 0xa595c56f
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbf8c6ea4
s_mov_b32 s0, 0xa57d9582
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3f967e29
s_mov_b32 s0, 0x5f08b19f
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbf9e9ae6
s_mov_b32 s0, 0xfc27006a
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3fa2c15b
s_mov_b32 s0, 0x5711927a
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbfa59976
s_mov_b32 s0, 0xe82d3ff0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3fa82d5d
s_mov_b32 s0, 0x6ef28734
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbfaae5ce
s_mov_b32 s0, 0x6a214619
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3fae1bb4
s_mov_b32 s0, 0x8427b883
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbfb110e4
s_mov_b32 s0, 0x8b207f05
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3fb3b136
s_mov_b32 s0, 0x57b87036
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbfb745d1
s_mov_b32 s0, 0x19378e4f
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3fbc71c7
s_mov_b32 s0, 0x17e1913c
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbfc24924
s_mov_b32 s0, 0x92376b7d
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0x3fc99999
s_mov_b32 s0, 0x999952cc
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s1, 0xbfd55555
s_mov_b32 s0, 0x55555523
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[10:11], v[8:9], v[10:11], s[0:1]
s_mov_b32 s0, 0x54442d18
s_mov_b32 s1, 0x3ff921fb
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_f64 v[8:9], v[8:9], v[10:11]
v_ashrrev_i32_e32 v11, 31, v5
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f64 v[8:9], -v[6:7], s[0:1]
s_mov_b32 s1, 0x400921fb
v_dual_cndmask_b32 v7, v7, v9 :: v_dual_cndmask_b32 v6, v6, v8
v_cmp_gt_i32_e32 vcc_lo, 0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_f64 v[8:9], -v[6:7], s[0:1]
v_cmp_class_f64_e64 s1, v[2:3], 0x204
v_cmp_eq_f64_e64 s0, 0, v[2:3]
v_dual_mov_b32 v10, 0x7f3321d2 :: v_dual_cndmask_b32 v7, v7, v9
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_mov_b32 v9, 0x4002d97c :: v_dual_cndmask_b32 v6, v6, v8
v_cndmask_b32_e32 v10, 0x54442d18, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v8, 0x3fe921fb, v9, vcc_lo
s_and_b32 vcc_lo, s1, s4
v_bfi_b32 v8, 0x7fffffff, v8, v3
v_and_b32_e32 v12, 0x400921fb, v11
v_and_b32_e32 v11, 0x54442d18, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v7, v7, v12, s0
v_cndmask_b32_e64 v6, v6, v11, s0
s_getpc_b64 s[0:1]
s_add_u32 s0, s0, pi@rel32@lo+4
s_addc_u32 s1, s1, pi@rel32@hi+12
s_load_b32 s0, s[0:1], 0x0
v_dual_cndmask_b32 v7, v7, v8 :: v_dual_cndmask_b32 v6, v6, v10
v_cmp_o_f64_e32 vcc_lo, v[4:5], v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v5, 0x7ff80000, v7, vcc_lo
v_cndmask_b32_e32 v4, 0, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_f64_e32 v2, v[4:5]
v_bfi_b32 v2, 0x7fffffff, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_f32_e32 v2, 0x43340000, v2
s_waitcnt lgkmcnt(0)
v_div_scale_f32 v3, null, s0, s0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v4, v3
s_waitcnt_depctr 0xfff
v_fma_f32 v5, -v3, v4, 1.0
v_fmac_f32_e32 v4, v5, v4
v_div_scale_f32 v5, vcc_lo, v2, s0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v6, v5, v4
v_fma_f32 v7, -v3, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v6, v7, v4
v_fma_f32 v3, -v3, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_div_fmas_f32 v3, v3, v4, v6
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_div_fixup_f32 v2, v3, s0, v2
s_delay_alu instid0(VALU_DEP_1)
v_cvt_f64_f32_e32 v[2:3], v2
global_store_b64 v[0:1], v[2:3], off
.LBB2_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9theta_gpuPdS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 18
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z9theta_gpuPdS_S_ii, .Lfunc_end2-_Z9theta_gpuPdS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected pi
.type pi,@object
.data
.globl pi
.p2align 2, 0x0
pi:
.long 0x4048f5c3
.size pi, 4
.protected sobelX
.type sobelX,@object
.globl sobelX
.p2align 4, 0x0
sobelX:
.long 1
.long 0
.long 4294967295
.long 2
.long 0
.long 4294967294
.long 1
.long 0
.long 4294967295
.size sobelX, 36
.protected sobelY
.type sobelY,@object
.globl sobelY
.p2align 4, 0x0
sobelY:
.long 1
.long 2
.long 1
.long 0
.long 0
.long 0
.long 4294967295
.long 4294967294
.long 4294967295
.size sobelY, 36
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym pi
.addrsig_sym sobelX
.addrsig_sym sobelY
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16NormalizeGrayGPUPdiiPhdd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16NormalizeGrayGPUPdiiPhdd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15SobelFilter_gpuPhPdS0_S0_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15SobelFilter_gpuPhPdS0_S0_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9theta_gpuPdS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9theta_gpuPdS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 18
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
// Maps a 2D texture-space offset (tx, ty) to one of six radial "layers".
// The offsets are quantized to multiples of `radius` (round-to-nearest via
// the +0.5), then binned by their L1 sum, with two special cases:
//   - the diagonal cell (x==1, y==1) is its own layer (2);
//   - sums >= 2 are shifted up by one to leave index 2 for that diagonal;
//   - the result is clamped to the outermost layer, 5.
// Returns an int in [0, 5].
__device__ int compute_layer(float tx, float ty, float radius) {
  int x = int(std::abs(tx) / radius + 0.5);  // 0.5 is a double literal — promotes the sum to double; harmless but costs a double op on GPU
  int y = int(std::abs(ty) / radius + 0.5);
  if (x == 1 && y == 1)
    return 2;
  int c = 0;
  if (x + y < 2)
    c = x + y;  // layers 0 and 1
  else
    c = x + y + 1;  // skip 2, reserved for the (1,1) diagonal
  if (c > 5)
    c = 5;  // clamp to outermost layer
  return c;
}
// Forward pass: for each group, average neighbor point features into 6 radial
// layers (rings) around the group center, then fill empty layers by linear
// interpolation between the nearest non-empty layers on either side.
//
// Grid layout: blockIdx.y = batch index; blockIdx.x*blockDim.x+threadIdx.x = group;
// blockIdx.z selects a chunk of num_feat_per_threads feature dimensions, so
// different z-blocks touch disjoint [feat_start, feat_end) ranges of the same
// group's output and do not race.
//
// Assumes `out` was zero-initialized by the caller before launch (the kernel
// only does `+=` into it) — TODO confirm at the call site.
__global__ void five_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
    const float* points, const float* tex, const int* idx, float* out, float radius)
{
    int b = blockIdx.y;
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx >= num_groups)
        return;
    // Feature-dimension chunk handled by this z-block, clamped to num_featdim.
    int feat_start = num_feat_per_threads * blockIdx.z;
    int feat_end = feat_start + num_feat_per_threads;
    if (feat_end >= num_featdim)
        feat_end = num_featdim;
    // Per-(batch, group) views into the flat buffers.
    const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
    const float* tex_array = tex + 2 * num_points * b;          // 2 floats (tx, ty) per point
    const float* points_array = points + (b * num_points * num_featdim);
    float* out_array = out + (b * num_groups + group_idx) * (6 * num_featdim);  // 6 layers per group
    int layers_counts[6] = {0, 0, 0, 0, 0, 0};
    // Pass 1: count how many valid neighbors fall into each layer.
    for (int i = 0; i < num_neighbors; ++i) {
        int index = group_array[i];
        if (tex_array[index * 2] > 1e20)
            continue;  // tx > 1e20 acts as an "invalid point" sentinel — skip it
        int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
        layers_counts[layer] += 1;
    }
    // Pass 2: accumulate the per-layer mean of the neighbor features.
    // layers_counts[layer] is nonzero here because this very point was counted.
    for (int i = 0; i < num_neighbors; ++i) {
        int index = group_array[i];
        if (tex_array[index * 2] > 1e20)
            continue;
        int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
        float* out_temp = out_array + layer * num_featdim;
        const float* point_temp = points_array + index * num_featdim;
        for (int j = feat_start; j < feat_end; ++j) {
            out_temp[j] += point_temp[j] / layers_counts[layer];
        }
    }
    // Pass 3: fill each empty layer i from the nearest non-empty layers below
    // (front) and above (rear), weighted by inverse distance. If only one side
    // exists, copy from that side; if no layer is occupied at all, the
    // front=0/rear-scan fallback writes zeros (front forced to 0, weight 1 on rear... 
    // NOTE(review): in the all-empty case rear stays 6-out-of-range before the
    // front<0 fallback caps it — trace below shows front<0 picks the else branch
    // with front=0, rear left at 6 only if rear<6 failed; verify layers cannot
    // be all empty, e.g. num_neighbors == 0 — TODO confirm.
    for (int i = 0; i < 6; ++i) {
        if (layers_counts[i] == 0) {
            int front = i;
            int rear = i;
            float weight_front = 0.0f;
            float weight_rear = 0.0f;
            while (front >= 0 && layers_counts[front] == 0)
                front -= 1;   // nearest occupied layer at or below i (−1 if none)
            while (rear < 6 && layers_counts[rear] == 0)
                rear += 1;    // nearest occupied layer above i (6 if none)
            if (front >= 0 && rear < 6) {
                // Both sides occupied: inverse-distance linear interpolation.
                weight_rear = (i - front) / (rear - front + 0.0f);
                weight_front = 1.0f - weight_rear;
            }
            else if (front >= 0) {
                // Only a lower layer exists: copy it (rear index is unused via weight 0).
                weight_front = 1.0f;
                weight_rear = 0.0f;
                rear = 5;
            }
            else {
                // Only an upper layer exists (or none): copy the rear side.
                weight_front = 0.0f;
                weight_rear = 1.0f;
                front = 0;
            }
            float* out_temp = out_array + i * num_featdim;
            float* out_front = out_array + front * num_featdim;
            float* out_rear = out_array + rear * num_featdim;
            for (int j = feat_start; j < feat_end; ++j) {
                out_temp[j] = out_front[j] * weight_front + out_rear[j] * weight_rear;
            }
        }
    }
}
// Backward pass of five_kernel: scatters grad_out (gradient w.r.t. the 6-layer
// group features) back to grad_points. For each valid neighbor point, the
// gradient is the direct layer's grad plus contributions from empty layers
// that were interpolated from this point's layer in the forward pass.
//
// Grid layout matches five_kernel: blockIdx.y = batch, x-dim = group,
// blockIdx.z = feature-dimension chunk. Writes to grad_points use atomicAdd
// because the same point can be a neighbor of many groups.
//
// Assumes `grad_points` was zero-initialized by the caller — TODO confirm.
__global__ void fivegrad_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
    const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius)
{
    int b = blockIdx.y;
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx >= num_groups)
        return;
    // Feature chunk for this z-block, clamped to num_featdim.
    int feat_start = num_feat_per_threads * blockIdx.z;
    int feat_end = feat_start + num_feat_per_threads;
    if (feat_end >= num_featdim)
        feat_end = num_featdim;
    // Per-(batch, group) views, mirroring the forward kernel's layout.
    const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
    const float* tex_array = tex + 2 * num_points * b;
    float* points_array = grad_points + (b * num_points * num_featdim);
    const float* out_array = grad_out + (b * num_groups + group_idx) * (6 * num_featdim);
    int layers_counts[6] = {0, 0, 0, 0, 0, 0};
    float weights_front[6] = {0, 0, 0, 0, 0, 0};
    float weights_rear[6] = {0, 0, 0, 0, 0, 0};
    // Pass 1: recompute per-layer occupancy counts (same as forward pass 1).
    for (int i = 0; i < num_neighbors; ++i) {
        int index = group_array[i];
        if (tex_array[index * 2] > 1e20)
            continue;  // tx > 1e20 is the "invalid point" sentinel
        int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
        layers_counts[layer] += 1;
    }
    // Pass 2: recompute the interpolation weights every empty layer used in
    // the forward pass (identical branch logic to five_kernel's pass 3).
    for (int i = 0; i < 6; ++i) {
        if (layers_counts[i] == 0) {
            int front = i;
            int rear = i;
            float weight_front = 0.0f;
            float weight_rear = 0.0f;
            while (front >= 0 && layers_counts[front] == 0)
                front -= 1;
            while (rear < 6 && layers_counts[rear] == 0)
                rear += 1;
            if (front >= 0 && rear < 6) {
                weight_rear = (i - front) / (rear - front + 0.0f);
                weight_front = 1.0f - weight_rear;
            }
            else if (front >= 0) {
                weight_front = 1.0f;
                weight_rear = 0.0f;
                rear = 5;
            }
            else {
                weight_front = 0.0f;
                weight_rear = 1.0f;
                front = 0;
            }
            weights_front[i] = weight_front;
            weights_rear[i] = weight_rear;
        }
    }
    // Pass 3: for each valid neighbor, accumulate the total upstream gradient
    // flowing into its layer:
    //   - the layer's own grad_out slice, plus
    //   - from each contiguous run of EMPTY layers below it, that empty
    //     layer's grad scaled by weights_rear (this layer acted as "rear"), and
    //   - from each contiguous run of empty layers above it, scaled by
    //     weights_front (this layer acted as "front").
    // The sum is divided by the layer count, matching the forward mean.
    for (int i = 0; i < num_neighbors; ++i) {
        int index = group_array[i];
        if (tex_array[index * 2] > 1e20)
            continue;
        int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
        const float* out_temp = out_array + layer * num_featdim;
        float* point_temp = points_array + index * num_featdim;
        for (int j = feat_start; j < feat_end; ++j) {
            float signal = out_temp[j];
            int l = layer - 1;
            const float* out_temp_step = out_temp - num_featdim;  // walk downward one layer at a time
            while (l >= 0 && layers_counts[l] == 0) {
                signal += out_temp_step[j] * weights_rear[l];
                out_temp_step -= num_featdim;
                l -= 1;
            }
            l = layer + 1;
            out_temp_step = out_temp + num_featdim;  // walk upward
            while (l < 6 && layers_counts[l] == 0) {
                signal += out_temp_step[j] * weights_front[l];
                out_temp_step += num_featdim;
                l += 1;
            }
            // atomic: multiple groups (threads) may share this point
            atomicAdd(&point_temp[j], signal / layers_counts[layer]);
        }
    }
}
// Host launcher for five_kernel. Splits the feature dimensions across
// gridDim.z chunks of num_feat_per_threads each; 256 threads per block over
// groups, gridDim.y over the batch.
// NOTE(review): `(num_groups + 255) / num_groups` is suspicious — it equals
// 1 + 255/num_groups (integer), so it depends on the GROUP count, not the
// feature count; a divisor of 256 (matching the grid-x ceil-div below) was
// probably intended. The feature range is still fully covered either way
// because num_feat_per_threads is derived from it, so this is left as-is —
// verify intent before changing.
void fivekernel_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, float* out, float radius) {
    int num_threads_for_feat = (num_groups + 255) / num_groups;
    // ceil(num_featdim / num_threads_for_feat): features handled per z-block
    int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
    five_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, out, radius);
}
// Host launcher for fivegrad_kernel (backward pass). Launch geometry mirrors
// fivekernel_gpu exactly so forward and backward cover the same feature chunks.
// NOTE(review): same suspicious `(num_groups + 255) / num_groups` divisor as
// in fivekernel_gpu — likely meant 256; verify before changing.
void fivekernelgrad_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius) {
    int num_threads_for_feat = (num_groups + 255) / num_groups;
    // ceil(num_featdim / num_threads_for_feat): features handled per z-block
    int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
    fivegrad_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(
        batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, grad_out, grad_points, radius);
} | .file "tmpxft_00074846_00000000-6_tf_interpolate_g.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13compute_layerfff
.type _Z13compute_layerfff, @function
_Z13compute_layerfff:
.LFB3669:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE3669:
.size _Z13compute_layerfff, .-_Z13compute_layerfff
.globl _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
.type _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff, @function
_Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff:
.LFB3696:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movl %edx, 52(%rsp)
movl %ecx, 48(%rsp)
movl %r8d, 44(%rsp)
movl %r9d, 40(%rsp)
movss %xmm0, 4(%rsp)
movq 240(%rsp), %rax
movq %rax, 32(%rsp)
movq 248(%rsp), %rax
movq %rax, 24(%rsp)
movq 256(%rsp), %rax
movq %rax, 16(%rsp)
movq 264(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 52(%rsp), %rax
movq %rax, 144(%rsp)
leaq 48(%rsp), %rax
movq %rax, 152(%rsp)
leaq 44(%rsp), %rax
movq %rax, 160(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 24(%rsp), %rax
movq %rax, 184(%rsp)
leaq 16(%rsp), %rax
movq %rax, 192(%rsp)
leaq 8(%rsp), %rax
movq %rax, 200(%rsp)
leaq 4(%rsp), %rax
movq %rax, 208(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z11five_kerneliiiiiiPKfS0_PKiPff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff, .-_Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
.globl _Z11five_kerneliiiiiiPKfS0_PKiPff
.type _Z11five_kerneliiiiiiPKfS0_PKiPff, @function
_Z11five_kerneliiiiiiPKfS0_PKiPff:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 40(%rsp)
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z11five_kerneliiiiiiPKfS0_PKiPff, .-_Z11five_kerneliiiiiiPKfS0_PKiPff
.globl _Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.type _Z14fivekernel_gpuiiiiiPKfS0_PKiPff, @function
_Z14fivekernel_gpuiiiiiPKfS0_PKiPff:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebp
movl %esi, 4(%rsp)
movl %edx, %r12d
movl %ecx, 8(%rsp)
movl %r8d, %ebx
movq %r9, %r13
movss %xmm0, 12(%rsp)
leal 255(%r8), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %r14d
movl $256, 36(%rsp)
movl $1, 40(%rsp)
leal 510(%r8), %eax
testl %ecx, %ecx
cmovns %ecx, %eax
sarl $8, %eax
movl %eax, 24(%rsp)
movl %edi, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl %r14d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L13:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
leal -1(%r12,%r14), %eax
cltd
idivl %r14d
pushq 128(%rsp)
.cfi_def_cfa_offset 120
pushq 128(%rsp)
.cfi_def_cfa_offset 128
pushq 128(%rsp)
.cfi_def_cfa_offset 136
pushq %r13
.cfi_def_cfa_offset 144
movss 44(%rsp), %xmm0
movl %eax, %r9d
movl %ebx, %r8d
movl 40(%rsp), %ecx
movl %r12d, %edx
movl 36(%rsp), %esi
movl %ebp, %edi
call _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
addq $32, %rsp
.cfi_def_cfa_offset 112
jmp .L13
.cfi_endproc
.LFE3670:
.size _Z14fivekernel_gpuiiiiiPKfS0_PKiPff, .-_Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.globl _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
.type _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff, @function
_Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff:
.LFB3698:
.cfi_startproc
endbr64
subq $264, %rsp
.cfi_def_cfa_offset 272
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movl %ecx, 64(%rsp)
movl %r8d, 60(%rsp)
movl %r9d, 56(%rsp)
movss %xmm0, 12(%rsp)
movq 272(%rsp), %rax
movq %rax, 48(%rsp)
movq 280(%rsp), %rax
movq %rax, 40(%rsp)
movq 288(%rsp), %rax
movq %rax, 32(%rsp)
movq 296(%rsp), %rax
movq %rax, 24(%rsp)
movq 304(%rsp), %rax
movq %rax, 16(%rsp)
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
leaq 60(%rsp), %rax
movq %rax, 176(%rsp)
leaq 56(%rsp), %rax
movq %rax, 184(%rsp)
leaq 48(%rsp), %rax
movq %rax, 192(%rsp)
leaq 40(%rsp), %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
leaq 24(%rsp), %rax
movq %rax, 216(%rsp)
leaq 16(%rsp), %rax
movq %rax, 224(%rsp)
leaq 12(%rsp), %rax
movq %rax, 232(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 280
pushq 88(%rsp)
.cfi_def_cfa_offset 288
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 272
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff, .-_Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
.globl _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.type _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, @function
_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff:
.LFB3699:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 56(%rsp)
.cfi_def_cfa_offset 32
pushq 56(%rsp)
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, .-_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.globl _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.type _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff, @function
_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebp
movl %esi, 4(%rsp)
movl %edx, %r12d
movl %ecx, 8(%rsp)
movl %r8d, %ebx
movq %r9, %r13
movss %xmm0, 12(%rsp)
leal 255(%r8), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %r14d
movl $256, 36(%rsp)
movl $1, 40(%rsp)
leal 510(%r8), %eax
testl %ecx, %ecx
cmovns %ecx, %eax
sarl $8, %eax
movl %eax, 24(%rsp)
movl %edi, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl %r14d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L25:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
leal -1(%r12,%r14), %eax
cltd
idivl %r14d
subq $8, %rsp
.cfi_def_cfa_offset 120
pushq 144(%rsp)
.cfi_def_cfa_offset 128
pushq 144(%rsp)
.cfi_def_cfa_offset 136
pushq 144(%rsp)
.cfi_def_cfa_offset 144
pushq 144(%rsp)
.cfi_def_cfa_offset 152
pushq %r13
.cfi_def_cfa_offset 160
movss 60(%rsp), %xmm0
movl %eax, %r9d
movl %ebx, %r8d
movl 56(%rsp), %ecx
movl %r12d, %edx
movl 52(%rsp), %esi
movl %ebp, %edi
call _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
addq $48, %rsp
.cfi_def_cfa_offset 112
jmp .L25
.cfi_endproc
.LFE3671:
.size _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff, .-_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff"
.align 8
.LC1:
.string "_Z11five_kerneliiiiiiPKfS0_PKiPff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11five_kerneliiiiiiPKfS0_PKiPff(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
// Maps a 2D texture-space offset (tx, ty) onto one of six radial layers.
// Offsets are quantized to multiples of `radius` with round-to-nearest;
// the (1,1) diagonal cell is assigned its own layer (2), all other cells
// are binned by their quantized L1 sum (shifted past the reserved index 2)
// and clamped to the outermost layer, 5. Returns an int in [0, 5].
__device__ int compute_layer(float tx, float ty, float radius) {
  // Quantize the absolute offsets (the +0.5 rounds to nearest).
  int xi = int(std::abs(tx) / radius + 0.5);
  int yi = int(std::abs(ty) / radius + 0.5);
  // The diagonal cell is a dedicated layer.
  if (xi == 1 && yi == 1) {
    return 2;
  }
  int ring = xi + yi;
  // Skip index 2 (reserved above) once the sum reaches 2, then clamp.
  int layer = (ring < 2) ? ring : ring + 1;
  return (layer > 5) ? 5 : layer;
}
__global__ void five_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, float* out, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
const float* points_array = points + (b * num_points * num_featdim);
float* out_array = out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
float* out_temp = out_array + layer * num_featdim;
const float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] += point_temp[j] / layers_counts[layer];
}
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
float* out_temp = out_array + i * num_featdim;
float* out_front = out_array + front * num_featdim;
float* out_rear = out_array + rear * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] = out_front[j] * weight_front + out_rear[j] * weight_rear;
}
}
}
}
__global__ void fivegrad_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
float* points_array = grad_points + (b * num_points * num_featdim);
const float* out_array = grad_out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
float weights_front[6] = {0, 0, 0, 0, 0, 0};
float weights_rear[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
weights_front[i] = weight_front;
weights_rear[i] = weight_rear;
}
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
const float* out_temp = out_array + layer * num_featdim;
float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
float signal = out_temp[j];
int l = layer - 1;
const float* out_temp_step = out_temp - num_featdim;
while (l >= 0 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_rear[l];
out_temp_step -= num_featdim;
l -= 1;
}
l = layer + 1;
out_temp_step = out_temp + num_featdim;
while (l < 6 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_front[l];
out_temp_step += num_featdim;
l += 1;
}
atomicAdd(&point_temp[j], signal / layers_counts[layer]);
}
}
}
void fivekernel_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, float* out, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
five_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, out, radius);
}
void fivekernelgrad_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
fivegrad_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(
batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, grad_out, grad_points, radius);
} | #include <hip/hip_runtime.h>
#include <iostream>
__device__ int compute_layer(float tx, float ty, float radius) {
int x = int(std::abs(tx) / radius + 0.5);
int y = int(std::abs(ty) / radius + 0.5);
if (x == 1 && y == 1)
return 2;
int c = 0;
if (x + y < 2)
c = x + y;
else
c = x + y + 1;
if (c > 5)
c = 5;
return c;
}
__global__ void five_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, float* out, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
const float* points_array = points + (b * num_points * num_featdim);
float* out_array = out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
float* out_temp = out_array + layer * num_featdim;
const float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] += point_temp[j] / layers_counts[layer];
}
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
float* out_temp = out_array + i * num_featdim;
float* out_front = out_array + front * num_featdim;
float* out_rear = out_array + rear * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] = out_front[j] * weight_front + out_rear[j] * weight_rear;
}
}
}
}
__global__ void fivegrad_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
float* points_array = grad_points + (b * num_points * num_featdim);
const float* out_array = grad_out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
float weights_front[6] = {0, 0, 0, 0, 0, 0};
float weights_rear[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
weights_front[i] = weight_front;
weights_rear[i] = weight_rear;
}
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
const float* out_temp = out_array + layer * num_featdim;
float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
float signal = out_temp[j];
int l = layer - 1;
const float* out_temp_step = out_temp - num_featdim;
while (l >= 0 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_rear[l];
out_temp_step -= num_featdim;
l -= 1;
}
l = layer + 1;
out_temp_step = out_temp + num_featdim;
while (l < 6 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_front[l];
out_temp_step += num_featdim;
l += 1;
}
atomicAdd(&point_temp[j], signal / layers_counts[layer]);
}
}
}
void fivekernel_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, float* out, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
five_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, out, radius);
}
void fivekernelgrad_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
fivegrad_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(
batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, grad_out, grad_points, radius);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
__device__ int compute_layer(float tx, float ty, float radius) {
int x = int(std::abs(tx) / radius + 0.5);
int y = int(std::abs(ty) / radius + 0.5);
if (x == 1 && y == 1)
return 2;
int c = 0;
if (x + y < 2)
c = x + y;
else
c = x + y + 1;
if (c > 5)
c = 5;
return c;
}
__global__ void five_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, float* out, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
const float* points_array = points + (b * num_points * num_featdim);
float* out_array = out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
float* out_temp = out_array + layer * num_featdim;
const float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] += point_temp[j] / layers_counts[layer];
}
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
float* out_temp = out_array + i * num_featdim;
float* out_front = out_array + front * num_featdim;
float* out_rear = out_array + rear * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] = out_front[j] * weight_front + out_rear[j] * weight_rear;
}
}
}
}
__global__ void fivegrad_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
float* points_array = grad_points + (b * num_points * num_featdim);
const float* out_array = grad_out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
float weights_front[6] = {0, 0, 0, 0, 0, 0};
float weights_rear[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
weights_front[i] = weight_front;
weights_rear[i] = weight_rear;
}
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
const float* out_temp = out_array + layer * num_featdim;
float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
float signal = out_temp[j];
int l = layer - 1;
const float* out_temp_step = out_temp - num_featdim;
while (l >= 0 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_rear[l];
out_temp_step -= num_featdim;
l -= 1;
}
l = layer + 1;
out_temp_step = out_temp + num_featdim;
while (l < 6 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_front[l];
out_temp_step += num_featdim;
l += 1;
}
atomicAdd(&point_temp[j], signal / layers_counts[layer]);
}
}
}
void fivekernel_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, float* out, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
five_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, out, radius);
}
void fivekernelgrad_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
fivegrad_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(
batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, grad_out, grad_points, radius);
} | .text
.file "tf_interpolate_g.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff # -- Begin function _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.p2align 4, 0x90
.type _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff,@function
_Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff: # @_Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movss %xmm0, 4(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11five_kerneliiiiiiPKfS0_PKiPff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff, .Lfunc_end0-_Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.cfi_endproc
# -- End function
.globl _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff # -- Begin function _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.p2align 4, 0x90
.type _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff,@function
_Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff: # @_Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movss %xmm0, 4(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 192(%rsp), %rax
movq %rax, 128(%rsp)
leaq 200(%rsp), %rax
movq %rax, 136(%rsp)
leaq 208(%rsp), %rax
movq %rax, 144(%rsp)
leaq 216(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end1:
.size _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, .Lfunc_end1-_Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.cfi_endproc
# -- End function
.globl _Z14fivekernel_gpuiiiiiPKfS0_PKiPff # -- Begin function _Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.p2align 4, 0x90
.type _Z14fivekernel_gpuiiiiiPKfS0_PKiPff,@function
_Z14fivekernel_gpuiiiiiPKfS0_PKiPff: # @_Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movss %xmm0, 8(%rsp) # 4-byte Spill
movq %r9, 40(%rsp) # 8-byte Spill
movl %r8d, %r14d
movl %ecx, %ebp
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
leal 255(%r14), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %ebx
leal 510(%r14), %edi
testl %ecx, %ecx
cmovnsl %ecx, %edi
sarl $8, %edi
movq %r13, %rax
shlq $32, %rax
orq %rax, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %ebx, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq 288(%rsp), %rcx
movq 280(%rsp), %rsi
movq 272(%rsp), %rdi
leal (%r15,%rbx), %eax
decl %eax
cltd
idivl %ebx
movl %r13d, 36(%rsp)
movl %r12d, 32(%rsp)
movl %r15d, 28(%rsp)
movl %ebp, 24(%rsp)
movl %r14d, 20(%rsp)
movl %eax, 16(%rsp)
movq 40(%rsp), %rax # 8-byte Reload
movq %rax, 120(%rsp)
movq %rdi, 112(%rsp)
movq %rsi, 104(%rsp)
movq %rcx, 96(%rsp)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 20(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 120(%rsp), %rax
movq %rax, 176(%rsp)
leaq 112(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rax
movq %rax, 192(%rsp)
leaq 96(%rsp), %rax
movq %rax, 200(%rsp)
leaq 12(%rsp), %rax
movq %rax, 208(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z11five_kerneliiiiiiPKfS0_PKiPff, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z14fivekernel_gpuiiiiiPKfS0_PKiPff, .Lfunc_end2-_Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.cfi_endproc
# -- End function
.globl _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff # -- Begin function _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.p2align 4, 0x90
.type _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff,@function
_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff: # @_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movss %xmm0, (%rsp) # 4-byte Spill
movq %r9, 32(%rsp) # 8-byte Spill
movl %r8d, %r14d
movl %ecx, %ebp
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
leal 255(%r14), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %ebx
leal 510(%r14), %edi
testl %ecx, %ecx
cmovnsl %ecx, %edi
sarl $8, %edi
movq %r13, %rax
shlq $32, %rax
orq %rax, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %ebx, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq 312(%rsp), %rcx
movq 304(%rsp), %rsi
movq 296(%rsp), %rdi
movq 288(%rsp), %r8
leal (%r15,%rbx), %eax
decl %eax
cltd
idivl %ebx
movl %r13d, 28(%rsp)
movl %r12d, 24(%rsp)
movl %r15d, 20(%rsp)
movl %ebp, 16(%rsp)
movl %r14d, 12(%rsp)
movl %eax, 8(%rsp)
movq 32(%rsp), %rax # 8-byte Reload
movq %rax, 120(%rsp)
movq %r8, 112(%rsp)
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rcx, 88(%rsp)
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 4(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
leaq 120(%rsp), %rax
movq %rax, 176(%rsp)
leaq 112(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rax
movq %rax, 192(%rsp)
leaq 96(%rsp), %rax
movq %rax, 200(%rsp)
leaq 88(%rsp), %rax
movq %rax, 208(%rsp)
leaq 4(%rsp), %rax
movq %rax, 216(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff, .Lfunc_end3-_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11five_kerneliiiiiiPKfS0_PKiPff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11five_kerneliiiiiiPKfS0_PKiPff,@object # @_Z11five_kerneliiiiiiPKfS0_PKiPff
.section .rodata,"a",@progbits
.globl _Z11five_kerneliiiiiiPKfS0_PKiPff
.p2align 3, 0x0
_Z11five_kerneliiiiiiPKfS0_PKiPff:
.quad _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.size _Z11five_kerneliiiiiiPKfS0_PKiPff, 8
.type _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff,@object # @_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.globl _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.p2align 3, 0x0
_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff:
.quad _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.size _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11five_kerneliiiiiiPKfS0_PKiPff"
.size .L__unnamed_1, 34
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff"
.size .L__unnamed_2, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.addrsig_sym _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11five_kerneliiiiiiPKfS0_PKiPff
.addrsig_sym _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00074846_00000000-6_tf_interpolate_g.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13compute_layerfff
.type _Z13compute_layerfff, @function
_Z13compute_layerfff:
.LFB3669:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE3669:
.size _Z13compute_layerfff, .-_Z13compute_layerfff
.globl _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
.type _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff, @function
_Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff:
.LFB3696:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movl %edx, 52(%rsp)
movl %ecx, 48(%rsp)
movl %r8d, 44(%rsp)
movl %r9d, 40(%rsp)
movss %xmm0, 4(%rsp)
movq 240(%rsp), %rax
movq %rax, 32(%rsp)
movq 248(%rsp), %rax
movq %rax, 24(%rsp)
movq 256(%rsp), %rax
movq %rax, 16(%rsp)
movq 264(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 52(%rsp), %rax
movq %rax, 144(%rsp)
leaq 48(%rsp), %rax
movq %rax, 152(%rsp)
leaq 44(%rsp), %rax
movq %rax, 160(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 24(%rsp), %rax
movq %rax, 184(%rsp)
leaq 16(%rsp), %rax
movq %rax, 192(%rsp)
leaq 8(%rsp), %rax
movq %rax, 200(%rsp)
leaq 4(%rsp), %rax
movq %rax, 208(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z11five_kerneliiiiiiPKfS0_PKiPff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff, .-_Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
.globl _Z11five_kerneliiiiiiPKfS0_PKiPff
.type _Z11five_kerneliiiiiiPKfS0_PKiPff, @function
_Z11five_kerneliiiiiiPKfS0_PKiPff:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 40(%rsp)
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z11five_kerneliiiiiiPKfS0_PKiPff, .-_Z11five_kerneliiiiiiPKfS0_PKiPff
.globl _Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.type _Z14fivekernel_gpuiiiiiPKfS0_PKiPff, @function
_Z14fivekernel_gpuiiiiiPKfS0_PKiPff:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebp
movl %esi, 4(%rsp)
movl %edx, %r12d
movl %ecx, 8(%rsp)
movl %r8d, %ebx
movq %r9, %r13
movss %xmm0, 12(%rsp)
leal 255(%r8), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %r14d
movl $256, 36(%rsp)
movl $1, 40(%rsp)
leal 510(%r8), %eax
testl %ecx, %ecx
cmovns %ecx, %eax
sarl $8, %eax
movl %eax, 24(%rsp)
movl %edi, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl %r14d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L13:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
leal -1(%r12,%r14), %eax
cltd
idivl %r14d
pushq 128(%rsp)
.cfi_def_cfa_offset 120
pushq 128(%rsp)
.cfi_def_cfa_offset 128
pushq 128(%rsp)
.cfi_def_cfa_offset 136
pushq %r13
.cfi_def_cfa_offset 144
movss 44(%rsp), %xmm0
movl %eax, %r9d
movl %ebx, %r8d
movl 40(%rsp), %ecx
movl %r12d, %edx
movl 36(%rsp), %esi
movl %ebp, %edi
call _Z47__device_stub__Z11five_kerneliiiiiiPKfS0_PKiPffiiiiiiPKfS0_PKiPff
addq $32, %rsp
.cfi_def_cfa_offset 112
jmp .L13
.cfi_endproc
.LFE3670:
.size _Z14fivekernel_gpuiiiiiPKfS0_PKiPff, .-_Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.globl _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
.type _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff, @function
_Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff:
.LFB3698:
.cfi_startproc
endbr64
subq $264, %rsp
.cfi_def_cfa_offset 272
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movl %ecx, 64(%rsp)
movl %r8d, 60(%rsp)
movl %r9d, 56(%rsp)
movss %xmm0, 12(%rsp)
movq 272(%rsp), %rax
movq %rax, 48(%rsp)
movq 280(%rsp), %rax
movq %rax, 40(%rsp)
movq 288(%rsp), %rax
movq %rax, 32(%rsp)
movq 296(%rsp), %rax
movq %rax, 24(%rsp)
movq 304(%rsp), %rax
movq %rax, 16(%rsp)
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
leaq 60(%rsp), %rax
movq %rax, 176(%rsp)
leaq 56(%rsp), %rax
movq %rax, 184(%rsp)
leaq 48(%rsp), %rax
movq %rax, 192(%rsp)
leaq 40(%rsp), %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
leaq 24(%rsp), %rax
movq %rax, 216(%rsp)
leaq 16(%rsp), %rax
movq %rax, 224(%rsp)
leaq 12(%rsp), %rax
movq %rax, 232(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 280
pushq 88(%rsp)
.cfi_def_cfa_offset 288
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 272
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff, .-_Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
.globl _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.type _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, @function
_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff:
.LFB3699:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 56(%rsp)
.cfi_def_cfa_offset 32
pushq 56(%rsp)
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, .-_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.globl _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.type _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff, @function
_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebp
movl %esi, 4(%rsp)
movl %edx, %r12d
movl %ecx, 8(%rsp)
movl %r8d, %ebx
movq %r9, %r13
movss %xmm0, 12(%rsp)
leal 255(%r8), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %r14d
movl $256, 36(%rsp)
movl $1, 40(%rsp)
leal 510(%r8), %eax
testl %ecx, %ecx
cmovns %ecx, %eax
sarl $8, %eax
movl %eax, 24(%rsp)
movl %edi, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl %r14d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L25:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
leal -1(%r12,%r14), %eax
cltd
idivl %r14d
subq $8, %rsp
.cfi_def_cfa_offset 120
pushq 144(%rsp)
.cfi_def_cfa_offset 128
pushq 144(%rsp)
.cfi_def_cfa_offset 136
pushq 144(%rsp)
.cfi_def_cfa_offset 144
pushq 144(%rsp)
.cfi_def_cfa_offset 152
pushq %r13
.cfi_def_cfa_offset 160
movss 60(%rsp), %xmm0
movl %eax, %r9d
movl %ebx, %r8d
movl 56(%rsp), %ecx
movl %r12d, %edx
movl 52(%rsp), %esi
movl %ebp, %edi
call _Z54__device_stub__Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_PffiiiiiiPKfS0_PKiS0_Pff
addq $48, %rsp
.cfi_def_cfa_offset 112
jmp .L25
.cfi_endproc
.LFE3671:
.size _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff, .-_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff"
.align 8
.LC1:
.string "_Z11five_kerneliiiiiiPKfS0_PKiPff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11five_kerneliiiiiiPKfS0_PKiPff(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "tf_interpolate_g.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff # -- Begin function _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.p2align 4, 0x90
.type _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff,@function
_Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff: # @_Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movss %xmm0, 4(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11five_kerneliiiiiiPKfS0_PKiPff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff, .Lfunc_end0-_Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.cfi_endproc
# -- End function
.globl _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff # -- Begin function _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.p2align 4, 0x90
.type _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff,@function
_Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff: # @_Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movss %xmm0, 4(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 192(%rsp), %rax
movq %rax, 128(%rsp)
leaq 200(%rsp), %rax
movq %rax, 136(%rsp)
leaq 208(%rsp), %rax
movq %rax, 144(%rsp)
leaq 216(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end1:
.size _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, .Lfunc_end1-_Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.cfi_endproc
# -- End function
.globl _Z14fivekernel_gpuiiiiiPKfS0_PKiPff # -- Begin function _Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.p2align 4, 0x90
.type _Z14fivekernel_gpuiiiiiPKfS0_PKiPff,@function
_Z14fivekernel_gpuiiiiiPKfS0_PKiPff: # @_Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movss %xmm0, 8(%rsp) # 4-byte Spill
movq %r9, 40(%rsp) # 8-byte Spill
movl %r8d, %r14d
movl %ecx, %ebp
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
leal 255(%r14), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %ebx
leal 510(%r14), %edi
testl %ecx, %ecx
cmovnsl %ecx, %edi
sarl $8, %edi
movq %r13, %rax
shlq $32, %rax
orq %rax, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %ebx, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq 288(%rsp), %rcx
movq 280(%rsp), %rsi
movq 272(%rsp), %rdi
leal (%r15,%rbx), %eax
decl %eax
cltd
idivl %ebx
movl %r13d, 36(%rsp)
movl %r12d, 32(%rsp)
movl %r15d, 28(%rsp)
movl %ebp, 24(%rsp)
movl %r14d, 20(%rsp)
movl %eax, 16(%rsp)
movq 40(%rsp), %rax # 8-byte Reload
movq %rax, 120(%rsp)
movq %rdi, 112(%rsp)
movq %rsi, 104(%rsp)
movq %rcx, 96(%rsp)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 20(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 120(%rsp), %rax
movq %rax, 176(%rsp)
leaq 112(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rax
movq %rax, 192(%rsp)
leaq 96(%rsp), %rax
movq %rax, 200(%rsp)
leaq 12(%rsp), %rax
movq %rax, 208(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z11five_kerneliiiiiiPKfS0_PKiPff, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z14fivekernel_gpuiiiiiPKfS0_PKiPff, .Lfunc_end2-_Z14fivekernel_gpuiiiiiPKfS0_PKiPff
.cfi_endproc
# -- End function
.globl _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff # -- Begin function _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.p2align 4, 0x90
.type _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff,@function
_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff: # @_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movss %xmm0, (%rsp) # 4-byte Spill
movq %r9, 32(%rsp) # 8-byte Spill
movl %r8d, %r14d
movl %ecx, %ebp
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
leal 255(%r14), %ecx
movl %ecx, %eax
cltd
idivl %r8d
movl %eax, %ebx
leal 510(%r14), %edi
testl %ecx, %ecx
cmovnsl %ecx, %edi
sarl $8, %edi
movq %r13, %rax
shlq $32, %rax
orq %rax, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %ebx, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq 312(%rsp), %rcx
movq 304(%rsp), %rsi
movq 296(%rsp), %rdi
movq 288(%rsp), %r8
leal (%r15,%rbx), %eax
decl %eax
cltd
idivl %ebx
movl %r13d, 28(%rsp)
movl %r12d, 24(%rsp)
movl %r15d, 20(%rsp)
movl %ebp, 16(%rsp)
movl %r14d, 12(%rsp)
movl %eax, 8(%rsp)
movq 32(%rsp), %rax # 8-byte Reload
movq %rax, 120(%rsp)
movq %r8, 112(%rsp)
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rcx, 88(%rsp)
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 4(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
leaq 120(%rsp), %rax
movq %rax, 176(%rsp)
leaq 112(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rax
movq %rax, 192(%rsp)
leaq 96(%rsp), %rax
movq %rax, 200(%rsp)
leaq 88(%rsp), %rax
movq %rax, 208(%rsp)
leaq 4(%rsp), %rax
movq %rax, 216(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff, .Lfunc_end3-_Z18fivekernelgrad_gpuiiiiiPKfS0_PKiS0_Pff
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11five_kerneliiiiiiPKfS0_PKiPff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11five_kerneliiiiiiPKfS0_PKiPff,@object # @_Z11five_kerneliiiiiiPKfS0_PKiPff
.section .rodata,"a",@progbits
.globl _Z11five_kerneliiiiiiPKfS0_PKiPff
.p2align 3, 0x0
_Z11five_kerneliiiiiiPKfS0_PKiPff:
.quad _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.size _Z11five_kerneliiiiiiPKfS0_PKiPff, 8
.type _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff,@object # @_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.globl _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.p2align 3, 0x0
_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff:
.quad _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.size _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11five_kerneliiiiiiPKfS0_PKiPff"
.size .L__unnamed_1, 34
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff"
.size .L__unnamed_2, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__five_kerneliiiiiiPKfS0_PKiPff
.addrsig_sym _Z30__device_stub__fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11five_kerneliiiiiiPKfS0_PKiPff
.addrsig_sym _Z15fivegrad_kerneliiiiiiPKfS0_PKiS0_Pff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
#define N (1000*1000*8)
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
__global__
void saxpy (int n, float a, float *x, float *y) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
/* Only run calculation if we are in range */
/* where i is valid. It can be out of range */
/* if our vector is shorter than a */
/* multiple of the blocksize */
if (i<n) {
y[i]=a*x[i]+y[i];
}
}
int main(int argc, char **argv) {
int i;
float *x, *y, *dev_x, *dev_y;
float a;
float loops=1.0;
if (argc>1) {
loops=atof(argv[1]);
}
/* Allocate vectors on CPU */
x=(float *)malloc(N*sizeof(float));
y=(float *)malloc(N*sizeof(float));
/* Allocate vectors on GPU */
cudaMalloc((void **)&dev_x,N*sizeof(float));
cudaMalloc((void **)&dev_y,N*sizeof(float));
/* Initialize the host vectors */
for(i=0;i<N;i++) {
x[i]=(float)i;
y[i]=(float)(10.0*i);
}
cudaMemcpy(dev_x,x,N*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_y,y,N*sizeof(float),cudaMemcpyHostToDevice);
printf("Size: %d\n",(N+255)/256);
/* Perform SAXPY */
for(a=0;a<loops;a+=1.0) {
saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y);
}
cudaMemcpy(y,dev_y,N*sizeof(float),cudaMemcpyDeviceToHost);
/* results */
i=100;
printf("y[%d]=%f %f\n",i,y[i],y[i+1]);
/* 0: a=0, x=100, y=1000 ::::::: y=1000 */
/* 1: a=1, x=100, y=1000 ::::::: y=1100 */
/* 2: a=2, x=100, y=1100 ::::::: y=1300 */
/* 3: a=3, x=100, y=1300 ::::::: y=1600 */
/* 4: a=4, x=100, y=1600 ::::::: y=2000 */
cudaFree(dev_x);
cudaFree(dev_y);
return 0;
} | code for sm_80
Function : _Z5saxpyifPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
#define N (1000*1000*8)
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
__global__
void saxpy (int n, float a, float *x, float *y) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
/* Only run calculation if we are in range */
/* where i is valid. It can be out of range */
/* if our vector is shorter than a */
/* multiple of the blocksize */
if (i<n) {
y[i]=a*x[i]+y[i];
}
}
int main(int argc, char **argv) {
int i;
float *x, *y, *dev_x, *dev_y;
float a;
float loops=1.0;
if (argc>1) {
loops=atof(argv[1]);
}
/* Allocate vectors on CPU */
x=(float *)malloc(N*sizeof(float));
y=(float *)malloc(N*sizeof(float));
/* Allocate vectors on GPU */
cudaMalloc((void **)&dev_x,N*sizeof(float));
cudaMalloc((void **)&dev_y,N*sizeof(float));
/* Initialize the host vectors */
for(i=0;i<N;i++) {
x[i]=(float)i;
y[i]=(float)(10.0*i);
}
cudaMemcpy(dev_x,x,N*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_y,y,N*sizeof(float),cudaMemcpyHostToDevice);
printf("Size: %d\n",(N+255)/256);
/* Perform SAXPY */
for(a=0;a<loops;a+=1.0) {
saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y);
}
cudaMemcpy(y,dev_y,N*sizeof(float),cudaMemcpyDeviceToHost);
/* results */
i=100;
printf("y[%d]=%f %f\n",i,y[i],y[i+1]);
/* 0: a=0, x=100, y=1000 ::::::: y=1000 */
/* 1: a=1, x=100, y=1000 ::::::: y=1100 */
/* 2: a=2, x=100, y=1100 ::::::: y=1300 */
/* 3: a=3, x=100, y=1300 ::::::: y=1600 */
/* 4: a=4, x=100, y=1600 ::::::: y=2000 */
cudaFree(dev_x);
cudaFree(dev_y);
return 0;
} | .file "tmpxft_000fa165_00000000-6_saxpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z5saxpyifPfS_ifPfS_
.type _Z28__device_stub__Z5saxpyifPfS_ifPfS_, @function
_Z28__device_stub__Z5saxpyifPfS_ifPfS_:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movss %xmm0, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5saxpyifPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z28__device_stub__Z5saxpyifPfS_ifPfS_, .-_Z28__device_stub__Z5saxpyifPfS_ifPfS_
.globl _Z5saxpyifPfS_
.type _Z5saxpyifPfS_, @function
_Z5saxpyifPfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z5saxpyifPfS_ifPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z5saxpyifPfS_, .-_Z5saxpyifPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "Size: %d\n"
.LC4:
.string "y[%d]=%f %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl .LC0(%rip), %r12d
cmpl $1, %edi
jg .L25
.L12:
movl $32000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $32000000, %edi
call malloc@PLT
movq %rax, %rbx
leaq 16(%rsp), %rdi
movl $32000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $32000000, %esi
call cudaMalloc@PLT
movl $0, %eax
movsd .LC2(%rip), %xmm1
.L13:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq $8000000, %rax
jne .L13
movl $1, %ecx
movl $32000000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $32000000, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $31250, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0x00000000, 12(%rsp)
pxor %xmm0, %xmm0
movd %r12d, %xmm4
comiss %xmm0, %xmm4
ja .L17
.L14:
movl $2, %ecx
movl $32000000, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtss2sd 400(%rbx), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 404(%rbx), %xmm1
movl $100, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm5, %xmm5
cvtsd2ss %xmm0, %xmm5
movd %xmm5, %r12d
jmp .L12
.L16:
movss 12(%rsp), %xmm2
addss .LC0(%rip), %xmm2
movss %xmm2, 12(%rsp)
movd %r12d, %xmm3
comiss %xmm2, %xmm3
jbe .L14
.L17:
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $31250, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L16
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movss 12(%rsp), %xmm0
movl $8000000, %edi
call _Z28__device_stub__Z5saxpyifPfS_ifPfS_
jmp .L16
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z5saxpyifPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z5saxpyifPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1076101120
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
#define N (1000*1000*8)
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
__global__
void saxpy (int n, float a, float *x, float *y) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
/* Only run calculation if we are in range */
/* where i is valid. It can be out of range */
/* if our vector is shorter than a */
/* multiple of the blocksize */
if (i<n) {
y[i]=a*x[i]+y[i];
}
}
int main(int argc, char **argv) {
int i;
float *x, *y, *dev_x, *dev_y;
float a;
float loops=1.0;
if (argc>1) {
loops=atof(argv[1]);
}
/* Allocate vectors on CPU */
x=(float *)malloc(N*sizeof(float));
y=(float *)malloc(N*sizeof(float));
/* Allocate vectors on GPU */
cudaMalloc((void **)&dev_x,N*sizeof(float));
cudaMalloc((void **)&dev_y,N*sizeof(float));
/* Initialize the host vectors */
for(i=0;i<N;i++) {
x[i]=(float)i;
y[i]=(float)(10.0*i);
}
cudaMemcpy(dev_x,x,N*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_y,y,N*sizeof(float),cudaMemcpyHostToDevice);
printf("Size: %d\n",(N+255)/256);
/* Perform SAXPY */
for(a=0;a<loops;a+=1.0) {
saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y);
}
cudaMemcpy(y,dev_y,N*sizeof(float),cudaMemcpyDeviceToHost);
/* results */
i=100;
printf("y[%d]=%f %f\n",i,y[i],y[i+1]);
/* 0: a=0, x=100, y=1000 ::::::: y=1000 */
/* 1: a=1, x=100, y=1000 ::::::: y=1100 */
/* 2: a=2, x=100, y=1100 ::::::: y=1300 */
/* 3: a=3, x=100, y=1300 ::::::: y=1600 */
/* 4: a=4, x=100, y=1600 ::::::: y=2000 */
cudaFree(dev_x);
cudaFree(dev_y);
return 0;
} | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N (1000*1000*8)
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
__global__
void saxpy (int n, float a, float *x, float *y) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
/* Only run calculation if we are in range */
/* where i is valid. It can be out of range */
/* if our vector is shorter than a */
/* multiple of the blocksize */
if (i<n) {
y[i]=a*x[i]+y[i];
}
}
int main(int argc, char **argv) {
int i;
float *x, *y, *dev_x, *dev_y;
float a;
float loops=1.0;
if (argc>1) {
loops=atof(argv[1]);
}
/* Allocate vectors on CPU */
x=(float *)malloc(N*sizeof(float));
y=(float *)malloc(N*sizeof(float));
/* Allocate vectors on GPU */
hipMalloc((void **)&dev_x,N*sizeof(float));
hipMalloc((void **)&dev_y,N*sizeof(float));
/* Initialize the host vectors */
for(i=0;i<N;i++) {
x[i]=(float)i;
y[i]=(float)(10.0*i);
}
hipMemcpy(dev_x,x,N*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_y,y,N*sizeof(float),hipMemcpyHostToDevice);
printf("Size: %d\n",(N+255)/256);
/* Perform SAXPY */
for(a=0;a<loops;a+=1.0) {
saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y);
}
hipMemcpy(y,dev_y,N*sizeof(float),hipMemcpyDeviceToHost);
/* results */
i=100;
printf("y[%d]=%f %f\n",i,y[i],y[i+1]);
/* 0: a=0, x=100, y=1000 ::::::: y=1000 */
/* 1: a=1, x=100, y=1000 ::::::: y=1100 */
/* 2: a=2, x=100, y=1100 ::::::: y=1300 */
/* 3: a=3, x=100, y=1300 ::::::: y=1600 */
/* 4: a=4, x=100, y=1600 ::::::: y=2000 */
hipFree(dev_x);
hipFree(dev_y);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N (1000*1000*8)
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
__global__
void saxpy (int n, float a, float *x, float *y) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
/* Only run calculation if we are in range */
/* where i is valid. It can be out of range */
/* if our vector is shorter than a */
/* multiple of the blocksize */
if (i<n) {
y[i]=a*x[i]+y[i];
}
}
int main(int argc, char **argv) {
int i;
float *x, *y, *dev_x, *dev_y;
float a;
float loops=1.0;
if (argc>1) {
loops=atof(argv[1]);
}
/* Allocate vectors on CPU */
x=(float *)malloc(N*sizeof(float));
y=(float *)malloc(N*sizeof(float));
/* Allocate vectors on GPU */
hipMalloc((void **)&dev_x,N*sizeof(float));
hipMalloc((void **)&dev_y,N*sizeof(float));
/* Initialize the host vectors */
for(i=0;i<N;i++) {
x[i]=(float)i;
y[i]=(float)(10.0*i);
}
hipMemcpy(dev_x,x,N*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_y,y,N*sizeof(float),hipMemcpyHostToDevice);
printf("Size: %d\n",(N+255)/256);
/* Perform SAXPY */
for(a=0;a<loops;a+=1.0) {
saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y);
}
hipMemcpy(y,dev_y,N*sizeof(float),hipMemcpyDeviceToHost);
/* results */
i=100;
printf("y[%d]=%f %f\n",i,y[i],y[i+1]);
/* 0: a=0, x=100, y=1000 ::::::: y=1000 */
/* 1: a=1, x=100, y=1000 ::::::: y=1100 */
/* 2: a=2, x=100, y=1100 ::::::: y=1300 */
/* 3: a=3, x=100, y=1300 ::::::: y=1600 */
/* 4: a=4, x=100, y=1600 ::::::: y=2000 */
hipFree(dev_x);
hipFree(dev_y);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5saxpyifPfS_
.globl _Z5saxpyifPfS_
.p2align 8
.type _Z5saxpyifPfS_,@function
_Z5saxpyifPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b32 s0, s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5saxpyifPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5saxpyifPfS_, .Lfunc_end0-_Z5saxpyifPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5saxpyifPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5saxpyifPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N (1000*1000*8)
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
__global__
void saxpy (int n, float a, float *x, float *y) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
/* Only run calculation if we are in range */
/* where i is valid. It can be out of range */
/* if our vector is shorter than a */
/* multiple of the blocksize */
if (i<n) {
y[i]=a*x[i]+y[i];
}
}
int main(int argc, char **argv) {
int i;
float *x, *y, *dev_x, *dev_y;
float a;
float loops=1.0;
if (argc>1) {
loops=atof(argv[1]);
}
/* Allocate vectors on CPU */
x=(float *)malloc(N*sizeof(float));
y=(float *)malloc(N*sizeof(float));
/* Allocate vectors on GPU */
hipMalloc((void **)&dev_x,N*sizeof(float));
hipMalloc((void **)&dev_y,N*sizeof(float));
/* Initialize the host vectors */
for(i=0;i<N;i++) {
x[i]=(float)i;
y[i]=(float)(10.0*i);
}
hipMemcpy(dev_x,x,N*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(dev_y,y,N*sizeof(float),hipMemcpyHostToDevice);
printf("Size: %d\n",(N+255)/256);
/* Perform SAXPY */
for(a=0;a<loops;a+=1.0) {
saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y);
}
hipMemcpy(y,dev_y,N*sizeof(float),hipMemcpyDeviceToHost);
/* results */
i=100;
printf("y[%d]=%f %f\n",i,y[i],y[i+1]);
/* 0: a=0, x=100, y=1000 ::::::: y=1000 */
/* 1: a=1, x=100, y=1000 ::::::: y=1100 */
/* 2: a=2, x=100, y=1100 ::::::: y=1300 */
/* 3: a=3, x=100, y=1300 ::::::: y=1600 */
/* 4: a=4, x=100, y=1600 ::::::: y=2000 */
hipFree(dev_x);
hipFree(dev_y);
return 0;
} | .text
.file "saxpy.hip"
.globl _Z20__device_stub__saxpyifPfS_ # -- Begin function _Z20__device_stub__saxpyifPfS_
.p2align 4, 0x90
.type _Z20__device_stub__saxpyifPfS_,@function
_Z20__device_stub__saxpyifPfS_: # @_Z20__device_stub__saxpyifPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5saxpyifPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z20__device_stub__saxpyifPfS_, .Lfunc_end0-_Z20__device_stub__saxpyifPfS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x3f800000 # float 1
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_1:
.quad 0x4024000000000000 # double 10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $2, %edi
jl .LBB1_1
# %bb.2:
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
jmp .LBB1_3
.LBB1_1:
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.LBB1_3:
movss %xmm0, (%rsp) # 4-byte Spill
movl $32000000, %edi # imm = 0x1E84800
callq malloc
movq %rax, %r14
movl $32000000, %edi # imm = 0x1E84800
callq malloc
movq %rax, %rbx
leaq 16(%rsp), %rdi
movl $32000000, %esi # imm = 0x1E84800
callq hipMalloc
leaq 8(%rsp), %rdi
movl $32000000, %esi # imm = 0x1E84800
callq hipMalloc
xorl %eax, %eax
movsd .LCPI1_1(%rip), %xmm0 # xmm0 = mem[0],zero
.p2align 4, 0x90
.LBB1_4: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
mulsd %xmm0, %xmm2
cvtsd2ss %xmm2, %xmm2
movss %xmm1, (%r14,%rax,4)
movss %xmm2, (%rbx,%rax,4)
incq %rax
cmpq $8000000, %rax # imm = 0x7A1200
jne .LBB1_4
# %bb.5:
movq 16(%rsp), %rdi
movl $32000000, %edx # imm = 0x1E84800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $32000000, %edx # imm = 0x1E84800
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $.L.str, %edi
movl $31250, %esi # imm = 0x7A12
xorl %eax, %eax
callq printf
xorps %xmm2, %xmm2
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
movss %xmm1, 4(%rsp) # 4-byte Spill
ucomiss %xmm2, %xmm0
jbe .LBB1_10
# %bb.6: # %.lr.ph
movabsq $4294967552, %r14 # imm = 0x100000100
leaq 30994(%r14), %r15
leaq 40(%rsp), %rbp
leaq 32(%rsp), %r12
leaq 96(%rsp), %r13
jmp .LBB1_7
.p2align 4, 0x90
.LBB1_9: # in Loop: Header=BB1_7 Depth=1
movss 4(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
addss .LCPI1_0(%rip), %xmm1
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm1, 4(%rsp) # 4-byte Spill
ucomiss %xmm1, %xmm0
jbe .LBB1_10
.LBB1_7: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8: # in Loop: Header=BB1_7 Depth=1
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl $8000000, 28(%rsp) # imm = 0x7A1200
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 24(%rsp)
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
movq %rbp, %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
movl $_Z5saxpyifPfS_, %edi
movq %r13, %r9
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_9
.LBB1_10: # %._crit_edge
movq 8(%rsp), %rsi
movl $32000000, %edx # imm = 0x1E84800
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movss 400(%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 404(%rbx), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str.1, %edi
movl $100, %esi
movb $2, %al
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5saxpyifPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5saxpyifPfS_,@object # @_Z5saxpyifPfS_
.section .rodata,"a",@progbits
.globl _Z5saxpyifPfS_
.p2align 3, 0x0
_Z5saxpyifPfS_:
.quad _Z20__device_stub__saxpyifPfS_
.size _Z5saxpyifPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Size: %d\n"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "y[%d]=%f %f\n"
.size .L.str.1, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5saxpyifPfS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__saxpyifPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5saxpyifPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z5saxpyifPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5saxpyifPfS_
.globl _Z5saxpyifPfS_
.p2align 8
.type _Z5saxpyifPfS_,@function
_Z5saxpyifPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b32 s0, s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5saxpyifPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5saxpyifPfS_, .Lfunc_end0-_Z5saxpyifPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5saxpyifPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5saxpyifPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000fa165_00000000-6_saxpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z5saxpyifPfS_ifPfS_
.type _Z28__device_stub__Z5saxpyifPfS_ifPfS_, @function
_Z28__device_stub__Z5saxpyifPfS_ifPfS_:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movss %xmm0, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5saxpyifPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z28__device_stub__Z5saxpyifPfS_ifPfS_, .-_Z28__device_stub__Z5saxpyifPfS_ifPfS_
.globl _Z5saxpyifPfS_
.type _Z5saxpyifPfS_, @function
_Z5saxpyifPfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z5saxpyifPfS_ifPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z5saxpyifPfS_, .-_Z5saxpyifPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "Size: %d\n"
.LC4:
.string "y[%d]=%f %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl .LC0(%rip), %r12d
cmpl $1, %edi
jg .L25
.L12:
movl $32000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $32000000, %edi
call malloc@PLT
movq %rax, %rbx
leaq 16(%rsp), %rdi
movl $32000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $32000000, %esi
call cudaMalloc@PLT
movl $0, %eax
movsd .LC2(%rip), %xmm1
.L13:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq $8000000, %rax
jne .L13
movl $1, %ecx
movl $32000000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $32000000, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $31250, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0x00000000, 12(%rsp)
pxor %xmm0, %xmm0
movd %r12d, %xmm4
comiss %xmm0, %xmm4
ja .L17
.L14:
movl $2, %ecx
movl $32000000, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtss2sd 400(%rbx), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 404(%rbx), %xmm1
movl $100, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm5, %xmm5
cvtsd2ss %xmm0, %xmm5
movd %xmm5, %r12d
jmp .L12
.L16:
movss 12(%rsp), %xmm2
addss .LC0(%rip), %xmm2
movss %xmm2, 12(%rsp)
movd %r12d, %xmm3
comiss %xmm2, %xmm3
jbe .L14
.L17:
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $31250, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L16
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movss 12(%rsp), %xmm0
movl $8000000, %edi
call _Z28__device_stub__Z5saxpyifPfS_ifPfS_
jmp .L16
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z5saxpyifPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z5saxpyifPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1076101120
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "saxpy.hip"
.globl _Z20__device_stub__saxpyifPfS_ # -- Begin function _Z20__device_stub__saxpyifPfS_
.p2align 4, 0x90
.type _Z20__device_stub__saxpyifPfS_,@function
_Z20__device_stub__saxpyifPfS_: # @_Z20__device_stub__saxpyifPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5saxpyifPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z20__device_stub__saxpyifPfS_, .Lfunc_end0-_Z20__device_stub__saxpyifPfS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x3f800000 # float 1
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_1:
.quad 0x4024000000000000 # double 10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $2, %edi
jl .LBB1_1
# %bb.2:
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
jmp .LBB1_3
.LBB1_1:
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.LBB1_3:
movss %xmm0, (%rsp) # 4-byte Spill
movl $32000000, %edi # imm = 0x1E84800
callq malloc
movq %rax, %r14
movl $32000000, %edi # imm = 0x1E84800
callq malloc
movq %rax, %rbx
leaq 16(%rsp), %rdi
movl $32000000, %esi # imm = 0x1E84800
callq hipMalloc
leaq 8(%rsp), %rdi
movl $32000000, %esi # imm = 0x1E84800
callq hipMalloc
xorl %eax, %eax
movsd .LCPI1_1(%rip), %xmm0 # xmm0 = mem[0],zero
.p2align 4, 0x90
.LBB1_4: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
mulsd %xmm0, %xmm2
cvtsd2ss %xmm2, %xmm2
movss %xmm1, (%r14,%rax,4)
movss %xmm2, (%rbx,%rax,4)
incq %rax
cmpq $8000000, %rax # imm = 0x7A1200
jne .LBB1_4
# %bb.5:
movq 16(%rsp), %rdi
movl $32000000, %edx # imm = 0x1E84800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $32000000, %edx # imm = 0x1E84800
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $.L.str, %edi
movl $31250, %esi # imm = 0x7A12
xorl %eax, %eax
callq printf
xorps %xmm2, %xmm2
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
movss %xmm1, 4(%rsp) # 4-byte Spill
ucomiss %xmm2, %xmm0
jbe .LBB1_10
# %bb.6: # %.lr.ph
movabsq $4294967552, %r14 # imm = 0x100000100
leaq 30994(%r14), %r15
leaq 40(%rsp), %rbp
leaq 32(%rsp), %r12
leaq 96(%rsp), %r13
jmp .LBB1_7
.p2align 4, 0x90
.LBB1_9: # in Loop: Header=BB1_7 Depth=1
movss 4(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
addss .LCPI1_0(%rip), %xmm1
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm1, 4(%rsp) # 4-byte Spill
ucomiss %xmm1, %xmm0
jbe .LBB1_10
.LBB1_7: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8: # in Loop: Header=BB1_7 Depth=1
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl $8000000, 28(%rsp) # imm = 0x7A1200
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 24(%rsp)
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
movq %rbp, %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
movl $_Z5saxpyifPfS_, %edi
movq %r13, %r9
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_9
.LBB1_10: # %._crit_edge
movq 8(%rsp), %rsi
movl $32000000, %edx # imm = 0x1E84800
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movss 400(%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 404(%rbx), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str.1, %edi
movl $100, %esi
movb $2, %al
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5saxpyifPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5saxpyifPfS_,@object # @_Z5saxpyifPfS_
.section .rodata,"a",@progbits
.globl _Z5saxpyifPfS_
.p2align 3, 0x0
_Z5saxpyifPfS_:
.quad _Z20__device_stub__saxpyifPfS_
.size _Z5saxpyifPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Size: %d\n"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "y[%d]=%f %f\n"
.size .L.str.1, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5saxpyifPfS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__saxpyifPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5saxpyifPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaMalloc(&buffer_s[i], size);
cudaMalloc(&buffer_d[i], size);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
cudaSetDevice(g[i]);
cudaDeviceEnablePeerAccess(g[j], 0);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceEnablePeerAccess(g[i], 0);
cudaDeviceSynchronize();
}
}
}
}
}
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaFree(buffer_s[i]);
cudaFree(buffer_d[i]);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
cudaSetDevice(g[i]);
cudaDeviceDisablePeerAccess(g[j]);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceDisablePeerAccess(g[i]);
cudaDeviceSynchronize();
}
}
}
}
}
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
cudaEventRecord(start);
cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float time_ms=0.0;
cudaEventElapsedTime(&time_ms, start, stop);
time_taken[i*g.size()+j] = time_ms*1e3;
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", time_taken[i*g.size()+j]);
}
printf("\n");
}
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", bw[i*g.size()+j]);
printf("\n");
}
}
void perf_analyze(size_t size)
{
vector<int*> buffer_s(g.size());
vector<int*> buffer_d(g.size());
configure(size, buffer_s, buffer_d);
// Cyclic
blocked_copy(size, buffer_s, buffer_d);
reset(size, buffer_s, buffer_d);
}
int main(int argc, char** argv)
{
// NVLink D<->D performance
size_t size = (1<<30);
if (!g.size())
{
int n;
printf("Using all 8 GPUs\n");
cudaGetDeviceCount(&n);
for (int i=0; i<n; i++)
g.push_back(i);
}
//define size
perf_analyze(size);
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaMalloc(&buffer_s[i], size);
cudaMalloc(&buffer_d[i], size);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
cudaSetDevice(g[i]);
cudaDeviceEnablePeerAccess(g[j], 0);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceEnablePeerAccess(g[i], 0);
cudaDeviceSynchronize();
}
}
}
}
}
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaFree(buffer_s[i]);
cudaFree(buffer_d[i]);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
cudaSetDevice(g[i]);
cudaDeviceDisablePeerAccess(g[j]);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceDisablePeerAccess(g[i]);
cudaDeviceSynchronize();
}
}
}
}
}
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
cudaEventRecord(start);
cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float time_ms=0.0;
cudaEventElapsedTime(&time_ms, start, stop);
time_taken[i*g.size()+j] = time_ms*1e3;
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", time_taken[i*g.size()+j]);
}
printf("\n");
}
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", bw[i*g.size()+j]);
printf("\n");
}
}
void perf_analyze(size_t size)
{
vector<int*> buffer_s(g.size());
vector<int*> buffer_d(g.size());
configure(size, buffer_s, buffer_d);
// Cyclic
blocked_copy(size, buffer_s, buffer_d);
reset(size, buffer_s, buffer_d);
}
int main(int argc, char** argv)
{
// NVLink D<->D performance
size_t size = (1<<30);
if (!g.size())
{
int n;
printf("Using all 8 GPUs\n");
cudaGetDeviceCount(&n);
for (int i=0; i<n; i++)
g.push_back(i);
}
//define size
perf_analyze(size);
return 0;
} | .file "tmpxft_00055101_00000000-6_nvlink_blocked.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2933:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2933:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .text._ZNSt6vectorIiSaIiEED2Ev,"axG",@progbits,_ZNSt6vectorIiSaIiEED5Ev,comdat
.align 2
.weak _ZNSt6vectorIiSaIiEED2Ev
.type _ZNSt6vectorIiSaIiEED2Ev, @function
_ZNSt6vectorIiSaIiEED2Ev:
.LFB3242:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L6
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L6:
ret
.cfi_endproc
.LFE3242:
.size _ZNSt6vectorIiSaIiEED2Ev, .-_ZNSt6vectorIiSaIiEED2Ev
.weak _ZNSt6vectorIiSaIiEED1Ev
.set _ZNSt6vectorIiSaIiEED1Ev,_ZNSt6vectorIiSaIiEED2Ev
.text
.globl _Z9configuremRSt6vectorIPiSaIS0_EES3_
.type _Z9configuremRSt6vectorIPiSaIS0_EES3_, @function
_Z9configuremRSt6vectorIPiSaIS0_EES3_:
.LFB2926:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rsi, (%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq g(%rip), %rdx
cmpq %rdx, 8+g(%rip)
je .L9
movq %rdi, %r15
movl $0, %r12d
leaq g(%rip), %rbp
leaq 20(%rsp), %r14
jmp .L14
.L12:
addq $1, %rbx
movq 0(%rbp), %rdx
movq 8(%rbp), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %rbx
jnb .L11
.L13:
cmpl %ebx, %r12d
je .L12
movl (%rdx,%rbx,4), %eax
movl (%rdx,%r13), %esi
movl %eax, %edx
movq %r14, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $0, 20(%rsp)
je .L12
movq 0(%rbp), %rax
movl (%rax,%r13), %edi
call cudaSetDevice@PLT
movq 0(%rbp), %rax
movl (%rax,%rbx,4), %edi
movl $0, %esi
call cudaDeviceEnablePeerAccess@PLT
call cudaDeviceSynchronize@PLT
movq 0(%rbp), %rax
movl (%rax,%rbx,4), %edi
call cudaSetDevice@PLT
movq 0(%rbp), %rax
movl (%rax,%r13), %edi
movl $0, %esi
call cudaDeviceEnablePeerAccess@PLT
call cudaDeviceSynchronize@PLT
jmp .L12
.L11:
movq 0(%rbp), %rdx
addq $1, %r12
movq 8(%rbp), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %r12
jnb .L9
.L14:
leaq 0(,%r12,4), %r13
movl (%rdx,%r12,4), %edi
call cudaSetDevice@PLT
leaq 0(,%r12,8), %rbx
movq (%rsp), %rax
movq %rbx, %rdi
addq (%rax), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movq 8(%rsp), %rax
movq %rbx, %rdi
addq (%rax), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movq 0(%rbp), %rdx
cmpq %rdx, 8(%rbp)
je .L11
movl $0, %ebx
jmp .L13
.L9:
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2926:
.size _Z9configuremRSt6vectorIPiSaIS0_EES3_, .-_Z9configuremRSt6vectorIPiSaIS0_EES3_
.globl _Z5resetmRSt6vectorIPiSaIS0_EES3_
.type _Z5resetmRSt6vectorIPiSaIS0_EES3_, @function
_Z5resetmRSt6vectorIPiSaIS0_EES3_:
.LFB2927:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rsi, 8(%rsp)
movq %rdx, %r15
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq g(%rip), %rdx
cmpq %rdx, 8+g(%rip)
je .L20
movl $0, %r12d
leaq g(%rip), %rbp
leaq 20(%rsp), %r14
jmp .L25
.L23:
addq $1, %rbx
movq 0(%rbp), %rdx
movq 8(%rbp), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %rbx
jnb .L22
.L24:
cmpl %ebx, %r12d
je .L23
movl (%rdx,%rbx,4), %eax
movl (%rdx,%r13), %esi
movl %eax, %edx
movq %r14, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $0, 20(%rsp)
je .L23
movq 0(%rbp), %rax
movl (%rax,%r13), %edi
call cudaSetDevice@PLT
movq 0(%rbp), %rax
movl (%rax,%rbx,4), %edi
call cudaDeviceDisablePeerAccess@PLT
call cudaDeviceSynchronize@PLT
movq 0(%rbp), %rax
movl (%rax,%rbx,4), %edi
call cudaSetDevice@PLT
movq 0(%rbp), %rax
movl (%rax,%r13), %edi
call cudaDeviceDisablePeerAccess@PLT
call cudaDeviceSynchronize@PLT
jmp .L23
.L22:
movq 0(%rbp), %rdx
addq $1, %r12
movq 8(%rbp), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %r12
jnb .L20
.L25:
leaq 0(,%r12,4), %r13
movl (%rdx,%r12,4), %edi
call cudaSetDevice@PLT
movq 8(%rsp), %rax
movq (%rax), %rax
movq (%rax,%r12,8), %rdi
call cudaFree@PLT
movq (%r15), %rax
movq (%rax,%r12,8), %rdi
call cudaFree@PLT
movq 0(%rbp), %rdx
cmpq %rdx, 8(%rbp)
je .L22
movl $0, %ebx
jmp .L24
.L20:
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2927:
.size _Z5resetmRSt6vectorIPiSaIS0_EES3_, .-_Z5resetmRSt6vectorIPiSaIS0_EES3_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\nBlocked Memory Transfers: Only one memory transfer at a time\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Copying from %d to %d\n"
.LC5:
.string "Time(ms) spent in memcpy\n"
.LC6:
.string " D\\D"
.LC7:
.string "%10d "
.LC8:
.string "\n"
.LC9:
.string "%6d"
.LC10:
.string "%12.4f"
.section .rodata.str1.8
.align 8
.LC12:
.string "bandwidth(Gbps) utilized during memcpy\n"
.text
.globl _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
.type _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_, @function
_Z12blocked_copymRSt6vectorIPiSaIS0_EES3_:
.LFB2928:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $88, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %rdi, %r15
movq %rsi, -104(%rbp)
movq %rdx, -112(%rbp)
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
movq 8+g(%rip), %rax
subq g(%rip), %rax
sarq $2, %rax
imulq %rax, %rax
salq $2, %rax
leaq 15(%rax), %rdx
movq %rdx, %rsi
andq $-16, %rsi
andq $-4096, %rdx
movq %rsp, %rcx
subq %rdx, %rcx
.L32:
cmpq %rcx, %rsp
je .L33
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L32
.L33:
movq %rsi, %rdx
andl $4095, %edx
subq %rdx, %rsp
testq %rdx, %rdx
je .L34
orq $0, -8(%rsp,%rdx)
.L34:
movq %rsp, %r14
addq $15, %rax
movq %rax, %rdx
andq $-16, %rdx
andq $-4096, %rax
movq %rsp, %rcx
subq %rax, %rcx
.L35:
cmpq %rcx, %rsp
je .L36
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L35
.L36:
movq %rdx, %rax
andl $4095, %eax
subq %rax, %rsp
testq %rax, %rax
je .L37
orq $0, -8(%rsp,%rax)
.L37:
movq %rsp, -88(%rbp)
leaq -72(%rbp), %rdi
call cudaEventCreate@PLT
leaq -64(%rbp), %rdi
call cudaEventCreate@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %r13d
movq g(%rip), %rax
cmpq %rax, 8+g(%rip)
je .L39
leaq g(%rip), %r12
movq %r14, -120(%rbp)
jmp .L38
.L41:
movq %r15, %rdx
shrq %rdx
movq %r15, %rcx
andl $1, %ecx
orq %rcx, %rdx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
addss %xmm0, %xmm0
.L42:
mulss .LC3(%rip), %xmm0
divss %xmm1, %xmm0
mulss .LC4(%rip), %xmm0
movq -88(%rbp), %rcx
movss %xmm0, (%rcx,%rax,4)
.L40:
addq $1, %rbx
movq (%r12), %rsi
movq 8(%r12), %rax
subq %rsi, %rax
sarq $2, %rax
cmpq %rax, %rbx
jnb .L45
.L43:
cmpl %ebx, %r13d
je .L40
movl (%rsi,%rbx,4), %ecx
movl (%rsi,%r14), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq -72(%rbp), %rdi
call cudaEventRecord@PLT
movq (%r12), %rax
movl (%rax,%rbx,4), %ecx
movq -112(%rbp), %rdi
movq (%rdi), %rdx
movq (%rdx,%rbx,8), %rdx
movl (%rax,%r14), %esi
movq -104(%rbp), %rax
movq (%rax), %rax
movq -96(%rbp), %rdi
movq (%rax,%rdi), %rdi
movl $0, %r9d
movq %r15, %r8
call cudaMemcpyPeerAsync@PLT
movl $0, %esi
movq -64(%rbp), %rdi
call cudaEventRecord@PLT
call cudaDeviceSynchronize@PLT
movq -64(%rbp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, -76(%rbp)
leaq -76(%rbp), %rdi
movq -64(%rbp), %rdx
movq -72(%rbp), %rsi
call cudaEventElapsedTime@PLT
movss -76(%rbp), %xmm1
movq 8(%r12), %rax
subq (%r12), %rax
sarq $2, %rax
imulq %r13, %rax
addq %rbx, %rax
movaps %xmm1, %xmm0
mulss .LC3(%rip), %xmm0
movq -120(%rbp), %rcx
movss %xmm0, (%rcx,%rax,4)
testq %r15, %r15
js .L41
pxor %xmm0, %xmm0
cvtsi2ssq %r15, %xmm0
jmp .L42
.L45:
addq $1, %r13
movq 8(%r12), %rax
subq (%r12), %rax
sarq $2, %rax
cmpq %rax, %r13
jnb .L73
.L38:
movq (%r12), %rsi
leaq 0(,%r13,4), %r14
leaq 0(,%r13,8), %r8
movl $0, %ebx
cmpq %rsi, 8(%r12)
je .L45
movq %r8, -96(%rbp)
jmp .L43
.L73:
movq -120(%rbp), %r14
.L39:
movq -72(%rbp), %rdi
call cudaEventDestroy@PLT
movq -64(%rbp), %rdi
call cudaEventDestroy@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
cmpq %rdx, 8+g(%rip)
je .L46
movl $0, %ebx
leaq .LC7(%rip), %r12
.L47:
movl (%rdx,%rbx,4), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
addq $1, %rbx
movq 8+g(%rip), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %rbx
jb .L47
.L46:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
cmpq %rdx, 8+g(%rip)
je .L48
movl $0, %r12d
leaq .LC9(%rip), %r15
leaq .LC10(%rip), %r13
jmp .L53
.L50:
imulq %r12, %rdx
addq %rbx, %rdx
pxor %xmm0, %xmm0
cvtss2sd (%r14,%rdx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L51:
addq $1, %rbx
movq 8+g(%rip), %rax
subq g(%rip), %rax
sarq $2, %rax
movq %rax, %rdx
cmpq %rax, %rbx
jnb .L49
.L52:
cmpl %ebx, %r12d
jne .L50
pxor %xmm0, %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L51
.L49:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
addq $1, %r12
movq 8+g(%rip), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %r12
jnb .L48
.L53:
movl (%rdx,%r12,4), %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8+g(%rip), %rcx
movq g(%rip), %rax
movq %rcx, %rdx
subq %rax, %rdx
sarq $2, %rdx
cmpq %rax, %rcx
je .L49
movl $0, %ebx
jmp .L52
.L48:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
cmpq %rdx, 8+g(%rip)
je .L54
movl $0, %ebx
leaq .LC7(%rip), %r12
.L55:
movl (%rdx,%rbx,4), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
addq $1, %rbx
movq 8+g(%rip), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %rbx
jb .L55
.L54:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
cmpq %rdx, 8+g(%rip)
je .L31
movl $0, %r12d
leaq .LC9(%rip), %r15
leaq .LC10(%rip), %r13
leaq .LC8(%rip), %r14
jmp .L61
.L58:
imulq %r12, %rdx
addq %rbx, %rdx
movq -88(%rbp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rdx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L59:
addq $1, %rbx
movq 8+g(%rip), %rax
subq g(%rip), %rax
sarq $2, %rax
movq %rax, %rdx
cmpq %rax, %rbx
jnb .L57
.L60:
cmpl %ebx, %r12d
jne .L58
pxor %xmm0, %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L59
.L57:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq g(%rip), %rdx
addq $1, %r12
movq 8+g(%rip), %rax
subq %rdx, %rax
sarq $2, %rax
cmpq %rax, %r12
jnb .L31
.L61:
movl (%rdx,%r12,4), %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8+g(%rip), %rcx
movq g(%rip), %rax
movq %rcx, %rdx
subq %rax, %rdx
sarq $2, %rdx
cmpq %rax, %rcx
je .L57
movl $0, %ebx
jmp .L60
.L31:
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L76
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L76:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2928:
.size _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_, .-_Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2956:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2956:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorIPiSaIS0_EED2Ev,"axG",@progbits,_ZNSt6vectorIPiSaIS0_EED5Ev,comdat
.align 2
.weak _ZNSt6vectorIPiSaIS0_EED2Ev
.type _ZNSt6vectorIPiSaIS0_EED2Ev, @function
_ZNSt6vectorIPiSaIS0_EED2Ev:
.LFB3028:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L82
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L82:
ret
.cfi_endproc
.LFE3028:
.size _ZNSt6vectorIPiSaIS0_EED2Ev, .-_ZNSt6vectorIPiSaIS0_EED2Ev
.weak _ZNSt6vectorIPiSaIS0_EED1Ev
.set _ZNSt6vectorIPiSaIS0_EED1Ev,_ZNSt6vectorIPiSaIS0_EED2Ev
.section .rodata.str1.8
.align 8
.LC13:
.string "cannot create std::vector larger than max_size()"
.text
.globl _Z12perf_analyzem
.type _Z12perf_analyzem, @function
_Z12perf_analyzem:
.LFB2929:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA2929
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq 8+g(%rip), %rax
subq g(%rip), %rax
movq %rax, %r12
sarq $2, %r12
movabsq $4611686018427387900, %rdx
cmpq %rax, %rdx
jb .L110
movq %rdi, %rbx
testq %r12, %r12
je .L88
leaq 0(,%r12,8), %r14
movq %r14, %rdi
.LEHB0:
call _Znwm@PLT
movq %rax, %rbp
movq %rax, (%rsp)
leaq (%rax,%r14), %rdx
movq %rdx, 16(%rsp)
movq $0, (%rax)
leaq 8(%rax), %rax
cmpq $1, %r12
je .L102
cmpq %rdx, %rax
je .L89
.L90:
movq $0, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L90
jmp .L89
.L110:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L111
leaq .LC13(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.LEHE0:
.L111:
call __stack_chk_fail@PLT
.L102:
movq %rax, %rdx
jmp .L89
.L115:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L112
leaq .LC13(%rip), %rdi
.LEHB1:
call _ZSt20__throw_length_errorPKc@PLT
.LEHE1:
.L105:
endbr64
movq %rax, %rbx
jmp .L99
.L112:
call __stack_chk_fail@PLT
.L116:
movq %rax, %r12
movq %rax, 32(%rsp)
leaq (%rax,%r15), %rdx
movq %rdx, 48(%rsp)
movq $0, (%rax)
leaq 8(%rax), %rax
cmpq $1, %r13
je .L103
cmpq %rax, %rdx
je .L104
.L95:
movq $0, (%rax)
addq $8, %rax
cmpq %rax, %rdx
jne .L95
jmp .L94
.L103:
movq %rax, %rdx
jmp .L94
.L104:
movq %rax, %rdx
jmp .L94
.L114:
leaq 32(%rsp), %rdx
movq %rsp, %rsi
movq %rbx, %rdi
.LEHB2:
call _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
leaq 32(%rsp), %rdx
movq %rsp, %rsi
movq %rbx, %rdi
call _Z5resetmRSt6vectorIPiSaIS0_EES3_
.LEHE2:
testq %r12, %r12
je .L96
movq %r15, %rsi
movq %r12, %rdi
call _ZdlPvm@PLT
.L96:
testq %rbp, %rbp
je .L85
movq %r14, %rsi
movq %rbp, %rdi
call _ZdlPvm@PLT
.L85:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L113
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L106:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 32(%rsp), %rdi
call _ZNSt6vectorIPiSaIS0_EED1Ev
.L99:
movq %rsp, %rdi
call _ZNSt6vectorIPiSaIS0_EED1Ev
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L100
call __stack_chk_fail@PLT
.L100:
movq %rbx, %rdi
.LEHB3:
call _Unwind_Resume@PLT
.LEHE3:
.L93:
movq $0, 32(%rsp)
movq $0, 48(%rsp)
movq %r13, %r15
movl $0, %r12d
movl $0, %edx
.L94:
movq %rdx, 40(%rsp)
leaq 32(%rsp), %rdx
movq %rsp, %rsi
movq %rbx, %rdi
.LEHB4:
call _Z9configuremRSt6vectorIPiSaIS0_EES3_
.LEHE4:
jmp .L114
.L88:
movq $0, (%rsp)
movq $0, 16(%rsp)
movq %r12, %r14
movl $0, %ebp
movl $0, %edx
.L89:
movq %rdx, 8(%rsp)
movq 8+g(%rip), %rax
subq g(%rip), %rax
movq %rax, %r13
sarq $2, %r13
movabsq $4611686018427387900, %rdx
cmpq %rax, %rdx
jb .L115
movq $0, 40(%rsp)
movq $0, 48(%rsp)
testq %r13, %r13
je .L93
leaq 0(,%r13,8), %r15
movq %r15, %rdi
.LEHB5:
call _Znwm@PLT
.LEHE5:
jmp .L116
.L113:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2929:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA2929:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE2929-.LLSDACSB2929
.LLSDACSB2929:
.uleb128 .LEHB0-.LFB2929
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB2929
.uleb128 .LEHE1-.LEHB1
.uleb128 .L105-.LFB2929
.uleb128 0
.uleb128 .LEHB2-.LFB2929
.uleb128 .LEHE2-.LEHB2
.uleb128 .L106-.LFB2929
.uleb128 0
.uleb128 .LEHB3-.LFB2929
.uleb128 .LEHE3-.LEHB3
.uleb128 0
.uleb128 0
.uleb128 .LEHB4-.LFB2929
.uleb128 .LEHE4-.LEHB4
.uleb128 .L106-.LFB2929
.uleb128 0
.uleb128 .LEHB5-.LFB2929
.uleb128 .LEHE5-.LEHB5
.uleb128 .L105-.LFB2929
.uleb128 0
.LLSDACSE2929:
.text
.size _Z12perf_analyzem, .-_Z12perf_analyzem
.section .rodata._ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_.str1.1,"aMS",@progbits,1
.LC14:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_,"axG",@progbits,_ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_
.type _ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_, @function
_ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_:
.LFB3109:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, (%rsp)
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r13
movq %rbp, %rax
subq %r13, %rax
sarq $2, %rax
movabsq $2305843009213693951, %rdx
cmpq %rdx, %rax
je .L134
movq %rdi, %rbx
cmpq %r13, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L120
movabsq $2305843009213693951, %r14
cmpq %r14, %rax
cmovbe %rax, %r14
movq (%rsp), %r15
subq %r13, %r15
movl $0, %r12d
testq %rax, %rax
je .L121
jmp .L128
.L134:
leaq .LC14(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L135:
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
call memmove@PLT
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jg .L123
addq %rbp, %r15
movq 16(%rbx), %rsi
subq %r13, %rsi
jmp .L127
.L120:
movq (%rsp), %r15
subq %r13, %r15
movabsq $2305843009213693951, %r14
.L128:
leaq 0(,%r14,4), %rdi
call _Znwm@PLT
movq %rax, %r12
.L121:
movq 8(%rsp), %rax
movl (%rax), %eax
movl %eax, (%r12,%r15)
testq %r15, %r15
jg .L135
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jle .L125
.L123:
movq %rbp, %rdx
movq (%rsp), %rsi
movq %r15, %rdi
call memcpy@PLT
.L125:
addq %rbp, %r15
testq %r13, %r13
je .L126
movq 16(%rbx), %rsi
subq %r13, %rsi
.L127:
movq %r13, %rdi
call _ZdlPvm@PLT
.L126:
movq %r12, (%rbx)
movq %r15, 8(%rbx)
leaq (%r12,%r14,4), %rax
movq %rax, 16(%rbx)
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3109:
.size _ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_, .-_ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_
.section .rodata.str1.1
.LC15:
.string "Using all 8 GPUs\n"
.text
.globl main
.type main, @function
main:
.LFB2930:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $24, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq g(%rip), %rax
cmpq %rax, 8+g(%rip)
je .L145
.L137:
movl $1073741824, %edi
call _Z12perf_analyzem
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L146
movl $0, %eax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L145:
.cfi_restore_state
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rsp, %rdi
call cudaGetDeviceCount@PLT
movl $0, 4(%rsp)
cmpl $0, (%rsp)
jle .L137
movl $0, %ebx
leaq g(%rip), %rbp
jmp .L141
.L139:
leaq 4(%rsp), %rdx
movq %rbp, %rdi
call _ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_
.L140:
addl $1, %ebx
movl %ebx, 4(%rsp)
cmpl (%rsp), %ebx
jge .L137
.L141:
movq 8(%rbp), %rsi
cmpq 16(%rbp), %rsi
je .L139
movl %ebx, (%rsi)
addq $4, 8(%rbp)
jmp .L140
.L146:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2930:
.size main, .-main
.type _GLOBAL__sub_I_g, @function
_GLOBAL__sub_I_g:
.LFB3244:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA3244
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq $0, g(%rip)
movq $0, 8+g(%rip)
movq $0, 16+g(%rip)
movl $20, %edi
.LEHB6:
call _Znwm@PLT
.LEHE6:
movq %rax, g(%rip)
leaq 20(%rax), %rdx
movq %rdx, 16+g(%rip)
movabsq $4294967296, %rcx
movabsq $12884901890, %rbx
movq %rcx, (%rax)
movq %rbx, 8(%rax)
movl $4, 16(%rax)
movq %rdx, 8+g(%rip)
leaq __dso_handle(%rip), %rdx
leaq g(%rip), %rsi
leaq _ZNSt6vectorIiSaIiEED1Ev(%rip), %rdi
call __cxa_atexit@PLT
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L150:
.cfi_restore_state
endbr64
movq %rax, %rbx
movq g(%rip), %rdi
movq 16+g(%rip), %rsi
subq %rdi, %rsi
testq %rdi, %rdi
je .L149
call _ZdlPvm@PLT
.L149:
movq %rbx, %rdi
.LEHB7:
call _Unwind_Resume@PLT
.LEHE7:
.cfi_endproc
.LFE3244:
.section .gcc_except_table
.LLSDA3244:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE3244-.LLSDACSB3244
.LLSDACSB3244:
.uleb128 .LEHB6-.LFB3244
.uleb128 .LEHE6-.LEHB6
.uleb128 .L150-.LFB3244
.uleb128 0
.uleb128 .LEHB7-.LFB3244
.uleb128 .LEHE7-.LEHB7
.uleb128 0
.uleb128 0
.LLSDACSE3244:
.text
.size _GLOBAL__sub_I_g, .-_GLOBAL__sub_I_g
.section .init_array
.align 8
.quad _GLOBAL__sub_I_g
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl g
.bss
.align 16
.type g, @object
.size g, 24
g:
.zero 24
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1148846080
.align 4
.LC4:
.long 813694976
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.hidden __dso_handle
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaMalloc(&buffer_s[i], size);
cudaMalloc(&buffer_d[i], size);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
cudaSetDevice(g[i]);
cudaDeviceEnablePeerAccess(g[j], 0);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceEnablePeerAccess(g[i], 0);
cudaDeviceSynchronize();
}
}
}
}
}
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaFree(buffer_s[i]);
cudaFree(buffer_d[i]);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
cudaSetDevice(g[i]);
cudaDeviceDisablePeerAccess(g[j]);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceDisablePeerAccess(g[i]);
cudaDeviceSynchronize();
}
}
}
}
}
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
cudaEventRecord(start);
cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float time_ms=0.0;
cudaEventElapsedTime(&time_ms, start, stop);
time_taken[i*g.size()+j] = time_ms*1e3;
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", time_taken[i*g.size()+j]);
}
printf("\n");
}
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", bw[i*g.size()+j]);
printf("\n");
}
}
void perf_analyze(size_t size)
{
vector<int*> buffer_s(g.size());
vector<int*> buffer_d(g.size());
configure(size, buffer_s, buffer_d);
// Cyclic
blocked_copy(size, buffer_s, buffer_d);
reset(size, buffer_s, buffer_d);
}
int main(int argc, char** argv)
{
// NVLink D<->D performance
size_t size = (1<<30);
if (!g.size())
{
int n;
printf("Using all 8 GPUs\n");
cudaGetDeviceCount(&n);
for (int i=0; i<n; i++)
g.push_back(i);
}
//define size
perf_analyze(size);
return 0;
} | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
hipSetDevice(g[i]);
hipMalloc(&buffer_s[i], size);
hipMalloc(&buffer_d[i], size);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
hipDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
hipSetDevice(g[i]);
hipDeviceEnablePeerAccess(g[j], 0);
hipDeviceSynchronize();
hipSetDevice(g[j]);
hipDeviceEnablePeerAccess(g[i], 0);
hipDeviceSynchronize();
}
}
}
}
}
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
hipSetDevice(g[i]);
hipFree(buffer_s[i]);
hipFree(buffer_d[i]);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
hipDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
hipSetDevice(g[i]);
hipDeviceDisablePeerAccess(g[j]);
hipDeviceSynchronize();
hipSetDevice(g[j]);
hipDeviceDisablePeerAccess(g[i]);
hipDeviceSynchronize();
}
}
}
}
}
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
hipEventRecord(start);
hipMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
float time_ms=0.0;
hipEventElapsedTime(&time_ms, start, stop);
time_taken[i*g.size()+j] = time_ms*1e3;
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", time_taken[i*g.size()+j]);
}
printf("\n");
}
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", bw[i*g.size()+j]);
printf("\n");
}
}
void perf_analyze(size_t size)
{
vector<int*> buffer_s(g.size());
vector<int*> buffer_d(g.size());
configure(size, buffer_s, buffer_d);
// Cyclic
blocked_copy(size, buffer_s, buffer_d);
reset(size, buffer_s, buffer_d);
}
int main(int argc, char** argv)
{
// NVLink D<->D performance
size_t size = (1<<30);
if (!g.size())
{
int n;
printf("Using all 8 GPUs\n");
hipGetDeviceCount(&n);
for (int i=0; i<n; i++)
g.push_back(i);
}
//define size
perf_analyze(size);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
hipSetDevice(g[i]);
hipMalloc(&buffer_s[i], size);
hipMalloc(&buffer_d[i], size);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
hipDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
hipSetDevice(g[i]);
hipDeviceEnablePeerAccess(g[j], 0);
hipDeviceSynchronize();
hipSetDevice(g[j]);
hipDeviceEnablePeerAccess(g[i], 0);
hipDeviceSynchronize();
}
}
}
}
}
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
hipSetDevice(g[i]);
hipFree(buffer_s[i]);
hipFree(buffer_d[i]);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
hipDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
hipSetDevice(g[i]);
hipDeviceDisablePeerAccess(g[j]);
hipDeviceSynchronize();
hipSetDevice(g[j]);
hipDeviceDisablePeerAccess(g[i]);
hipDeviceSynchronize();
}
}
}
}
}
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
hipEventRecord(start);
hipMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
float time_ms=0.0;
hipEventElapsedTime(&time_ms, start, stop);
time_taken[i*g.size()+j] = time_ms*1e3;
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", time_taken[i*g.size()+j]);
}
printf("\n");
}
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", bw[i*g.size()+j]);
printf("\n");
}
}
void perf_analyze(size_t size)
{
vector<int*> buffer_s(g.size());
vector<int*> buffer_d(g.size());
configure(size, buffer_s, buffer_d);
// Cyclic
blocked_copy(size, buffer_s, buffer_d);
reset(size, buffer_s, buffer_d);
}
int main(int argc, char** argv)
{
// NVLink D<->D performance
size_t size = (1<<30);
if (!g.size())
{
int n;
printf("Using all 8 GPUs\n");
hipGetDeviceCount(&n);
for (int i=0; i<n; i++)
g.push_back(i);
}
//define size
perf_analyze(size);
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
hipSetDevice(g[i]);
hipMalloc(&buffer_s[i], size);
hipMalloc(&buffer_d[i], size);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
hipDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
hipSetDevice(g[i]);
hipDeviceEnablePeerAccess(g[j], 0);
hipDeviceSynchronize();
hipSetDevice(g[j]);
hipDeviceEnablePeerAccess(g[i], 0);
hipDeviceSynchronize();
}
}
}
}
}
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
hipSetDevice(g[i]);
hipFree(buffer_s[i]);
hipFree(buffer_d[i]);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
hipDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
hipSetDevice(g[i]);
hipDeviceDisablePeerAccess(g[j]);
hipDeviceSynchronize();
hipSetDevice(g[j]);
hipDeviceDisablePeerAccess(g[i]);
hipDeviceSynchronize();
}
}
}
}
}
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
hipEventRecord(start);
hipMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
float time_ms=0.0;
hipEventElapsedTime(&time_ms, start, stop);
time_taken[i*g.size()+j] = time_ms*1e3;
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", time_taken[i*g.size()+j]);
}
printf("\n");
}
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", bw[i*g.size()+j]);
printf("\n");
}
}
void perf_analyze(size_t size)
{
vector<int*> buffer_s(g.size());
vector<int*> buffer_d(g.size());
configure(size, buffer_s, buffer_d);
// Cyclic
blocked_copy(size, buffer_s, buffer_d);
reset(size, buffer_s, buffer_d);
}
int main(int argc, char** argv)
{
// NVLink D<->D performance
size_t size = (1<<30);
if (!g.size())
{
int n;
printf("Using all 8 GPUs\n");
hipGetDeviceCount(&n);
for (int i=0; i<n; i++)
g.push_back(i);
}
//define size
perf_analyze(size);
return 0;
} | .text
.file "nvlink_blocked.hip"
.section .text._ZNSt6vectorIiSaIiEED2Ev,"axG",@progbits,_ZNSt6vectorIiSaIiEED2Ev,comdat
.weak _ZNSt6vectorIiSaIiEED2Ev # -- Begin function _ZNSt6vectorIiSaIiEED2Ev
.p2align 4, 0x90
.type _ZNSt6vectorIiSaIiEED2Ev,@function
_ZNSt6vectorIiSaIiEED2Ev: # @_ZNSt6vectorIiSaIiEED2Ev
.cfi_startproc
# %bb.0:
movq (%rdi), %rdi
testq %rdi, %rdi
jne _ZdlPv # TAILCALL
# %bb.1: # %_ZNSt12_Vector_baseIiSaIiEED2Ev.exit
retq
.Lfunc_end0:
.size _ZNSt6vectorIiSaIiEED2Ev, .Lfunc_end0-_ZNSt6vectorIiSaIiEED2Ev
.cfi_endproc
# -- End function
.text
.globl _Z9configuremRSt6vectorIPiSaIS0_EES3_ # -- Begin function _Z9configuremRSt6vectorIPiSaIS0_EES3_
.p2align 4, 0x90
.type _Z9configuremRSt6vectorIPiSaIS0_EES3_,@function
_Z9configuremRSt6vectorIPiSaIS0_EES3_: # @_Z9configuremRSt6vectorIPiSaIS0_EES3_
.cfi_startproc
# %bb.0:
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB1_10
# %bb.1: # %.lr.ph25.preheader
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
xorl %ebp, %ebp
leaq 4(%rsp), %r12
jmp .LBB1_2
.p2align 4, 0x90
.LBB1_8: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
incq %rbp
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
cmpq %rbp, %rcx
jbe .LBB1_9
.LBB1_2: # %.lr.ph25
# =>This Loop Header: Depth=1
# Child Loop BB1_4 Depth 2
movl (%rax,%rbp,4), %edi
callq hipSetDevice
leaq (,%rbp,8), %r13
movq (%r14), %rdi
addq %r13, %rdi
movq %r15, %rsi
callq hipMalloc
addq (%rbx), %r13
movq %r13, %rdi
movq %r15, %rsi
callq hipMalloc
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB1_8
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB1_2 Depth=1
xorl %r13d, %r13d
jmp .LBB1_4
.p2align 4, 0x90
.LBB1_7: # in Loop: Header=BB1_4 Depth=2
incq %r13
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
cmpq %r13, %rcx
jbe .LBB1_8
.LBB1_4: # %.lr.ph
# Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
cmpq %r13, %rbp
je .LBB1_7
# %bb.5: # in Loop: Header=BB1_4 Depth=2
movl (%rax,%rbp,4), %esi
movl (%rax,%r13,4), %edx
movq %r12, %rdi
callq hipDeviceCanAccessPeer
cmpl $0, 4(%rsp)
je .LBB1_7
# %bb.6: # in Loop: Header=BB1_4 Depth=2
movq g(%rip), %rax
movl (%rax,%rbp,4), %edi
callq hipSetDevice
movq g(%rip), %rax
movl (%rax,%r13,4), %edi
xorl %esi, %esi
callq hipDeviceEnablePeerAccess
callq hipDeviceSynchronize
movq g(%rip), %rax
movl (%rax,%r13,4), %edi
callq hipSetDevice
movq g(%rip), %rax
movl (%rax,%rbp,4), %edi
xorl %esi, %esi
callq hipDeviceEnablePeerAccess
callq hipDeviceSynchronize
jmp .LBB1_7
.LBB1_9:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB1_10: # %._crit_edge26
retq
.Lfunc_end1:
.size _Z9configuremRSt6vectorIPiSaIS0_EES3_, .Lfunc_end1-_Z9configuremRSt6vectorIPiSaIS0_EES3_
.cfi_endproc
# -- End function
.globl _Z5resetmRSt6vectorIPiSaIS0_EES3_ # -- Begin function _Z5resetmRSt6vectorIPiSaIS0_EES3_
.p2align 4, 0x90
.type _Z5resetmRSt6vectorIPiSaIS0_EES3_,@function
_Z5resetmRSt6vectorIPiSaIS0_EES3_: # @_Z5resetmRSt6vectorIPiSaIS0_EES3_
.cfi_startproc
# %bb.0:
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB2_10
# %bb.1: # %.lr.ph23.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdx, %rbx
movq %rsi, %r14
xorl %r12d, %r12d
leaq 12(%rsp), %r15
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_8: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
incq %r12
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
cmpq %r12, %rcx
jbe .LBB2_9
.LBB2_2: # %.lr.ph23
# =>This Loop Header: Depth=1
# Child Loop BB2_4 Depth 2
movl (%rax,%r12,4), %edi
callq hipSetDevice
movq (%r14), %rax
movq (%rax,%r12,8), %rdi
callq hipFree
movq (%rbx), %rax
movq (%rax,%r12,8), %rdi
callq hipFree
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB2_8
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB2_2 Depth=1
xorl %r13d, %r13d
jmp .LBB2_4
.p2align 4, 0x90
.LBB2_7: # in Loop: Header=BB2_4 Depth=2
incq %r13
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
cmpq %r13, %rcx
jbe .LBB2_8
.LBB2_4: # %.lr.ph
# Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
cmpq %r13, %r12
je .LBB2_7
# %bb.5: # in Loop: Header=BB2_4 Depth=2
movl (%rax,%r12,4), %esi
movl (%rax,%r13,4), %edx
movq %r15, %rdi
callq hipDeviceCanAccessPeer
cmpl $0, 12(%rsp)
je .LBB2_7
# %bb.6: # in Loop: Header=BB2_4 Depth=2
movq g(%rip), %rax
movl (%rax,%r12,4), %edi
callq hipSetDevice
movq g(%rip), %rax
movl (%rax,%r13,4), %edi
callq hipDeviceDisablePeerAccess
callq hipDeviceSynchronize
movq g(%rip), %rax
movl (%rax,%r13,4), %edi
callq hipSetDevice
movq g(%rip), %rax
movl (%rax,%r12,4), %edi
callq hipDeviceDisablePeerAccess
callq hipDeviceSynchronize
jmp .LBB2_7
.LBB2_9:
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.LBB2_10: # %._crit_edge24
retq
.Lfunc_end2:
.size _Z5resetmRSt6vectorIPiSaIS0_EES3_, .Lfunc_end2-_Z5resetmRSt6vectorIPiSaIS0_EES3_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
.LCPI3_0:
.long 0x447a0000 # float 1000
.LCPI3_1:
.long 0x30800000 # float 9.31322574E-10
.text
.globl _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
.p2align 4, 0x90
.type _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_,@function
_Z12blocked_copymRSt6vectorIPiSaIS0_EES3_: # @_Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, -96(%rbp) # 8-byte Spill
movq %rsi, -88(%rbp) # 8-byte Spill
movq %rdi, %r15
movq g+8(%rip), %rax
subq g(%rip), %rax
sarq $2, %rax
imulq %rax, %rax
movq %rsp, %r12
leaq 15(,%rax,4), %rax
andq $-16, %rax
subq %rax, %r12
movq %r12, %rsp
movq %rsp, %rcx
subq %rax, %rcx
movq %rcx, -80(%rbp) # 8-byte Spill
movq %rcx, %rsp
leaq -64(%rbp), %rdi
callq hipEventCreate
leaq -56(%rbp), %rdi
callq hipEventCreate
movl $.Lstr, %edi
callq puts@PLT
movq g+8(%rip), %rax
cmpq g(%rip), %rax
je .LBB3_11
# %bb.1: # %.preheader.lr.ph
testq %r15, %r15
js .LBB3_2
# %bb.3: # %.preheader.lr.ph
cvtsi2ss %r15, %xmm2
jmp .LBB3_4
.LBB3_2:
movq %r15, %rax
shrq %rax
movl %r15d, %ecx
andl $1, %ecx
orq %rax, %rcx
cvtsi2ss %rcx, %xmm2
addss %xmm2, %xmm2
.LBB3_4: # %.preheader.lr.ph
mulss .LCPI3_0(%rip), %xmm2
xorl %r13d, %r13d
xorl %ebx, %ebx
movss %xmm2, -68(%rbp) # 4-byte Spill
jmp .LBB3_5
.p2align 4, 0x90
.LBB3_10: # %._crit_edge
# in Loop: Header=BB3_5 Depth=1
movq g+8(%rip), %rax
subq g(%rip), %rax
incq %rbx
sarq $2, %rax
addq $4, %r13
cmpq %rbx, %rax
jbe .LBB3_11
.LBB3_5: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_7 Depth 2
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB3_10
# %bb.6: # %.lr.ph.preheader
# in Loop: Header=BB3_5 Depth=1
xorl %r14d, %r14d
jmp .LBB3_7
.p2align 4, 0x90
.LBB3_9: # in Loop: Header=BB3_7 Depth=2
incq %r14
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
cmpq %r14, %rcx
jbe .LBB3_10
.LBB3_7: # %.lr.ph
# Parent Loop BB3_5 Depth=1
# => This Inner Loop Header: Depth=2
cmpq %r14, %rbx
je .LBB3_9
# %bb.8: # in Loop: Header=BB3_7 Depth=2
movl (%rax,%rbx,4), %esi
movl (%rax,%r14,4), %edx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movq -64(%rbp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq -88(%rbp), %rax # 8-byte Reload
movq (%rax), %rax
movq (%rax,%rbx,8), %rdi
movq g(%rip), %rax
movl (%rax,%rbx,4), %esi
movq -96(%rbp), %rcx # 8-byte Reload
movq (%rcx), %rcx
movq (%rcx,%r14,8), %rdx
movl (%rax,%r14,4), %ecx
movq %r15, %r8
xorl %r9d, %r9d
callq hipMemcpyPeerAsync
movq -56(%rbp), %rdi
xorl %esi, %esi
callq hipEventRecord
callq hipDeviceSynchronize
movq -56(%rbp), %rdi
callq hipEventSynchronize
movl $0, -44(%rbp)
movq -64(%rbp), %rsi
movq -56(%rbp), %rdx
leaq -44(%rbp), %rdi
callq hipEventElapsedTime
movss -68(%rbp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
movss -44(%rbp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movq g+8(%rip), %rax
subq g(%rip), %rax
mulss .LCPI3_0(%rip), %xmm0
sarq $2, %rax
imulq %r13, %rax
leaq (%r12,%rax), %rcx
movss %xmm0, (%rcx,%r14,4)
mulss .LCPI3_1(%rip), %xmm1
addq -80(%rbp), %rax # 8-byte Folded Reload
movss %xmm1, (%rax,%r14,4)
jmp .LBB3_9
.LBB3_11: # %._crit_edge66
movq -64(%rbp), %rdi
callq hipEventDestroy
movq -56(%rbp), %rdi
callq hipEventDestroy
movl $.Lstr.1, %edi
callq puts@PLT
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB3_14
# %bb.12: # %.lr.ph69.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_13: # %.lr.ph69
# =>This Inner Loop Header: Depth=1
movl (%rax,%rbx,4), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
incq %rbx
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
cmpq %rbx, %rcx
ja .LBB3_13
.LBB3_14: # %._crit_edge70
movl $10, %edi
callq putchar@PLT
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB3_22
# %bb.15: # %.lr.ph77.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp .LBB3_16
.p2align 4, 0x90
.LBB3_21: # %._crit_edge74
# in Loop: Header=BB3_16 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
addq $4, %rbx
cmpq %r14, %rcx
jbe .LBB3_22
.LBB3_16: # %.lr.ph77
# =>This Loop Header: Depth=1
# Child Loop BB3_18 Depth 2
movl (%rax,%r14,4), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movq g+8(%rip), %rax
subq g(%rip), %rax
je .LBB3_21
# %bb.17: # %.lr.ph73.preheader
# in Loop: Header=BB3_16 Depth=1
sarq $2, %rax
xorl %r15d, %r15d
jmp .LBB3_18
.p2align 4, 0x90
.LBB3_20: # in Loop: Header=BB3_18 Depth=2
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
movq g+8(%rip), %rax
subq g(%rip), %rax
sarq $2, %rax
cmpq %r15, %rax
jbe .LBB3_21
.LBB3_18: # %.lr.ph73
# Parent Loop BB3_16 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm0, %xmm0
cmpq %r15, %r14
je .LBB3_20
# %bb.19: # in Loop: Header=BB3_18 Depth=2
imulq %rbx, %rax
addq %r12, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
jmp .LBB3_20
.LBB3_22: # %._crit_edge78
movl $.Lstr.2, %edi
callq puts@PLT
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB3_25
# %bb.23: # %.lr.ph81.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_24: # %.lr.ph81
# =>This Inner Loop Header: Depth=1
movl (%rax,%rbx,4), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
incq %rbx
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
cmpq %rbx, %rcx
ja .LBB3_24
.LBB3_25: # %._crit_edge82
movl $10, %edi
callq putchar@PLT
movq g(%rip), %rax
cmpq %rax, g+8(%rip)
je .LBB3_33
# %bb.26: # %.lr.ph89.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp .LBB3_27
.p2align 4, 0x90
.LBB3_32: # %._crit_edge86
# in Loop: Header=BB3_27 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
movq g+8(%rip), %rcx
movq g(%rip), %rax
subq %rax, %rcx
sarq $2, %rcx
addq $4, %rbx
cmpq %r14, %rcx
jbe .LBB3_33
.LBB3_27: # %.lr.ph89
# =>This Loop Header: Depth=1
# Child Loop BB3_29 Depth 2
movl (%rax,%r14,4), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movq g+8(%rip), %rax
subq g(%rip), %rax
je .LBB3_32
# %bb.28: # %.lr.ph85.preheader
# in Loop: Header=BB3_27 Depth=1
sarq $2, %rax
xorl %r15d, %r15d
jmp .LBB3_29
.p2align 4, 0x90
.LBB3_31: # in Loop: Header=BB3_29 Depth=2
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
movq g+8(%rip), %rax
subq g(%rip), %rax
sarq $2, %rax
cmpq %r15, %rax
jbe .LBB3_32
.LBB3_29: # %.lr.ph85
# Parent Loop BB3_27 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm0, %xmm0
cmpq %r15, %r14
je .LBB3_31
# %bb.30: # in Loop: Header=BB3_29 Depth=2
imulq %rbx, %rax
addq -80(%rbp), %rax # 8-byte Folded Reload
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
jmp .LBB3_31
.LBB3_33: # %._crit_edge90
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.Lfunc_end3:
.size _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_, .Lfunc_end3-_Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
.cfi_endproc
# -- End function
.globl _Z12perf_analyzem # -- Begin function _Z12perf_analyzem
.p2align 4, 0x90
.type _Z12perf_analyzem,@function
_Z12perf_analyzem: # @_Z12perf_analyzem
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $72, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, 64(%rsp) # 8-byte Spill
movq g+8(%rip), %r13
movq g(%rip), %rbp
movq %r13, %r12
subq %rbp, %r12
movq %r12, %r14
sarq $2, %r14
movq %r14, %rax
shrq $60, %rax
jne .LBB4_29
# %bb.1: # %_ZNSt6vectorIPiSaIS0_EE17_S_check_init_lenEmRKS1_.exit.i
xorps %xmm0, %xmm0
movaps %xmm0, 32(%rsp)
movq $0, 48(%rsp)
cmpq %rbp, %r13
je .LBB4_2
# %bb.3: # %_ZNSt16allocator_traitsISaIPiEE8allocateERS1_m.exit.i.i.i.i
leaq (%r12,%r12), %rdi
callq _Znwm
movq %rax, %rbx
jmp .LBB4_4
.LBB4_2:
xorl %ebx, %ebx
.LBB4_4: # %_ZNSt12_Vector_baseIPiSaIS0_EEC2EmRKS1_.exit.i
movq %rbx, 32(%rsp)
movq %rbx, 40(%rsp)
leaq (%rbx,%r14,8), %r15
movq %r15, 48(%rsp)
cmpq %rbp, %r13
je .LBB4_11
# %bb.5:
movq $0, (%rbx)
leaq 8(%rbx), %rdi
cmpq $4, %r12
jne .LBB4_7
# %bb.6:
movq %rdi, %r15
jmp .LBB4_8
.LBB4_11: # %_ZNSt12_Vector_baseIPiSaIS0_EEC2EmRKS1_.exit.i12.critedge
movq %rbx, 40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movq $0, 16(%rsp)
jmp .LBB4_12
.LBB4_7: # %_ZSt6fill_nIPPimS0_ET_S2_T0_RKT1_.exit.loopexit.i.i.i.i.i
leaq -8(,%r12,2), %rdx
xorl %esi, %esi
callq memset@PLT
.LBB4_8: # %_ZNSt6vectorIPiSaIS0_EEC2EmRKS1_.exit
movq %r15, 40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movq $0, 16(%rsp)
cmpq %rbp, %r13
je .LBB4_12
# %bb.9: # %_ZNSt16allocator_traitsISaIPiEE8allocateERS1_m.exit.i.i.i.i11
leaq (%r12,%r12), %rdi
.Ltmp0:
callq _Znwm
.Ltmp1:
# %bb.10:
movq %rax, %r15
jmp .LBB4_13
.LBB4_12: # %_ZNSt12_Vector_baseIPiSaIS0_EEC2EmRKS1_.exit.i12
xorl %r15d, %r15d
.LBB4_13: # %_ZNSt12_Vector_baseIPiSaIS0_EEC2EmRKS1_.exit.i12
movq %r15, (%rsp)
movq %r15, 8(%rsp)
leaq (%r15,%r14,8), %r14
movq %r14, 16(%rsp)
movq %r15, %rdi
cmpq %rbp, %r13
je .LBB4_16
# %bb.14:
movq $0, (%r15)
leaq 8(%r15), %rdi
cmpq $4, %r12
je .LBB4_16
# %bb.15: # %_ZSt6fill_nIPPimS0_ET_S2_T0_RKT1_.exit.loopexit.i.i.i.i.i13
leaq -8(,%r12,2), %rdx
xorl %esi, %esi
callq memset@PLT
movq %r14, %rdi
.LBB4_16: # %_ZNSt6vectorIPiSaIS0_EEC2EmRKS1_.exit17
movq %rdi, 8(%rsp)
.Ltmp3:
leaq 32(%rsp), %rsi
movq %rsp, %rdx
movq 64(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
callq _Z9configuremRSt6vectorIPiSaIS0_EES3_
.Ltmp4:
# %bb.17:
.Ltmp5:
leaq 32(%rsp), %rsi
movq %rsp, %rdx
movq %r14, %rdi
callq _Z12blocked_copymRSt6vectorIPiSaIS0_EES3_
.Ltmp6:
# %bb.18:
.Ltmp7:
leaq 32(%rsp), %rsi
movq %rsp, %rdx
callq _Z5resetmRSt6vectorIPiSaIS0_EES3_
.Ltmp8:
# %bb.19:
testq %r15, %r15
je .LBB4_21
# %bb.20:
movq %r15, %rdi
callq _ZdlPv
.LBB4_21: # %_ZNSt6vectorIPiSaIS0_EED2Ev.exit
testq %rbx, %rbx
je .LBB4_22
# %bb.30:
movq %rbx, %rdi
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp _ZdlPv # TAILCALL
.LBB4_22: # %_ZNSt6vectorIPiSaIS0_EED2Ev.exit19
.cfi_def_cfa_offset 128
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB4_29: # %.noexc
.cfi_def_cfa_offset 128
movl $.L.str.10, %edi
callq _ZSt20__throw_length_errorPKc
.LBB4_23:
.Ltmp2:
movq %rax, %r14
testq %rbx, %rbx
je .LBB4_28
jmp .LBB4_27
.LBB4_24:
.Ltmp9:
movq %rax, %r14
testq %r15, %r15
jne .LBB4_25
# %bb.26: # %_ZNSt6vectorIPiSaIS0_EED2Ev.exit21
testq %rbx, %rbx
jne .LBB4_27
.LBB4_28: # %_ZNSt6vectorIPiSaIS0_EED2Ev.exit23
movq %r14, %rdi
callq _Unwind_Resume@PLT
.LBB4_25:
movq %r15, %rdi
callq _ZdlPv
testq %rbx, %rbx
je .LBB4_28
.LBB4_27:
movq %rbx, %rdi
callq _ZdlPv
movq %r14, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end4:
.size _Z12perf_analyzem, .Lfunc_end4-_Z12perf_analyzem
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table4:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp1-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp3-.Ltmp1 # Call between .Ltmp1 and .Ltmp3
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp8-.Ltmp3 # Call between .Ltmp3 and .Ltmp8
.uleb128 .Ltmp9-.Lfunc_begin0 # jumps to .Ltmp9
.byte 0 # On action: cleanup
.uleb128 .Ltmp8-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Lfunc_end4-.Ltmp8 # Call between .Ltmp8 and .Lfunc_end4
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq g+8(%rip), %rax
cmpq g(%rip), %rax
je .LBB5_1
.LBB5_15:
movl $1073741824, %edi # imm = 0x40000000
callq _Z12perf_analyzem
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_1:
.cfi_def_cfa_offset 64
movl $.Lstr.3, %edi
callq puts@PLT
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
jle .LBB5_15
# %bb.2: # %.lr.ph.preheader
xorl %r12d, %r12d
jmp .LBB5_3
.p2align 4, 0x90
.LBB5_4: # in Loop: Header=BB5_3 Depth=1
movl %r12d, (%rbx)
addq $4, %rbx
movq %rbx, g+8(%rip)
.LBB5_14: # %_ZNSt6vectorIiSaIiEE9push_backERKi.exit
# in Loop: Header=BB5_3 Depth=1
incl %r12d
cmpl 4(%rsp), %r12d
jge .LBB5_15
.LBB5_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq g+8(%rip), %rbx
cmpq g+16(%rip), %rbx
jne .LBB5_4
# %bb.5: # in Loop: Header=BB5_3 Depth=1
movq g(%rip), %r14
subq %r14, %rbx
movabsq $9223372036854775804, %rax # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rax, %rbx
je .LBB5_16
# %bb.6: # %_ZNKSt6vectorIiSaIiEE12_M_check_lenEmPKc.exit.i.i
# in Loop: Header=BB5_3 Depth=1
movq %rbx, %rbp
sarq $2, %rbp
cmpq $1, %rbp
movq %rbp, %rax
adcq $0, %rax
leaq (%rax,%rbp), %r13
movabsq $2305843009213693951, %rcx # imm = 0x1FFFFFFFFFFFFFFF
cmpq %rcx, %r13
cmovaeq %rcx, %r13
addq %rbp, %rax
cmovbq %rcx, %r13
testq %r13, %r13
je .LBB5_7
# %bb.8: # in Loop: Header=BB5_3 Depth=1
leaq (,%r13,4), %rdi
callq _Znwm
movq %rax, %r15
jmp .LBB5_9
.LBB5_7: # in Loop: Header=BB5_3 Depth=1
xorl %r15d, %r15d
.LBB5_9: # %_ZNSt12_Vector_baseIiSaIiEE11_M_allocateEm.exit.i.i
# in Loop: Header=BB5_3 Depth=1
movl %r12d, (%r15,%rbp,4)
testq %rbx, %rbx
jle .LBB5_11
# %bb.10: # in Loop: Header=BB5_3 Depth=1
movq %r15, %rdi
movq %r14, %rsi
movq %rbx, %rdx
callq memmove@PLT
.LBB5_11: # %_ZNSt6vectorIiSaIiEE11_S_relocateEPiS2_S2_RS0_.exit.i.i
# in Loop: Header=BB5_3 Depth=1
testq %r14, %r14
je .LBB5_13
# %bb.12: # in Loop: Header=BB5_3 Depth=1
movq %r14, %rdi
callq _ZdlPv
.LBB5_13: # %_ZNSt6vectorIiSaIiEE17_M_realloc_insertIJRKiEEEvN9__gnu_cxx17__normal_iteratorIPiS1_EEDpOT_.exit.i
# in Loop: Header=BB5_3 Depth=1
leaq (%r15,%rbx), %rax
addq $4, %rax
movq %r15, g(%rip)
movq %rax, g+8(%rip)
leaq (%r15,%r13,4), %rax
movq %rax, g+16(%rip)
jmp .LBB5_14
.LBB5_16:
movl $.L.str.11, %edi
callq _ZSt20__throw_length_errorPKc
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.section .text.startup,"ax",@progbits
.p2align 4, 0x90 # -- Begin function _GLOBAL__sub_I_nvlink_blocked.hip
.type _GLOBAL__sub_I_nvlink_blocked.hip,@function
_GLOBAL__sub_I_nvlink_blocked.hip: # @_GLOBAL__sub_I_nvlink_blocked.hip
.Lfunc_begin1:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception1
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
xorps %xmm0, %xmm0
movups %xmm0, g(%rip)
movq $0, g+16(%rip)
.Ltmp10:
movl $20, %edi
callq _Znwm
.Ltmp11:
# %bb.1: # %__cxx_global_var_init.exit
movq %rax, g(%rip)
movq %rax, %rcx
addq $20, %rcx
movq %rcx, g+16(%rip)
movaps _ZL4GPUs(%rip), %xmm0
movups %xmm0, (%rax)
movl $4, 16(%rax)
movq %rcx, g+8(%rip)
movl $_ZNSt6vectorIiSaIiEED2Ev, %edi
movl $g, %esi
movl $__dso_handle, %edx
popq %rbx
.cfi_def_cfa_offset 8
jmp __cxa_atexit # TAILCALL
.LBB6_2:
.cfi_def_cfa_offset 16
.Ltmp12:
movq %rax, %rbx
movq g(%rip), %rdi
testq %rdi, %rdi
je .LBB6_4
# %bb.3:
callq _ZdlPv
.LBB6_4: # %.body.i
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end6:
.size _GLOBAL__sub_I_nvlink_blocked.hip, .Lfunc_end6-_GLOBAL__sub_I_nvlink_blocked.hip
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table6:
.Lexception1:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end1-.Lcst_begin1
.Lcst_begin1:
.uleb128 .Ltmp10-.Lfunc_begin1 # >> Call Site 1 <<
.uleb128 .Ltmp11-.Ltmp10 # Call between .Ltmp10 and .Ltmp11
.uleb128 .Ltmp12-.Lfunc_begin1 # jumps to .Ltmp12
.byte 0 # On action: cleanup
.uleb128 .Ltmp11-.Lfunc_begin1 # >> Call Site 2 <<
.uleb128 .Lfunc_end6-.Ltmp11 # Call between .Ltmp11 and .Lfunc_end6
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end1:
.p2align 2, 0x0
# -- End function
.type g,@object # @g
.bss
.globl g
.p2align 3, 0x0
g:
.zero 24
.size g, 24
.type _ZL4GPUs,@object # @_ZL4GPUs
.section .rodata,"a",@progbits
.p2align 4, 0x0
_ZL4GPUs:
.long 0 # 0x0
.long 1 # 0x1
.long 2 # 0x2
.long 3 # 0x3
.long 4 # 0x4
.size _ZL4GPUs, 20
.hidden __dso_handle
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Copying from %d to %d\n"
.size .L.str.1, 23
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " D\\D"
.size .L.str.3, 7
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%10d "
.size .L.str.4, 6
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "%6d"
.size .L.str.6, 4
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%12.4f"
.size .L.str.7, 7
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "cannot create std::vector larger than max_size()"
.size .L.str.10, 49
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "vector::_M_realloc_insert"
.size .L.str.11, 26
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad _GLOBAL__sub_I_nvlink_blocked.hip
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\nBlocked Memory Transfers: Only one memory transfer at a time"
.size .Lstr, 62
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Time(ms) spent in memcpy"
.size .Lstr.1, 25
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "bandwidth(Gbps) utilized during memcpy"
.size .Lstr.2, 39
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Using all 8 GPUs"
.size .Lstr.3, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __gxx_personality_v0
.addrsig_sym _GLOBAL__sub_I_nvlink_blocked.hip
.addrsig_sym _Unwind_Resume
.addrsig_sym g
.addrsig_sym __dso_handle
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void setPriorAtLast ( const int dim, const int nwl, const float *lst, float *prr ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
prr[i] = lst[dim+3+i*(dim+1+1+1+1)];
}
} | code for sm_80
Function : _Z14setPriorAtLastiiPKfPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */
/* 0x001fca00078e0204 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x164], PT ; /* 0x0000590004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R3, c[0x0][0x160] ; /* 0x0000580000037a02 */
/* 0x000fe20000000f00 */
/*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0090*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fca0007ffe0ff */
/*00a0*/ IMAD R3, R4, R3, c[0x0][0x160] ; /* 0x0000580004037624 */
/* 0x000fc800078e0203 */
/*00b0*/ IMAD.WIDE R2, R3, R5, c[0x0][0x168] ; /* 0x00005a0003027625 */
/* 0x000fcc00078e0205 */
/*00c0*/ LDG.E R3, [R2.64+0xc] ; /* 0x00000c0402037981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fca00078e0205 */
/*00e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void setPriorAtLast ( const int dim, const int nwl, const float *lst, float *prr ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
prr[i] = lst[dim+3+i*(dim+1+1+1+1)];
}
} | .file "tmpxft_0003b8a8_00000000-6_setPriorAtLast.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf
.type _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf, @function
_Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14setPriorAtLastiiPKfPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf, .-_Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf
.globl _Z14setPriorAtLastiiPKfPf
.type _Z14setPriorAtLastiiPKfPf, @function
_Z14setPriorAtLastiiPKfPf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14setPriorAtLastiiPKfPf, .-_Z14setPriorAtLastiiPKfPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14setPriorAtLastiiPKfPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14setPriorAtLastiiPKfPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void setPriorAtLast ( const int dim, const int nwl, const float *lst, float *prr ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
prr[i] = lst[dim+3+i*(dim+1+1+1+1)];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void setPriorAtLast ( const int dim, const int nwl, const float *lst, float *prr ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
prr[i] = lst[dim+3+i*(dim+1+1+1+1)];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void setPriorAtLast ( const int dim, const int nwl, const float *lst, float *prr ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
prr[i] = lst[dim+3+i*(dim+1+1+1+1)];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14setPriorAtLastiiPKfPf
.globl _Z14setPriorAtLastiiPKfPf
.p2align 8
.type _Z14setPriorAtLastiiPKfPf,@function
_Z14setPriorAtLastiiPKfPf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x4
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b32 s4, s[0:1], 0x0
s_load_b128 s[0:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_add_i32 s5, s4, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v1, s5
v_add3_u32 v2, s4, 3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14setPriorAtLastiiPKfPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14setPriorAtLastiiPKfPf, .Lfunc_end0-_Z14setPriorAtLastiiPKfPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14setPriorAtLastiiPKfPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14setPriorAtLastiiPKfPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void setPriorAtLast ( const int dim, const int nwl, const float *lst, float *prr ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
prr[i] = lst[dim+3+i*(dim+1+1+1+1)];
}
} | .text
.file "setPriorAtLast.hip"
.globl _Z29__device_stub__setPriorAtLastiiPKfPf # -- Begin function _Z29__device_stub__setPriorAtLastiiPKfPf
.p2align 4, 0x90
.type _Z29__device_stub__setPriorAtLastiiPKfPf,@function
_Z29__device_stub__setPriorAtLastiiPKfPf: # @_Z29__device_stub__setPriorAtLastiiPKfPf
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14setPriorAtLastiiPKfPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__setPriorAtLastiiPKfPf, .Lfunc_end0-_Z29__device_stub__setPriorAtLastiiPKfPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14setPriorAtLastiiPKfPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14setPriorAtLastiiPKfPf,@object # @_Z14setPriorAtLastiiPKfPf
.section .rodata,"a",@progbits
.globl _Z14setPriorAtLastiiPKfPf
.p2align 3, 0x0
_Z14setPriorAtLastiiPKfPf:
.quad _Z29__device_stub__setPriorAtLastiiPKfPf
.size _Z14setPriorAtLastiiPKfPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14setPriorAtLastiiPKfPf"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__setPriorAtLastiiPKfPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14setPriorAtLastiiPKfPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14setPriorAtLastiiPKfPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */
/* 0x001fca00078e0204 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x164], PT ; /* 0x0000590004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R3, c[0x0][0x160] ; /* 0x0000580000037a02 */
/* 0x000fe20000000f00 */
/*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0090*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fca0007ffe0ff */
/*00a0*/ IMAD R3, R4, R3, c[0x0][0x160] ; /* 0x0000580004037624 */
/* 0x000fc800078e0203 */
/*00b0*/ IMAD.WIDE R2, R3, R5, c[0x0][0x168] ; /* 0x00005a0003027625 */
/* 0x000fcc00078e0205 */
/*00c0*/ LDG.E R3, [R2.64+0xc] ; /* 0x00000c0402037981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fca00078e0205 */
/*00e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14setPriorAtLastiiPKfPf
.globl _Z14setPriorAtLastiiPKfPf
.p2align 8
.type _Z14setPriorAtLastiiPKfPf,@function
_Z14setPriorAtLastiiPKfPf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x4
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b32 s4, s[0:1], 0x0
s_load_b128 s[0:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_add_i32 s5, s4, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v1, s5
v_add3_u32 v2, s4, 3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14setPriorAtLastiiPKfPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14setPriorAtLastiiPKfPf, .Lfunc_end0-_Z14setPriorAtLastiiPKfPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14setPriorAtLastiiPKfPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14setPriorAtLastiiPKfPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0003b8a8_00000000-6_setPriorAtLast.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf
.type _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf, @function
_Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14setPriorAtLastiiPKfPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf, .-_Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf
.globl _Z14setPriorAtLastiiPKfPf
.type _Z14setPriorAtLastiiPKfPf, @function
_Z14setPriorAtLastiiPKfPf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z14setPriorAtLastiiPKfPfiiPKfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14setPriorAtLastiiPKfPf, .-_Z14setPriorAtLastiiPKfPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14setPriorAtLastiiPKfPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14setPriorAtLastiiPKfPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "setPriorAtLast.hip"
.globl _Z29__device_stub__setPriorAtLastiiPKfPf # -- Begin function _Z29__device_stub__setPriorAtLastiiPKfPf
.p2align 4, 0x90
.type _Z29__device_stub__setPriorAtLastiiPKfPf,@function
_Z29__device_stub__setPriorAtLastiiPKfPf: # @_Z29__device_stub__setPriorAtLastiiPKfPf
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14setPriorAtLastiiPKfPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__setPriorAtLastiiPKfPf, .Lfunc_end0-_Z29__device_stub__setPriorAtLastiiPKfPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14setPriorAtLastiiPKfPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14setPriorAtLastiiPKfPf,@object # @_Z14setPriorAtLastiiPKfPf
.section .rodata,"a",@progbits
.globl _Z14setPriorAtLastiiPKfPf
.p2align 3, 0x0
_Z14setPriorAtLastiiPKfPf:
.quad _Z29__device_stub__setPriorAtLastiiPKfPf
.size _Z14setPriorAtLastiiPKfPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14setPriorAtLastiiPKfPf"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__setPriorAtLastiiPKfPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14setPriorAtLastiiPKfPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d*type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
} | .file "tmpxft_001b5adf_00000000-6_copy_kernel_frombuf.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii
.type _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii, @function
_Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $280, %rsp
.cfi_def_cfa_offset 288
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 264(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 288(%rsp), %rax
movq %rax, 144(%rsp)
leaq 296(%rsp), %rax
movq %rax, 152(%rsp)
leaq 304(%rsp), %rax
movq %rax, 160(%rsp)
leaq 312(%rsp), %rax
movq %rax, 168(%rsp)
leaq 320(%rsp), %rax
movq %rax, 176(%rsp)
leaq 328(%rsp), %rax
movq %rax, 184(%rsp)
leaq 336(%rsp), %rax
movq %rax, 192(%rsp)
leaq 344(%rsp), %rax
movq %rax, 200(%rsp)
leaq 352(%rsp), %rax
movq %rax, 208(%rsp)
leaq 360(%rsp), %rax
movq %rax, 216(%rsp)
leaq 368(%rsp), %rax
movq %rax, 224(%rsp)
leaq 376(%rsp), %rax
movq %rax, 232(%rsp)
leaq 384(%rsp), %rax
movq %rax, 240(%rsp)
leaq 392(%rsp), %rax
movq %rax, 248(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 264(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $280, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 296
pushq 40(%rsp)
.cfi_def_cfa_offset 304
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 288
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii, .-_Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii
.globl _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.type _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, @function
_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 72
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 80
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 88
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 96
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 120
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 128
call _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, .-_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d*type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d*type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d*type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.globl _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.p2align 8
.type _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii,@function
_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x64
s_load_b32 s6, s[0:1], 0x28
v_and_b32_e32 v1, 0x3ff, v0
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s4, 0xffff
s_cmp_lg_u32 s6, 1
v_mad_u64_u32 v[2:3], null, s13, s5, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v2, s6
v_add_nc_u32_e32 v1, s2, v4
s_cbranch_scc0 .LBB0_2
s_delay_alu instid0(VALU_DEP_1)
v_cmp_lt_i32_e32 vcc_lo, s3, v1
s_and_b32 s2, vcc_lo, exec_lo
s_cbranch_execz .LBB0_3
s_branch .LBB0_4
.LBB0_2:
s_mov_b32 s2, 0
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_and_not1_b32 s2, s2, exec_lo
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s2, s2, s3
.LBB0_4:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_39
s_clause 0x1
s_load_b32 s7, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x18
v_bfe_u32 v2, v0, 10, 10
s_lshr_b32 s4, s4, 16
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s4, 0xffff, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[5:6], null, s14, s4, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v5, s7
s_cmp_lg_u32 s7, 1
v_add_nc_u32_e32 v2, s2, v5
s_cbranch_scc0 .LBB0_7
s_delay_alu instid0(VALU_DEP_1)
v_cmp_lt_i32_e32 vcc_lo, s3, v2
s_and_b32 s2, vcc_lo, exec_lo
s_cbranch_execz .LBB0_8
s_branch .LBB0_9
.LBB0_7:
s_mov_b32 s2, 0
.LBB0_8:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v2
s_and_not1_b32 s2, s2, exec_lo
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s2, s2, s3
.LBB0_9:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_39
s_clause 0x1
s_load_b32 s4, s[0:1], 0x68
s_load_b32 s8, s[0:1], 0x30
v_bfe_u32 v0, v0, 20, 10
s_load_b64 s[2:3], s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s4, 0xffff, s4
s_cmp_lg_u32 s8, 1
v_mad_u64_u32 v[6:7], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v6, s8
v_add_nc_u32_e32 v3, s2, v0
s_cbranch_scc0 .LBB0_12
s_delay_alu instid0(VALU_DEP_1)
v_cmp_lt_i32_e32 vcc_lo, s3, v3
s_and_b32 s2, vcc_lo, exec_lo
s_cbranch_execz .LBB0_13
s_branch .LBB0_14
.LBB0_12:
s_mov_b32 s2, 0
.LBB0_13:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v3
s_and_not1_b32 s2, s2, exec_lo
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s2, s2, s3
.LBB0_14:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_39
s_clause 0x2
s_load_b64 s[4:5], s[0:1], 0x34
s_load_b32 s9, s[0:1], 0x54
s_load_b64 s[2:3], s[0:1], 0x4c
s_mov_b32 s10, 0
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[6:7], null, v3, s5, v[2:3]
s_cmp_eq_u32 s9, 0
s_cselect_b32 s9, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s9
v_mad_u64_u32 v[2:3], null, v6, s4, v[1:2]
s_cbranch_vccnz .LBB0_17
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v1, v2, s2
s_branch .LBB0_18
.LBB0_17:
s_mov_b32 s10, -1
.LBB0_18:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s10
s_mul_i32 s10, s3, s2
s_cbranch_vccnz .LBB0_20
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v1, s10, v2
.LBB0_20:
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_39
s_clause 0x1
s_load_b128 s[12:15], s[0:1], 0x3c
s_load_b128 s[16:19], s[0:1], 0x0
s_mul_i32 s4, s5, s4
s_mov_b32 s11, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s0, s15, s8
s_mul_i32 s1, s14, s7
s_mul_i32 s6, s13, s6
v_mul_lo_u32 v0, s0, v0
v_mul_lo_u32 v2, s1, v5
v_mul_lo_u32 v3, s6, v4
v_ashrrev_i32_e32 v4, 31, v1
s_mul_i32 s4, s4, s12
s_ashr_i32 s1, s2, 31
s_cmp_gt_u32 s2, 3
s_mul_i32 s4, s4, s2
s_cselect_b32 s8, -1, 0
s_ashr_i32 s5, s4, 31
v_add3_u32 v0, v2, v0, v3
s_and_b32 s6, s9, exec_lo
s_mov_b32 s0, s2
s_cselect_b32 s9, s1, s5
s_mov_b32 s12, 0
v_mul_lo_u32 v2, s10, v0
v_add_co_u32 v0, vcc_lo, s16, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s17, v4, vcc_lo
s_cselect_b32 s10, s2, s4
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v3, 31, v2
v_add_co_u32 v6, vcc_lo, s18, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s19, v3, vcc_lo
s_branch .LBB0_23
.LBB0_22:
v_add_co_u32 v0, vcc_lo, v0, s10
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
s_add_i32 s12, s12, 1
s_add_i32 s11, s11, s2
s_cmp_lg_u32 s12, s3
s_cbranch_scc0 .LBB0_39
.LBB0_23:
s_and_not1_b32 vcc_lo, exec_lo, s8
s_cbranch_vccnz .LBB0_27
s_ashr_i32 s4, s11, 31
v_add_co_u32 v2, vcc_lo, v6, s11
v_add_co_ci_u32_e32 v3, vcc_lo, s4, v7, vcc_lo
s_mov_b64 s[6:7], 0
s_mov_b64 s[4:5], s[0:1]
.p2align 6
.LBB0_25:
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, v2, s6
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v8, vcc_lo, v0, s6
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v1, vcc_lo
global_load_u8 v10, v[4:5], off
s_add_u32 s4, s4, -4
s_addc_u32 s5, s5, -1
s_add_u32 s6, s6, 4
v_cmp_gt_u64_e64 s13, s[4:5], 3
s_addc_u32 s7, s7, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s13
s_waitcnt vmcnt(0)
global_store_b8 v[8:9], v10, off
global_load_u8 v10, v[4:5], off offset:1
s_waitcnt vmcnt(0)
global_store_b8 v[8:9], v10, off offset:1
global_load_u8 v10, v[4:5], off offset:2
s_waitcnt vmcnt(0)
global_store_b8 v[8:9], v10, off offset:2
global_load_u8 v4, v[4:5], off offset:3
s_waitcnt vmcnt(0)
global_store_b8 v[8:9], v4, off offset:3
s_cbranch_vccnz .LBB0_25
v_add_co_u32 v2, vcc_lo, v2, s6
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v4, vcc_lo, v0, s6
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
s_branch .LBB0_28
.LBB0_27:
s_mul_i32 s4, s12, s2
v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
s_ashr_i32 s5, s4, 31
v_add_co_u32 v2, vcc_lo, v6, s4
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v7, vcc_lo
s_mov_b64 s[4:5], s[0:1]
.LBB0_28:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_i64_e64 s6, s[4:5], 2
s_and_b32 vcc_lo, exec_lo, s6
s_cbranch_vccnz .LBB0_34
v_cmp_gt_i64_e64 s6, s[4:5], 2
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s6
s_mov_b32 s6, -1
s_cbranch_vccz .LBB0_32
s_cmp_eq_u64 s[4:5], 3
s_mov_b32 s6, 0
s_cbranch_scc0 .LBB0_32
global_load_u8 v8, v[2:3], off offset:2
s_mov_b32 s6, -1
s_waitcnt vmcnt(0)
global_store_b8 v[4:5], v8, off offset:2
.LBB0_32:
s_mov_b32 s7, 0
s_and_b32 vcc_lo, exec_lo, s6
s_mov_b32 s6, 0
s_cbranch_vccz .LBB0_35
global_load_u8 v8, v[2:3], off offset:1
s_mov_b32 s6, -1
s_waitcnt vmcnt(0)
global_store_b8 v[4:5], v8, off offset:1
s_branch .LBB0_35
.LBB0_34:
s_mov_b32 s7, -1
s_mov_b32 s6, 0
.LBB0_35:
s_and_b32 vcc_lo, exec_lo, s7
s_cbranch_vccz .LBB0_37
s_cmp_eq_u64 s[4:5], 1
s_cselect_b32 s6, -1, 0
.LBB0_37:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s6
s_cbranch_vccnz .LBB0_22
global_load_u8 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b8 v[4:5], v2, off
s_branch .LBB0_22
.LBB0_39:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 344
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, .Lfunc_end0-_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 52
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: by_value
- .offset: 60
.size: 4
.value_kind: by_value
- .offset: 64
.size: 4
.value_kind: by_value
- .offset: 68
.size: 4
.value_kind: by_value
- .offset: 72
.size: 4
.value_kind: by_value
- .offset: 76
.size: 4
.value_kind: by_value
- .offset: 80
.size: 4
.value_kind: by_value
- .offset: 84
.size: 4
.value_kind: by_value
- .offset: 88
.size: 4
.value_kind: hidden_block_count_x
- .offset: 92
.size: 4
.value_kind: hidden_block_count_y
- .offset: 96
.size: 4
.value_kind: hidden_block_count_z
- .offset: 100
.size: 2
.value_kind: hidden_group_size_x
- .offset: 102
.size: 2
.value_kind: hidden_group_size_y
- .offset: 104
.size: 2
.value_kind: hidden_group_size_z
- .offset: 106
.size: 2
.value_kind: hidden_remainder_x
- .offset: 108
.size: 2
.value_kind: hidden_remainder_y
- .offset: 110
.size: 2
.value_kind: hidden_remainder_z
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 144
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 152
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 344
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d*type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
} | .text
.file "copy_kernel_frombuf.hip"
.globl _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii # -- Begin function _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.p2align 4, 0x90
.type _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii,@function
_Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii: # @_Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 256(%rsp), %rax
movq %rax, 128(%rsp)
leaq 264(%rsp), %rax
movq %rax, 136(%rsp)
leaq 272(%rsp), %rax
movq %rax, 144(%rsp)
leaq 280(%rsp), %rax
movq %rax, 152(%rsp)
leaq 288(%rsp), %rax
movq %rax, 160(%rsp)
leaq 296(%rsp), %rax
movq %rax, 168(%rsp)
leaq 304(%rsp), %rax
movq %rax, 176(%rsp)
leaq 312(%rsp), %rax
movq %rax, 184(%rsp)
leaq 320(%rsp), %rax
movq %rax, 192(%rsp)
leaq 328(%rsp), %rax
movq %rax, 200(%rsp)
leaq 336(%rsp), %rax
movq %rax, 208(%rsp)
leaq 344(%rsp), %rax
movq %rax, 216(%rsp)
leaq 352(%rsp), %rax
movq %rax, 224(%rsp)
leaq 360(%rsp), %rax
movq %rax, 232(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end0:
.size _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, .Lfunc_end0-_Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii,@object # @_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.section .rodata,"a",@progbits
.globl _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.p2align 3, 0x0
_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii:
.quad _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.size _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii"
.size .L__unnamed_1, 46
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001b5adf_00000000-6_copy_kernel_frombuf.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii
.type _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii, @function
_Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $280, %rsp
.cfi_def_cfa_offset 288
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 264(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 288(%rsp), %rax
movq %rax, 144(%rsp)
leaq 296(%rsp), %rax
movq %rax, 152(%rsp)
leaq 304(%rsp), %rax
movq %rax, 160(%rsp)
leaq 312(%rsp), %rax
movq %rax, 168(%rsp)
leaq 320(%rsp), %rax
movq %rax, 176(%rsp)
leaq 328(%rsp), %rax
movq %rax, 184(%rsp)
leaq 336(%rsp), %rax
movq %rax, 192(%rsp)
leaq 344(%rsp), %rax
movq %rax, 200(%rsp)
leaq 352(%rsp), %rax
movq %rax, 208(%rsp)
leaq 360(%rsp), %rax
movq %rax, 216(%rsp)
leaq 368(%rsp), %rax
movq %rax, 224(%rsp)
leaq 376(%rsp), %rax
movq %rax, 232(%rsp)
leaq 384(%rsp), %rax
movq %rax, 240(%rsp)
leaq 392(%rsp), %rax
movq %rax, 248(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 264(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $280, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 296
pushq 40(%rsp)
.cfi_def_cfa_offset 304
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 288
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii, .-_Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii
.globl _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.type _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, @function
_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 72
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 80
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 88
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 96
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 120
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 128
call _Z59__device_stub__Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiiiPcS_iiiiiiiiiiiiiiiiii
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, .-_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "copy_kernel_frombuf.hip"
.globl _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii # -- Begin function _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.p2align 4, 0x90
.type _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii,@function
_Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii: # @_Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 256(%rsp), %rax
movq %rax, 128(%rsp)
leaq 264(%rsp), %rax
movq %rax, 136(%rsp)
leaq 272(%rsp), %rax
movq %rax, 144(%rsp)
leaq 280(%rsp), %rax
movq %rax, 152(%rsp)
leaq 288(%rsp), %rax
movq %rax, 160(%rsp)
leaq 296(%rsp), %rax
movq %rax, 168(%rsp)
leaq 304(%rsp), %rax
movq %rax, 176(%rsp)
leaq 312(%rsp), %rax
movq %rax, 184(%rsp)
leaq 320(%rsp), %rax
movq %rax, 192(%rsp)
leaq 328(%rsp), %rax
movq %rax, 200(%rsp)
leaq 336(%rsp), %rax
movq %rax, 208(%rsp)
leaq 344(%rsp), %rax
movq %rax, 216(%rsp)
leaq 352(%rsp), %rax
movq %rax, 224(%rsp)
leaq 360(%rsp), %rax
movq %rax, 232(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end0:
.size _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, .Lfunc_end0-_Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii,@object # @_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.section .rodata,"a",@progbits
.globl _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.p2align 3, 0x0
_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii:
.quad _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.size _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii"
.size .L__unnamed_1, 46
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19copy_kernel_frombufPcS_iiiiiiiiiiiiiiiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void genColorsKernel(float* colors, int nelems) {
const float AF_BLUE[4] = {0.0588f, 0.1137f, 0.2745f, 1.0f};
const float AF_ORANGE[4] = {0.8588f, 0.6137f, 0.0745f, 1.0f};
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
if (i % 2 == 0) {
colors[3 * i + 0] = AF_ORANGE[0];
colors[3 * i + 1] = AF_ORANGE[1];
colors[3 * i + 2] = AF_ORANGE[2];
} else {
colors[3 * i + 0] = AF_BLUE[0];
colors[3 * i + 1] = AF_BLUE[1];
colors[3 * i + 2] = AF_BLUE[2];
}
}
} | code for sm_80
Function : _Z15genColorsKernelPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ LOP3.LUT R2, R0, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100027812 */
/* 0x000fe200078ec0ff */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0090*/ ISETP.NE.U32.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fe20003f05070 */
/*00a0*/ IMAD R2, R0, 0x3, RZ ; /* 0x0000000300027824 */
/* 0x000fc800078e02ff */
/*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fd000078e0203 */
/*00c0*/ @!P0 MOV R5, 0x3d70d845 ; /* 0x3d70d84500058802 */
/* 0x000fe20000000f00 */
/*00d0*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, 0x3de8db8c ; /* 0x3de8db8cff078424 */
/* 0x000fe200078e00ff */
/*00e0*/ @!P0 MOV R9, 0x3e8c8b44 ; /* 0x3e8c8b4400098802 */
/* 0x000fc60000000f00 */
/*00f0*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */
/* 0x0001e8000c101904 */
/*0100*/ @!P0 STG.E [R2.64+0x4], R7 ; /* 0x0000040702008986 */
/* 0x0001e8000c101904 */
/*0110*/ @!P0 STG.E [R2.64+0x8], R9 ; /* 0x0000080902008986 */
/* 0x0001e2000c101904 */
/*0120*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0130*/ HFMA2.MMA R7, -RZ, RZ, 1.7783203125, 0.0036334991455078125 ; /* 0x3f1d1b71ff077435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD.MOV.U32 R5, RZ, RZ, 0x3f5bda51 ; /* 0x3f5bda51ff057424 */
/* 0x000fe200078e00ff */
/*0150*/ MOV R9, 0x3d989375 ; /* 0x3d98937500097802 */
/* 0x000fc80000000f00 */
/*0160*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*0170*/ STG.E [R2.64+0x4], R7 ; /* 0x0000040702007986 */
/* 0x000fe8000c101904 */
/*0180*/ STG.E [R2.64+0x8], R9 ; /* 0x0000080902007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void genColorsKernel(float* colors, int nelems) {
const float AF_BLUE[4] = {0.0588f, 0.1137f, 0.2745f, 1.0f};
const float AF_ORANGE[4] = {0.8588f, 0.6137f, 0.0745f, 1.0f};
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
if (i % 2 == 0) {
colors[3 * i + 0] = AF_ORANGE[0];
colors[3 * i + 1] = AF_ORANGE[1];
colors[3 * i + 2] = AF_ORANGE[2];
} else {
colors[3 * i + 0] = AF_BLUE[0];
colors[3 * i + 1] = AF_BLUE[1];
colors[3 * i + 2] = AF_BLUE[2];
}
}
} | .file "tmpxft_00175deb_00000000-6_genColorsKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z15genColorsKernelPfiPfi
.type _Z36__device_stub__Z15genColorsKernelPfiPfi, @function
_Z36__device_stub__Z15genColorsKernelPfiPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z15genColorsKernelPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z36__device_stub__Z15genColorsKernelPfiPfi, .-_Z36__device_stub__Z15genColorsKernelPfiPfi
.globl _Z15genColorsKernelPfi
.type _Z15genColorsKernelPfi, @function
_Z15genColorsKernelPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z15genColorsKernelPfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15genColorsKernelPfi, .-_Z15genColorsKernelPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15genColorsKernelPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15genColorsKernelPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void genColorsKernel(float* colors, int nelems) {
const float AF_BLUE[4] = {0.0588f, 0.1137f, 0.2745f, 1.0f};
const float AF_ORANGE[4] = {0.8588f, 0.6137f, 0.0745f, 1.0f};
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
if (i % 2 == 0) {
colors[3 * i + 0] = AF_ORANGE[0];
colors[3 * i + 1] = AF_ORANGE[1];
colors[3 * i + 2] = AF_ORANGE[2];
} else {
colors[3 * i + 0] = AF_BLUE[0];
colors[3 * i + 1] = AF_BLUE[1];
colors[3 * i + 2] = AF_BLUE[2];
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void genColorsKernel(float* colors, int nelems) {
const float AF_BLUE[4] = {0.0588f, 0.1137f, 0.2745f, 1.0f};
const float AF_ORANGE[4] = {0.8588f, 0.6137f, 0.0745f, 1.0f};
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
if (i % 2 == 0) {
colors[3 * i + 0] = AF_ORANGE[0];
colors[3 * i + 1] = AF_ORANGE[1];
colors[3 * i + 2] = AF_ORANGE[2];
} else {
colors[3 * i + 0] = AF_BLUE[0];
colors[3 * i + 1] = AF_BLUE[1];
colors[3 * i + 2] = AF_BLUE[2];
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void genColorsKernel(float* colors, int nelems) {
const float AF_BLUE[4] = {0.0588f, 0.1137f, 0.2745f, 1.0f};
const float AF_ORANGE[4] = {0.8588f, 0.6137f, 0.0745f, 1.0f};
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
if (i % 2 == 0) {
colors[3 * i + 0] = AF_ORANGE[0];
colors[3 * i + 1] = AF_ORANGE[1];
colors[3 * i + 2] = AF_ORANGE[2];
} else {
colors[3 * i + 0] = AF_BLUE[0];
colors[3 * i + 1] = AF_BLUE[1];
colors[3 * i + 2] = AF_BLUE[2];
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15genColorsKernelPfi
.globl _Z15genColorsKernelPfi
.p2align 8
.type _Z15genColorsKernelPfi,@function
_Z15genColorsKernelPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_6
s_load_b64 s[2:3], s[0:1], 0x0
v_lshl_add_u32 v2, v1, 1, v1
v_and_b32_e32 v4, 1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s0, s2, v0
v_add_co_ci_u32_e64 v1, s0, s3, v1, s0
s_mov_b32 s0, exec_lo
v_cmpx_eq_u32_e32 1, v4
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_3
v_mov_b32_e32 v2, 0x3d70d845
v_mov_b32_e32 v3, 0x3de8db8c
s_mov_b32 s1, 0x3e8c8b44
global_store_b64 v[0:1], v[2:3], off
.LBB0_3:
s_or_saveexec_b32 s0, s0
v_mov_b32_e32 v3, s1
s_xor_b32 exec_lo, exec_lo, s0
v_or_b32_e32 v2, 1, v2
v_mov_b32_e32 v6, 0x3f5bda51
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v3, vcc_lo
v_mov_b32_e32 v3, 0x3d989375
v_mov_b32_e32 v2, 0x3f1d1b71
s_clause 0x1
global_store_b32 v[0:1], v6, off
global_store_b32 v[4:5], v2, off
s_or_b32 exec_lo, exec_lo, s0
global_store_b32 v[0:1], v3, off offset:8
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15genColorsKernelPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15genColorsKernelPfi, .Lfunc_end0-_Z15genColorsKernelPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15genColorsKernelPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15genColorsKernelPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void genColorsKernel(float* colors, int nelems) {
const float AF_BLUE[4] = {0.0588f, 0.1137f, 0.2745f, 1.0f};
const float AF_ORANGE[4] = {0.8588f, 0.6137f, 0.0745f, 1.0f};
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
if (i % 2 == 0) {
colors[3 * i + 0] = AF_ORANGE[0];
colors[3 * i + 1] = AF_ORANGE[1];
colors[3 * i + 2] = AF_ORANGE[2];
} else {
colors[3 * i + 0] = AF_BLUE[0];
colors[3 * i + 1] = AF_BLUE[1];
colors[3 * i + 2] = AF_BLUE[2];
}
}
} | .text
.file "genColorsKernel.hip"
.globl _Z30__device_stub__genColorsKernelPfi # -- Begin function _Z30__device_stub__genColorsKernelPfi
.p2align 4, 0x90
.type _Z30__device_stub__genColorsKernelPfi,@function
_Z30__device_stub__genColorsKernelPfi: # @_Z30__device_stub__genColorsKernelPfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z15genColorsKernelPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z30__device_stub__genColorsKernelPfi, .Lfunc_end0-_Z30__device_stub__genColorsKernelPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15genColorsKernelPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15genColorsKernelPfi,@object # @_Z15genColorsKernelPfi
.section .rodata,"a",@progbits
.globl _Z15genColorsKernelPfi
.p2align 3, 0x0
_Z15genColorsKernelPfi:
.quad _Z30__device_stub__genColorsKernelPfi
.size _Z15genColorsKernelPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15genColorsKernelPfi"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__genColorsKernelPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15genColorsKernelPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z15genColorsKernelPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ LOP3.LUT R2, R0, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100027812 */
/* 0x000fe200078ec0ff */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0090*/ ISETP.NE.U32.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fe20003f05070 */
/*00a0*/ IMAD R2, R0, 0x3, RZ ; /* 0x0000000300027824 */
/* 0x000fc800078e02ff */
/*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fd000078e0203 */
/*00c0*/ @!P0 MOV R5, 0x3d70d845 ; /* 0x3d70d84500058802 */
/* 0x000fe20000000f00 */
/*00d0*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, 0x3de8db8c ; /* 0x3de8db8cff078424 */
/* 0x000fe200078e00ff */
/*00e0*/ @!P0 MOV R9, 0x3e8c8b44 ; /* 0x3e8c8b4400098802 */
/* 0x000fc60000000f00 */
/*00f0*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */
/* 0x0001e8000c101904 */
/*0100*/ @!P0 STG.E [R2.64+0x4], R7 ; /* 0x0000040702008986 */
/* 0x0001e8000c101904 */
/*0110*/ @!P0 STG.E [R2.64+0x8], R9 ; /* 0x0000080902008986 */
/* 0x0001e2000c101904 */
/*0120*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0130*/ HFMA2.MMA R7, -RZ, RZ, 1.7783203125, 0.0036334991455078125 ; /* 0x3f1d1b71ff077435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD.MOV.U32 R5, RZ, RZ, 0x3f5bda51 ; /* 0x3f5bda51ff057424 */
/* 0x000fe200078e00ff */
/*0150*/ MOV R9, 0x3d989375 ; /* 0x3d98937500097802 */
/* 0x000fc80000000f00 */
/*0160*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*0170*/ STG.E [R2.64+0x4], R7 ; /* 0x0000040702007986 */
/* 0x000fe8000c101904 */
/*0180*/ STG.E [R2.64+0x8], R9 ; /* 0x0000080902007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15genColorsKernelPfi
.globl _Z15genColorsKernelPfi
.p2align 8
.type _Z15genColorsKernelPfi,@function
_Z15genColorsKernelPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_6
s_load_b64 s[2:3], s[0:1], 0x0
v_lshl_add_u32 v2, v1, 1, v1
v_and_b32_e32 v4, 1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s0, s2, v0
v_add_co_ci_u32_e64 v1, s0, s3, v1, s0
s_mov_b32 s0, exec_lo
v_cmpx_eq_u32_e32 1, v4
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_3
v_mov_b32_e32 v2, 0x3d70d845
v_mov_b32_e32 v3, 0x3de8db8c
s_mov_b32 s1, 0x3e8c8b44
global_store_b64 v[0:1], v[2:3], off
.LBB0_3:
s_or_saveexec_b32 s0, s0
v_mov_b32_e32 v3, s1
s_xor_b32 exec_lo, exec_lo, s0
v_or_b32_e32 v2, 1, v2
v_mov_b32_e32 v6, 0x3f5bda51
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v3, vcc_lo
v_mov_b32_e32 v3, 0x3d989375
v_mov_b32_e32 v2, 0x3f1d1b71
s_clause 0x1
global_store_b32 v[0:1], v6, off
global_store_b32 v[4:5], v2, off
s_or_b32 exec_lo, exec_lo, s0
global_store_b32 v[0:1], v3, off offset:8
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15genColorsKernelPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15genColorsKernelPfi, .Lfunc_end0-_Z15genColorsKernelPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15genColorsKernelPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15genColorsKernelPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00175deb_00000000-6_genColorsKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z15genColorsKernelPfiPfi
.type _Z36__device_stub__Z15genColorsKernelPfiPfi, @function
_Z36__device_stub__Z15genColorsKernelPfiPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z15genColorsKernelPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z36__device_stub__Z15genColorsKernelPfiPfi, .-_Z36__device_stub__Z15genColorsKernelPfiPfi
.globl _Z15genColorsKernelPfi
.type _Z15genColorsKernelPfi, @function
_Z15genColorsKernelPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z15genColorsKernelPfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15genColorsKernelPfi, .-_Z15genColorsKernelPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15genColorsKernelPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15genColorsKernelPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "genColorsKernel.hip"
.globl _Z30__device_stub__genColorsKernelPfi # -- Begin function _Z30__device_stub__genColorsKernelPfi
.p2align 4, 0x90
.type _Z30__device_stub__genColorsKernelPfi,@function
_Z30__device_stub__genColorsKernelPfi: # @_Z30__device_stub__genColorsKernelPfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z15genColorsKernelPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z30__device_stub__genColorsKernelPfi, .Lfunc_end0-_Z30__device_stub__genColorsKernelPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15genColorsKernelPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15genColorsKernelPfi,@object # @_Z15genColorsKernelPfi
.section .rodata,"a",@progbits
.globl _Z15genColorsKernelPfi
.p2align 3, 0x0
_Z15genColorsKernelPfi:
.quad _Z30__device_stub__genColorsKernelPfi
.size _Z15genColorsKernelPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15genColorsKernelPfi"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__genColorsKernelPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15genColorsKernelPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
} | code for sm_80
Function : _Z13__longToFloatPxPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0040*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ MOV R7, c[0x0][0x0] ; /* 0x0000000000077a02 */
/* 0x000fe20000000f00 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00a0*/ IMAD R7, R7, c[0x0][0xc], RZ ; /* 0x0000030007077a24 */
/* 0x000fe400078e02ff */
/*00b0*/ HFMA2.MMA R3, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff037435 */
/* 0x000fd400000001ff */
/*00c0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0203 */
/*00d0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00e0*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x001fca0000000f00 */
/*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0205 */
/*0100*/ IMAD R0, R7, c[0x0][0x10], R0 ; /* 0x0000040007007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06270 */
/*0120*/ I2F.S64 R9, R2 ; /* 0x0000000200097312 */
/* 0x004e240000301400 */
/*0130*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0011f4000c101904 */
/*0140*/ @!P0 BRA 0xb0 ; /* 0xffffff6000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
} | .file "tmpxft_00188d2f_00000000-6___longToFloat.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z13__longToFloatPxPfiPxPfi
.type _Z36__device_stub__Z13__longToFloatPxPfiPxPfi, @function
_Z36__device_stub__Z13__longToFloatPxPfiPxPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13__longToFloatPxPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z36__device_stub__Z13__longToFloatPxPfiPxPfi, .-_Z36__device_stub__Z13__longToFloatPxPfiPxPfi
.globl _Z13__longToFloatPxPfi
.type _Z13__longToFloatPxPfi, @function
_Z13__longToFloatPxPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z13__longToFloatPxPfiPxPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13__longToFloatPxPfi, .-_Z13__longToFloatPxPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13__longToFloatPxPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13__longToFloatPxPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
// Converts each of the N 64-bit integers in A to a 32-bit float in B.
// Uses a grid-stride loop over a 2-D grid of 1-D blocks, so any launch
// configuration covers all N elements correctly.
// NOTE(review): the leading double underscore in the name is reserved to
// the implementation; kept unchanged because callers/mangled symbols depend on it.
__global__ void __longToFloat(long long *A, float *B, int N) {
    // Flat global index across the (gridDim.x x gridDim.y) grid.
    int idx = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    const int stride = blockDim.x * gridDim.x * gridDim.y;  // total threads launched
    for (; idx < N; idx += stride) {
        B[idx] = (float)(A[idx]);
    }
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13__longToFloatPxPfi
.globl _Z13__longToFloatPxPfi
.p2align 8
.type _Z13__longToFloatPxPfi,@function
_Z13__longToFloatPxPfi:
s_clause 0x2
s_load_b32 s9, s[0:1], 0x18
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s8, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s5, s9, s15
s_and_b32 s10, s4, 0xffff
s_add_i32 s5, s5, s14
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s5, s10, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x4
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s1, s9, s10
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s1, s2
s_mov_b32 s2, 0
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
global_load_b64 v[3:4], v[3:4], off
s_waitcnt vmcnt(0)
v_xor_b32_e32 v0, v3, v4
v_cls_i32_e32 v5, v4
v_ashrrev_i32_e32 v0, 31, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, -1, v5
v_add_nc_u32_e32 v0, 32, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_min_u32_e32 v0, v5, v0
v_lshlrev_b64 v[3:4], v0, v[3:4]
v_sub_nc_u32_e32 v0, 32, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_min_u32_e32 v3, 1, v3
v_or_b32_e32 v4, v4, v3
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v4, v4
v_cmp_le_i32_e32 vcc_lo, s8, v1
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, s0, s6, v2
v_ldexp_f32 v0, v4, v0
v_add_co_ci_u32_e64 v3, s0, s7, v3, s0
s_or_b32 s2, vcc_lo, s2
global_store_b32 v[2:3], v0, off
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13__longToFloatPxPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13__longToFloatPxPfi, .Lfunc_end0-_Z13__longToFloatPxPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13__longToFloatPxPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13__longToFloatPxPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
} | .text
.file "__longToFloat.hip"
.globl _Z28__device_stub____longToFloatPxPfi # -- Begin function _Z28__device_stub____longToFloatPxPfi
.p2align 4, 0x90
.type _Z28__device_stub____longToFloatPxPfi,@function
_Z28__device_stub____longToFloatPxPfi: # @_Z28__device_stub____longToFloatPxPfi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13__longToFloatPxPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z28__device_stub____longToFloatPxPfi, .Lfunc_end0-_Z28__device_stub____longToFloatPxPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13__longToFloatPxPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13__longToFloatPxPfi,@object # @_Z13__longToFloatPxPfi
.section .rodata,"a",@progbits
.globl _Z13__longToFloatPxPfi
.p2align 3, 0x0
_Z13__longToFloatPxPfi:
.quad _Z28__device_stub____longToFloatPxPfi
.size _Z13__longToFloatPxPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13__longToFloatPxPfi"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub____longToFloatPxPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13__longToFloatPxPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13__longToFloatPxPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0040*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ MOV R7, c[0x0][0x0] ; /* 0x0000000000077a02 */
/* 0x000fe20000000f00 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00a0*/ IMAD R7, R7, c[0x0][0xc], RZ ; /* 0x0000030007077a24 */
/* 0x000fe400078e02ff */
/*00b0*/ HFMA2.MMA R3, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff037435 */
/* 0x000fd400000001ff */
/*00c0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0203 */
/*00d0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00e0*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x001fca0000000f00 */
/*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0205 */
/*0100*/ IMAD R0, R7, c[0x0][0x10], R0 ; /* 0x0000040007007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06270 */
/*0120*/ I2F.S64 R9, R2 ; /* 0x0000000200097312 */
/* 0x004e240000301400 */
/*0130*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0011f4000c101904 */
/*0140*/ @!P0 BRA 0xb0 ; /* 0xffffff6000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13__longToFloatPxPfi
.globl _Z13__longToFloatPxPfi
.p2align 8
.type _Z13__longToFloatPxPfi,@function
_Z13__longToFloatPxPfi:
s_clause 0x2
s_load_b32 s9, s[0:1], 0x18
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s8, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s5, s9, s15
s_and_b32 s10, s4, 0xffff
s_add_i32 s5, s5, s14
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s5, s10, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x4
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s1, s9, s10
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s1, s2
s_mov_b32 s2, 0
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
global_load_b64 v[3:4], v[3:4], off
s_waitcnt vmcnt(0)
v_xor_b32_e32 v0, v3, v4
v_cls_i32_e32 v5, v4
v_ashrrev_i32_e32 v0, 31, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, -1, v5
v_add_nc_u32_e32 v0, 32, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_min_u32_e32 v0, v5, v0
v_lshlrev_b64 v[3:4], v0, v[3:4]
v_sub_nc_u32_e32 v0, 32, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_min_u32_e32 v3, 1, v3
v_or_b32_e32 v4, v4, v3
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v4, v4
v_cmp_le_i32_e32 vcc_lo, s8, v1
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, s0, s6, v2
v_ldexp_f32 v0, v4, v0
v_add_co_ci_u32_e64 v3, s0, s7, v3, s0
s_or_b32 s2, vcc_lo, s2
global_store_b32 v[2:3], v0, off
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13__longToFloatPxPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13__longToFloatPxPfi, .Lfunc_end0-_Z13__longToFloatPxPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13__longToFloatPxPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13__longToFloatPxPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00188d2f_00000000-6___longToFloat.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z13__longToFloatPxPfiPxPfi
.type _Z36__device_stub__Z13__longToFloatPxPfiPxPfi, @function
_Z36__device_stub__Z13__longToFloatPxPfiPxPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13__longToFloatPxPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z36__device_stub__Z13__longToFloatPxPfiPxPfi, .-_Z36__device_stub__Z13__longToFloatPxPfiPxPfi
.globl _Z13__longToFloatPxPfi
.type _Z13__longToFloatPxPfi, @function
_Z13__longToFloatPxPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z13__longToFloatPxPfiPxPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13__longToFloatPxPfi, .-_Z13__longToFloatPxPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13__longToFloatPxPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13__longToFloatPxPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "__longToFloat.hip"
.globl _Z28__device_stub____longToFloatPxPfi # -- Begin function _Z28__device_stub____longToFloatPxPfi
.p2align 4, 0x90
.type _Z28__device_stub____longToFloatPxPfi,@function
_Z28__device_stub____longToFloatPxPfi: # @_Z28__device_stub____longToFloatPxPfi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13__longToFloatPxPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z28__device_stub____longToFloatPxPfi, .Lfunc_end0-_Z28__device_stub____longToFloatPxPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13__longToFloatPxPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13__longToFloatPxPfi,@object # @_Z13__longToFloatPxPfi
.section .rodata,"a",@progbits
.globl _Z13__longToFloatPxPfi
.p2align 3, 0x0
_Z13__longToFloatPxPfi:
.quad _Z28__device_stub____longToFloatPxPfi
.size _Z13__longToFloatPxPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13__longToFloatPxPfi"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub____longToFloatPxPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13__longToFloatPxPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
// Serial reference: approximates sin(input[i]) for all N elements with the
// Taylor expansion sin(x) = x - x^3/3! + x^5/5! - ... (TERMS extra terms
// beyond the leading x), writing each result to output[i].
//
// NOTE(review): denom overflows 32-bit int on the last loop iteration
// (13! > 2^31, undefined behavior in theory; wraps on common targets).
// The GPU kernel wraps identically, so main()'s comparison still passes,
// but both routines would need widening together if this is ever fixed.
void sine_serial(float *input, float *output)
{
  int i;
  for (i=0; i<N; i++) {
    float value = input[i];                        // leading Taylor term: x
    float numer = input[i] * input[i] * input[i];  // x^3
    int denom = 6; // 3!
    int sign = -1; // alternating series sign
    for (int j=1; j<=TERMS;j++)
    {
      value += sign * numer / denom;  // next term; int operands promote to float
      numer *= input[i] * input[i];   // x^(2k+1) -> x^(2k+3)
      denom *= (2*j+2) * (2*j+3);     // (2k+1)! -> (2k+3)!
      sign *= -1;
    }
    output[i] = value;
  }
}
// kernel function (CUDA device)
// Grid-stride CUDA port of sine_serial: each thread handles elements
// idx, idx + gridSize, idx + 2*gridSize, ... with an explicit idx < N guard.
//
// BUG FIX: the host launches N/1024 blocks; integer division truncates, so a
// one-element-per-thread kernel left the last N % 1024 outputs uncomputed
// (and main() then compared against uninitialized host memory).  The stride
// loop makes the kernel correct for ANY launch configuration, and the bound
// check prevents out-of-bounds accesses if the grid overshoots N.
__global__ void sine_parallel(float*d_input,float*d_output ){
  int stride = gridDim.x * blockDim.x;  // total threads in the grid
  for (int idx = blockIdx.x*blockDim.x+threadIdx.x; idx < N; idx += stride){
    float x = d_input[idx];             // load once; original re-read it per term
    float d_value = x;                  // leading Taylor term: x
    float d_numer = x * x * x;          // x^3
    int d_denom = 6;                    // 3!
    int d_sign = -1;                    // alternating series sign
    // Taylor series: sin(x) = x - x^3/3! + x^5/5! - ...
    // NOTE: d_denom wraps past 32-bit int on the last term, exactly matching
    // the CPU reference; fix both together if ever widened.
    for (int d_j = 1; d_j <= TERMS; d_j++){
      d_value += d_sign * d_numer / d_denom;  // int operands promote to float
      d_numer *= x * x;
      d_denom *= (2*d_j+2) * (2*d_j+3);
      d_sign *= -1;
    }
    d_output[idx] = d_value;
  }
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
// (wall-clock: seconds-since-epoch * 1e6 + microseconds, via gettimeofday).
// Pair with stop_timer() to measure elapsed intervals.
long long start_timer() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  // tv_sec is time_t; the multiply widens before the add on LP64 targets.
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
// start_time: a value previously returned by start_timer().
// name:       label printed before the elapsed time.
// Prints the interval in seconds (5 significant digits) to stdout and
// returns it in microseconds.
long long stop_timer(long long start_time, std::string name) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
  std::cout << std::setprecision(5);
  std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
  return end_time - start_time;
}
// Reports (to stderr) any pending CUDA error, tagged with 'label'.
// Synchronizes the device first so errors from asynchronous work (kernel
// launches, async copies) are surfaced here instead of at a later call.
// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize(); this block is marked "do not modify" upstream.
void checkErrors(const char label[])
{
  // we need to synchronise first to catch errors due to
  // asynchronous operations that would otherwise
  // potentially go unnoticed
  cudaError_t err;
  err = cudaThreadSynchronize();
  if (err != cudaSuccess)
  {
    char *e = (char*) cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
  }
  // cudaGetLastError() also clears the sticky error state for later calls.
  err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    char *e = (char*) cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
  }
}
// END: timing and error checking routines (do not modify)
// Drives the CPU reference, then the CUDA version, times each stage, and
// verifies that the two result vectors agree element-wise.
int main (int argc, char **argv)
{
  //BEGIN: CPU implementation (do not modify)
  float *h_cpu_result = (float*)malloc(N*sizeof(float));
  float *h_input = (float*)malloc(N*sizeof(float));
  //Initialize data on CPU
  int i;
  for (i=0; i<N; i++)
  {
    h_input[i] = 0.1f * i;
  }
  //Execute and time the CPU version
  long long CPU_start_time = start_timer();
  sine_serial(h_input, h_cpu_result);
  long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
  //END: CPU implementation (do not modify)

  // Host buffer for the results copied back from the GPU.
  float *h_gpu_result = (float*)malloc(N*sizeof(float));
  // Device memory pointers.
  float *d_input;
  float *d_output;

  // Launch configuration.  BUG FIX: the original used N/1024 blocks; integer
  // division truncates, so the last N % 1024 elements were never touched by
  // the kernel and the comparison below read uninitialized memory.  Ceiling
  // division guarantees every element gets a thread.
  const int threadsPerBlock = 1024;
  const int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
  // Round the device allocations up to a whole grid's worth of elements so
  // that even a kernel without an idx < N guard stays inside the buffers.
  const size_t paddedBytes = (size_t)numBlocks * threadsPerBlock * sizeof(float);

  long long Memory_Allocation_start_time = start_timer();
  long long GPU_start_time = start_timer();
  //allocate GPU memory
  cudaMalloc((void **) &d_input, paddedBytes);
  cudaMalloc((void **) &d_output, paddedBytes);
  checkErrors("cudaMalloc");
  long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");

  //transfer the input array to the GPU
  long long Memory_copy_to_device_start_time = start_timer();
  cudaMemcpy(d_input, h_input, N*sizeof(float),cudaMemcpyHostToDevice);
  long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");

  //launch the kernel
  long long Kernal_run_start_time = start_timer();
  sine_parallel<<<numBlocks,threadsPerBlock>>>(d_input,d_output);
  // Kernel launches are asynchronous: checkErrors() synchronizes the device
  // and surfaces launch/execution errors, so the timer below measures the
  // actual kernel run rather than just the launch overhead.
  checkErrors("sine_parallel");
  long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");

  //copy the result array back to the CPU (cudaMemcpy is blocking)
  long long Memory_copy_to_Host_start_time = start_timer();
  cudaMemcpy(h_gpu_result,d_output,N*sizeof(float),cudaMemcpyDeviceToHost);
  long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
  long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");

  // Checking to make sure the CPU and GPU results match - Do not modify
  // NOTE(review): unqualified abs() on a float difference may bind to the
  // int overload on some toolchains, truncating sub-1.0 differences to 0 —
  // confirm whether fabsf() is intended here.
  int errorCount = 0;
  for (i=0; i<N; i++)
  {
    if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
      errorCount = errorCount + 1;
  }
  if (errorCount > 0)
    printf("Result comparison failed.\n");
  else
    printf("Result comparison passed.\n");

  // Cleaning up memory
  free(h_input);
  free(h_cpu_result);
  free(h_gpu_result);
  //Cleaning up memory for gpu pointers
  cudaFree(d_input);
  cudaFree(d_output);

  // Timings are recorded for completeness; silence unused-variable warnings.
  (void)CPU_time; (void)Memory_Allocation_end_time;
  (void)Memory_copy_to_device_end_time; (void)Kernal_run_end_time;
  (void)Memory_copy_to_Host_end_time; (void)GPU_end_time;
  return 0;
}
Function : _Z13sine_parallelPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fca00078e0207 */
/*0070*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */
/* 0x000ea2000c1e1900 */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 1.541015625, -0.052093505859375 ; /* 0x3e2aaaabff097435 */
/* 0x000fe200000001ff */
/*0090*/ IMAD.MOV.U32 R4, RZ, RZ, 0x40c00000 ; /* 0x40c00000ff047424 */
/* 0x000fe200078e00ff */
/*00a0*/ BSSY B0, 0x190 ; /* 0x000000e000007945 */
/* 0x000ff00003800000 */
/*00b0*/ FFMA R4, -R9, R4, 1 ; /* 0x3f80000009047423 */
/* 0x000fc80000000104 */
/*00c0*/ FFMA R4, R4, -R9, -0.16666667163372039795 ; /* 0xbe2aaaab04047423 */
/* 0x000fe40000000809 */
/*00d0*/ FMUL R2, R3, R3 ; /* 0x0000000303027220 */
/* 0x004fc80000400000 */
/*00e0*/ FMUL R5, R3, R2 ; /* 0x0000000203057220 */
/* 0x000fc80000400000 */
/*00f0*/ FCHK P0, R5, -6 ; /* 0xc0c0000005007902 */
/* 0x000e220000000000 */
/*0100*/ FFMA R8, R5, R4, RZ ; /* 0x0000000405087223 */
/* 0x000fc800000000ff */
/*0110*/ FFMA R9, R8, 6, R5 ; /* 0x40c0000008097823 */
/* 0x000fc80000000005 */
/*0120*/ FFMA R4, R4, R9, R8 ; /* 0x0000000904047223 */
/* 0x000fe20000000008 */
/*0130*/ @!P0 BRA 0x180 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0140*/ MOV R9, 0xc0c00000 ; /* 0xc0c0000000097802 */
/* 0x000fe40000000f00 */
/*0150*/ MOV R4, 0x170 ; /* 0x0000017000047802 */
/* 0x000fe40000000f00 */
/*0160*/ CALL.REL.NOINC 0x740 ; /* 0x000005d000007944 */
/* 0x000fea0003c00000 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0009 */
/*0180*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0190*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*01a0*/ MOV R7, 0x3c088889 ; /* 0x3c08888900077802 */
/* 0x000fe20000000f00 */
/*01b0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x42f00000 ; /* 0x42f00000ff067424 */
/* 0x000fe200078e00ff */
/*01c0*/ BSSY B0, 0x2a0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*01d0*/ FCHK P0, R5, 120 ; /* 0x42f0000005007902 */
/* 0x000e220000000000 */
/*01e0*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000000000 */
/*01f0*/ FFMA R6, R7, -R6, 1 ; /* 0x3f80000007067423 */
/* 0x000fc80000000806 */
/*0200*/ FFMA R6, R6, R7, 0.0083333337679505348206 ; /* 0x3c08888906067423 */
/* 0x000fc80000000007 */
/*0210*/ FFMA R7, R5, R6, RZ ; /* 0x0000000605077223 */
/* 0x000fc800000000ff */
/*0220*/ FFMA R8, R7, -120, R5 ; /* 0xc2f0000007087823 */
/* 0x000fc80000000005 */
/*0230*/ FFMA R6, R6, R8, R7 ; /* 0x0000000806067223 */
/* 0x000fe20000000007 */
/*0240*/ @!P0 BRA 0x290 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0250*/ HFMA2.MMA R9, -RZ, RZ, 3.46875, 0 ; /* 0x42f00000ff097435 */
/* 0x000fe200000001ff */
/*0260*/ MOV R4, 0x280 ; /* 0x0000028000047802 */
/* 0x000fca0000000f00 */
/*0270*/ CALL.REL.NOINC 0x740 ; /* 0x000004c000007944 */
/* 0x000fea0003c00000 */
/*0280*/ IMAD.MOV.U32 R6, RZ, RZ, R9 ; /* 0x000000ffff067224 */
/* 0x001fe400078e0009 */
/*0290*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02a0*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*02b0*/ MOV R7, 0x39500d01 ; /* 0x39500d0100077802 */
/* 0x000fe20000000f00 */
/*02c0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x459d8000 ; /* 0x459d8000ff047424 */
/* 0x000fe200078e00ff */
/*02d0*/ BSSY B0, 0x3b0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*02e0*/ FCHK P0, R5, -5040 ; /* 0xc59d800005007902 */
/* 0x000e220000000000 */
/*02f0*/ FADD R3, R3, R6 ; /* 0x0000000603037221 */
/* 0x000fe40000000000 */
/*0300*/ FFMA R4, -R7, R4, 1 ; /* 0x3f80000007047423 */
/* 0x000fc80000000104 */
/*0310*/ FFMA R4, R4, -R7, -0.00019841270113829523325 ; /* 0xb9500d0104047423 */
/* 0x000fc80000000807 */
/*0320*/ FFMA R7, R5, R4, RZ ; /* 0x0000000405077223 */
/* 0x000fc800000000ff */
/*0330*/ FFMA R8, R7, 5040, R5 ; /* 0x459d800007087823 */
/* 0x000fc80000000005 */
/*0340*/ FFMA R4, R4, R8, R7 ; /* 0x0000000804047223 */
/* 0x000fe20000000007 */
/*0350*/ @!P0 BRA 0x3a0 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0360*/ HFMA2.MMA R9, -RZ, RZ, -5.61328125, -0.0 ; /* 0xc59d8000ff097435 */
/* 0x000fe200000001ff */
/*0370*/ MOV R4, 0x390 ; /* 0x0000039000047802 */
/* 0x000fca0000000f00 */
/*0380*/ CALL.REL.NOINC 0x740 ; /* 0x000003b000007944 */
/* 0x000fea0003c00000 */
/*0390*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0009 */
/*03a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03b0*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*03c0*/ MOV R7, 0x3638ef1d ; /* 0x3638ef1d00077802 */
/* 0x000fe20000000f00 */
/*03d0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x48b13000 ; /* 0x48b13000ff067424 */
/* 0x000fe200078e00ff */
/*03e0*/ BSSY B0, 0x4c0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*03f0*/ FCHK P0, R5, 362880 ; /* 0x48b1300005007902 */
/* 0x000e220000000000 */
/*0400*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000000000 */
/*0410*/ FFMA R6, R7, -R6, 1 ; /* 0x3f80000007067423 */
/* 0x000fc80000000806 */
/*0420*/ FFMA R6, R6, R7, 2.7557318844628753141e-06 ; /* 0x3638ef1d06067423 */
/* 0x000fc80000000007 */
/*0430*/ FFMA R7, R5, R6, RZ ; /* 0x0000000605077223 */
/* 0x000fc800000000ff */
/*0440*/ FFMA R8, R7, -362880, R5 ; /* 0xc8b1300007087823 */
/* 0x000fc80000000005 */
/*0450*/ FFMA R6, R6, R8, R7 ; /* 0x0000000806067223 */
/* 0x000fe20000000007 */
/*0460*/ @!P0 BRA 0x4b0 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0470*/ HFMA2.MMA R9, -RZ, RZ, 9.3828125, 0.125 ; /* 0x48b13000ff097435 */
/* 0x000fe200000001ff */
/*0480*/ MOV R4, 0x4a0 ; /* 0x000004a000047802 */
/* 0x000fca0000000f00 */
/*0490*/ CALL.REL.NOINC 0x740 ; /* 0x000002a000007944 */
/* 0x000fea0003c00000 */
/*04a0*/ IMAD.MOV.U32 R6, RZ, RZ, R9 ; /* 0x000000ffff067224 */
/* 0x001fe400078e0009 */
/*04b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04c0*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*04d0*/ MOV R7, 0x32d7322b ; /* 0x32d7322b00077802 */
/* 0x000fe20000000f00 */
/*04e0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4c184540 ; /* 0x4c184540ff047424 */
/* 0x000fe200078e00ff */
/*04f0*/ BSSY B0, 0x5d0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*0500*/ FCHK P0, R5, -39916800 ; /* 0xcc18454005007902 */
/* 0x000e220000000000 */
/*0510*/ FADD R3, R3, R6 ; /* 0x0000000603037221 */
/* 0x000fe40000000000 */
/*0520*/ FFMA R4, -R7, R4, 1 ; /* 0x3f80000007047423 */
/* 0x000fc80000000104 */
/*0530*/ FFMA R4, R4, -R7, -2.5052107943679402524e-08 ; /* 0xb2d7322b04047423 */
/* 0x000fc80000000807 */
/*0540*/ FFMA R7, R5, R4, RZ ; /* 0x0000000405077223 */
/* 0x000fc800000000ff */
/*0550*/ FFMA R8, R7, 39916800, R5 ; /* 0x4c18454007087823 */
/* 0x000fc80000000005 */
/*0560*/ FFMA R4, R4, R8, R7 ; /* 0x0000000804047223 */
/* 0x000fe20000000007 */
/*0570*/ @!P0 BRA 0x5c0 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0580*/ HFMA2.MMA R9, -RZ, RZ, -16.375, 5.25 ; /* 0xcc184540ff097435 */
/* 0x000fe200000001ff */
/*0590*/ MOV R4, 0x5b0 ; /* 0x000005b000047802 */
/* 0x000fca0000000f00 */
/*05a0*/ CALL.REL.NOINC 0x740 ; /* 0x0000019000007944 */
/* 0x000fea0003c00000 */
/*05b0*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0009 */
/*05c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*05d0*/ FMUL R2, R2, R5 ; /* 0x0000000502027220 */
/* 0x000fe20000400000 */
/*05e0*/ MOV R7, 0x300e45bd ; /* 0x300e45bd00077802 */
/* 0x000fe20000000f00 */
/*05f0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4ee65198 ; /* 0x4ee65198ff067424 */
/* 0x000fe200078e00ff */
/*0600*/ BSSY B0, 0x6f0 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0610*/ FCHK P0, R2, 1.93205350400000000000e+09 ; /* 0x4ee6519802007902 */
/* 0x000e220000000000 */
/*0620*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000000000 */
/*0630*/ FFMA R6, R7, -R6, 1 ; /* 0x3f80000007067423 */
/* 0x000fc80000000806 */
/*0640*/ FFMA R5, R6, R7, 5.1758403118995488512e-10 ; /* 0x300e45bd06057423 */
/* 0x000fc80000000007 */
/*0650*/ FFMA R6, R2, R5, RZ ; /* 0x0000000502067223 */
/* 0x000fc800000000ff */
/*0660*/ FFMA R7, R6, -1.93205350400000000000e+09, R2 ; /* 0xcee6519806077823 */
/* 0x000fc80000000002 */
/*0670*/ FFMA R6, R5, R7, R6 ; /* 0x0000000705067223 */
/* 0x000fe20000000006 */
/*0680*/ @!P0 BRA 0x6e0 ; /* 0x0000005000008947 */
/* 0x001fea0003800000 */
/*0690*/ MOV R5, R2 ; /* 0x0000000200057202 */
/* 0x000fe20000000f00 */
/*06a0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4ee65198 ; /* 0x4ee65198ff097424 */
/* 0x000fe200078e00ff */
/*06b0*/ MOV R4, 0x6d0 ; /* 0x000006d000047802 */
/* 0x000fe40000000f00 */
/*06c0*/ CALL.REL.NOINC 0x740 ; /* 0x0000007000007944 */
/* 0x000fea0003c00000 */
/*06d0*/ MOV R6, R9 ; /* 0x0000000900067202 */
/* 0x001fe40000000f00 */
/*06e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*06f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe400078e00ff */
/*0700*/ FADD R5, R3, R6 ; /* 0x0000000603057221 */
/* 0x000fe40000000000 */
/*0710*/ IMAD.WIDE R2, R0, R7, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0207 */
/*0720*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0730*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0740*/ SHF.R.U32.HI R6, RZ, 0x17, R9 ; /* 0x00000017ff067819 */
/* 0x000fe20000011609 */
/*0750*/ BSSY B1, 0xd90 ; /* 0x0000063000017945 */
/* 0x000fe20003800000 */
/*0760*/ SHF.R.U32.HI R7, RZ, 0x17, R5 ; /* 0x00000017ff077819 */
/* 0x000fe40000011605 */
/*0770*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */
/* 0x000fe400078ec0ff */
/*0780*/ LOP3.LUT R13, R7, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff070d7812 */
/* 0x000fe400078ec0ff */
/*0790*/ IADD3 R11, R6, -0x1, RZ ; /* 0xffffffff060b7810 */
/* 0x000fe40007ffe0ff */
/*07a0*/ IADD3 R10, R13, -0x1, RZ ; /* 0xffffffff0d0a7810 */
/* 0x000fc40007ffe0ff */
/*07b0*/ ISETP.GT.U32.AND P0, PT, R11, 0xfd, PT ; /* 0x000000fd0b00780c */
/* 0x000fe40003f04070 */
/*07c0*/ MOV R8, R5 ; /* 0x0000000500087202 */
/* 0x000fe40000000f00 */
/*07d0*/ ISETP.GT.U32.OR P0, PT, R10, 0xfd, P0 ; /* 0x000000fd0a00780c */
/* 0x000fda0000704470 */
/*07e0*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff078224 */
/* 0x000fe200078e00ff */
/*07f0*/ @!P0 BRA 0x970 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0800*/ FSETP.GTU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fe40003f1c200 */
/*0810*/ FSETP.GTU.FTZ.AND P1, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */
/* 0x000fc80003f3c200 */
/*0820*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*0830*/ @P0 BRA 0xd70 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0840*/ LOP3.LUT P0, RZ, R9, 0x7fffffff, R8, 0xc8, !PT ; /* 0x7fffffff09ff7812 */
/* 0x000fda000780c808 */
/*0850*/ @!P0 BRA 0xd50 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0860*/ FSETP.NEU.FTZ.AND P2, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fe40003f5d200 */
/*0870*/ FSETP.NEU.FTZ.AND P1, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */
/* 0x000fe40003f3d200 */
/*0880*/ FSETP.NEU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fd60003f1d200 */
/*0890*/ @!P1 BRA !P2, 0xd50 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*08a0*/ LOP3.LUT P2, RZ, R8, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff08ff7812 */
/* 0x000fc8000784c0ff */
/*08b0*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*08c0*/ @P1 BRA 0xd30 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*08d0*/ LOP3.LUT P1, RZ, R9, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff09ff7812 */
/* 0x000fc8000782c0ff */
/*08e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*08f0*/ @P0 BRA 0xd00 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*0900*/ ISETP.GE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f06270 */
/*0910*/ ISETP.GE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fd60003f26270 */
/*0920*/ @P0 MOV R7, RZ ; /* 0x000000ff00070202 */
/* 0x000fe20000000f00 */
/*0930*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, -0x40 ; /* 0xffffffc0ff078424 */
/* 0x000fe400078e00ff */
/*0940*/ @!P0 FFMA R8, R5, 1.84467440737095516160e+19, RZ ; /* 0x5f80000005088823 */
/* 0x000fe400000000ff */
/*0950*/ @!P1 FFMA R9, R9, 1.84467440737095516160e+19, RZ ; /* 0x5f80000009099823 */
/* 0x000fe200000000ff */
/*0960*/ @!P1 IADD3 R7, R7, 0x40, RZ ; /* 0x0000004007079810 */
/* 0x000fe40007ffe0ff */
/*0970*/ LEA R10, R6, 0xc0800000, 0x17 ; /* 0xc0800000060a7811 */
/* 0x000fe200078eb8ff */
/*0980*/ BSSY B2, 0xcf0 ; /* 0x0000036000027945 */
/* 0x000fe20003800000 */
/*0990*/ IADD3 R13, R13, -0x7f, RZ ; /* 0xffffff810d0d7810 */
/* 0x000fe40007ffe0ff */
/*09a0*/ IADD3 R12, -R10, R9, RZ ; /* 0x000000090a0c7210 */
/* 0x000fc60007ffe1ff */
/*09b0*/ IMAD R10, R13.reuse, -0x800000, R8 ; /* 0xff8000000d0a7824 */
/* 0x040fe200078e0208 */
/*09c0*/ MUFU.RCP R9, R12 ; /* 0x0000000c00097308 */
/* 0x0000620000001000 */
/*09d0*/ FADD.FTZ R14, -R12, -RZ ; /* 0x800000ff0c0e7221 */
/* 0x000fe20000010100 */
/*09e0*/ IADD3 R12, R13, 0x7f, -R6 ; /* 0x0000007f0d0c7810 */
/* 0x001fca0007ffe806 */
/*09f0*/ IMAD.IADD R12, R12, 0x1, R7 ; /* 0x000000010c0c7824 */
/* 0x000fe400078e0207 */
/*0a00*/ FFMA R16, R9, R14, 1 ; /* 0x3f80000009107423 */
/* 0x002fc8000000000e */
/*0a10*/ FFMA R8, R9, R16, R9 ; /* 0x0000001009087223 */
/* 0x000fc80000000009 */
/*0a20*/ FFMA R11, R10, R8, RZ ; /* 0x000000080a0b7223 */
/* 0x000fc800000000ff */
/*0a30*/ FFMA R9, R14, R11, R10 ; /* 0x0000000b0e097223 */
/* 0x000fc8000000000a */
/*0a40*/ FFMA R11, R8, R9, R11 ; /* 0x00000009080b7223 */
/* 0x000fc8000000000b */
/*0a50*/ FFMA R10, R14, R11, R10 ; /* 0x0000000b0e0a7223 */
/* 0x000fc8000000000a */
/*0a60*/ FFMA R9, R8, R10, R11 ; /* 0x0000000a08097223 */
/* 0x000fca000000000b */
/*0a70*/ SHF.R.U32.HI R6, RZ, 0x17, R9 ; /* 0x00000017ff067819 */
/* 0x000fc80000011609 */
/*0a80*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */
/* 0x000fc800078ec0ff */
/*0a90*/ IADD3 R13, R6, R12, RZ ; /* 0x0000000c060d7210 */
/* 0x000fc80007ffe0ff */
/*0aa0*/ IADD3 R6, R13, -0x1, RZ ; /* 0xffffffff0d067810 */
/* 0x000fc80007ffe0ff */
/*0ab0*/ ISETP.GE.U32.AND P0, PT, R6, 0xfe, PT ; /* 0x000000fe0600780c */
/* 0x000fda0003f06070 */
/*0ac0*/ @!P0 BRA 0xcd0 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*0ad0*/ ISETP.GT.AND P0, PT, R13, 0xfe, PT ; /* 0x000000fe0d00780c */
/* 0x000fda0003f04270 */
/*0ae0*/ @P0 BRA 0xca0 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*0af0*/ ISETP.GE.AND P0, PT, R13, 0x1, PT ; /* 0x000000010d00780c */
/* 0x000fda0003f06270 */
/*0b00*/ @P0 BRA 0xce0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*0b10*/ ISETP.GE.AND P0, PT, R13, -0x18, PT ; /* 0xffffffe80d00780c */
/* 0x000fe40003f06270 */
/*0b20*/ LOP3.LUT R9, R9, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000009097812 */
/* 0x000fd600078ec0ff */
/*0b30*/ @!P0 BRA 0xce0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0b40*/ FFMA.RZ R6, R8, R10.reuse, R11.reuse ; /* 0x0000000a08067223 */
/* 0x180fe2000000c00b */
/*0b50*/ ISETP.NE.AND P2, PT, R13.reuse, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x040fe40003f45270 */
/*0b60*/ ISETP.NE.AND P1, PT, R13.reuse, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x040fe40003f25270 */
/*0b70*/ LOP3.LUT R7, R6, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff06077812 */
/* 0x000fe200078ec0ff */
/*0b80*/ FFMA.RP R6, R8.reuse, R10.reuse, R11.reuse ; /* 0x0000000a08067223 */
/* 0x1c0fe4000000800b */
/*0b90*/ FFMA.RM R11, R8, R10, R11 ; /* 0x0000000a080b7223 */
/* 0x000fe2000000400b */
/*0ba0*/ IADD3 R8, R13, 0x20, RZ ; /* 0x000000200d087810 */
/* 0x000fe20007ffe0ff */
/*0bb0*/ IMAD.MOV R10, RZ, RZ, -R13 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e0a0d */
/*0bc0*/ LOP3.LUT R7, R7, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000007077812 */
/* 0x000fc400078efcff */
/*0bd0*/ FSETP.NEU.FTZ.AND P0, PT, R6, R11, PT ; /* 0x0000000b0600720b */
/* 0x000fe40003f1d000 */
/*0be0*/ SHF.L.U32 R8, R7, R8, RZ ; /* 0x0000000807087219 */
/* 0x000fe400000006ff */
/*0bf0*/ SEL R6, R10, RZ, P2 ; /* 0x000000ff0a067207 */
/* 0x000fe40001000000 */
/*0c00*/ ISETP.NE.AND P1, PT, R8, RZ, P1 ; /* 0x000000ff0800720c */
/* 0x000fe40000f25270 */
/*0c10*/ SHF.R.U32.HI R6, RZ, R6, R7 ; /* 0x00000006ff067219 */
/* 0x000fe40000011607 */
/*0c20*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40000703570 */
/*0c30*/ SHF.R.U32.HI R8, RZ, 0x1, R6 ; /* 0x00000001ff087819 */
/* 0x000fe40000011606 */
/*0c40*/ SEL R7, RZ, 0x1, !P0 ; /* 0x00000001ff077807 */
/* 0x000fc80004000000 */
/*0c50*/ LOP3.LUT R7, R7, 0x1, R8, 0xf8, !PT ; /* 0x0000000107077812 */
/* 0x000fc800078ef808 */
/*0c60*/ LOP3.LUT R7, R7, R6, RZ, 0xc0, !PT ; /* 0x0000000607077212 */
/* 0x000fc800078ec0ff */
/*0c70*/ IADD3 R8, R8, R7, RZ ; /* 0x0000000708087210 */
/* 0x000fc80007ffe0ff */
/*0c80*/ LOP3.LUT R9, R8, R9, RZ, 0xfc, !PT ; /* 0x0000000908097212 */
/* 0x000fe200078efcff */
/*0c90*/ BRA 0xce0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0ca0*/ LOP3.LUT R9, R9, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000009097812 */
/* 0x000fc800078ec0ff */
/*0cb0*/ LOP3.LUT R9, R9, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000009097812 */
/* 0x000fe200078efcff */
/*0cc0*/ BRA 0xce0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0cd0*/ IMAD R9, R12, 0x800000, R9 ; /* 0x008000000c097824 */
/* 0x000fe400078e0209 */
/*0ce0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0cf0*/ BRA 0xd80 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*0d00*/ LOP3.LUT R9, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009097812 */
/* 0x000fc800078e4808 */
/*0d10*/ LOP3.LUT R9, R9, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000009097812 */
/* 0x000fe200078efcff */
/*0d20*/ BRA 0xd80 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0d30*/ LOP3.LUT R9, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009097812 */
/* 0x000fe200078e4808 */
/*0d40*/ BRA 0xd80 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0d50*/ MUFU.RSQ R9, -QNAN ; /* 0xffc0000000097908 */
/* 0x000e220000001400 */
/*0d60*/ BRA 0xd80 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0d70*/ FADD.FTZ R9, R5, R9 ; /* 0x0000000905097221 */
/* 0x000fe40000010000 */
/*0d80*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0d90*/ HFMA2.MMA R7, -RZ, RZ, 0, 0 ; /* 0x00000000ff077435 */
/* 0x000fe200000001ff */
/*0da0*/ MOV R6, R4 ; /* 0x0000000400067202 */
/* 0x000fca0000000f00 */
/*0db0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff24006007950 */
/* 0x000fea0003c3ffff */
/*0dc0*/ BRA 0xdc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0dd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0de0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0df0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
// Serial reference: approximates sin(input[i]) for all N elements with the
// Taylor expansion sin(x) = x - x^3/3! + x^5/5! - ... (TERMS extra terms
// beyond the leading x), writing each result to output[i].
//
// NOTE(review): denom overflows 32-bit int on the last loop iteration
// (13! > 2^31, undefined behavior in theory; wraps on common targets).
// The GPU kernel wraps identically, so main()'s comparison still passes,
// but both routines would need widening together if this is ever fixed.
void sine_serial(float *input, float *output)
{
  int i;
  for (i=0; i<N; i++) {
    float value = input[i];                        // leading Taylor term: x
    float numer = input[i] * input[i] * input[i];  // x^3
    int denom = 6; // 3!
    int sign = -1; // alternating series sign
    for (int j=1; j<=TERMS;j++)
    {
      value += sign * numer / denom;  // next term; int operands promote to float
      numer *= input[i] * input[i];   // x^(2k+1) -> x^(2k+3)
      denom *= (2*j+2) * (2*j+3);     // (2k+1)! -> (2k+3)!
      sign *= -1;
    }
    output[i] = value;
  }
}
// kernel function (CUDA device)
// Grid-stride CUDA port of sine_serial: each thread handles elements
// idx, idx + gridSize, idx + 2*gridSize, ... with an explicit idx < N guard.
//
// BUG FIX: the host launches N/1024 blocks; integer division truncates, so a
// one-element-per-thread kernel left the last N % 1024 outputs uncomputed
// (and main() then compared against uninitialized host memory).  The stride
// loop makes the kernel correct for ANY launch configuration, and the bound
// check prevents out-of-bounds accesses if the grid overshoots N.
__global__ void sine_parallel(float*d_input,float*d_output ){
  int stride = gridDim.x * blockDim.x;  // total threads in the grid
  for (int idx = blockIdx.x*blockDim.x+threadIdx.x; idx < N; idx += stride){
    float x = d_input[idx];             // load once; original re-read it per term
    float d_value = x;                  // leading Taylor term: x
    float d_numer = x * x * x;          // x^3
    int d_denom = 6;                    // 3!
    int d_sign = -1;                    // alternating series sign
    // Taylor series: sin(x) = x - x^3/3! + x^5/5! - ...
    // NOTE: d_denom wraps past 32-bit int on the last term, exactly matching
    // the CPU reference; fix both together if ever widened.
    for (int d_j = 1; d_j <= TERMS; d_j++){
      d_value += d_sign * d_numer / d_denom;  // int operands promote to float
      d_numer *= x * x;
      d_denom *= (2*d_j+2) * (2*d_j+3);
      d_sign *= -1;
    }
    d_output[idx] = d_value;
  }
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
// (wall-clock: seconds-since-epoch * 1e6 + microseconds, via gettimeofday).
// Pair with stop_timer() to measure elapsed intervals.
long long start_timer() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  // tv_sec is time_t; the multiply widens before the add on LP64 targets.
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
// start_time: a value previously returned by start_timer().
// name:       label printed before the elapsed time.
// Prints the interval in seconds (5 significant digits) to stdout and
// returns it in microseconds.
long long stop_timer(long long start_time, std::string name) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
  std::cout << std::setprecision(5);
  std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
  return end_time - start_time;
}
// Reports (to stderr) any pending CUDA error, tagged with 'label'.
// Synchronizes the device first so errors from asynchronous work (kernel
// launches, async copies) are surfaced here instead of at a later call.
// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize(); this block is marked "do not modify" upstream.
void checkErrors(const char label[])
{
  // we need to synchronise first to catch errors due to
  // asynchronous operations that would otherwise
  // potentially go unnoticed
  cudaError_t err;
  err = cudaThreadSynchronize();
  if (err != cudaSuccess)
  {
    char *e = (char*) cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
  }
  // cudaGetLastError() also clears the sticky error state for later calls.
  err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    char *e = (char*) cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
  }
}
// END: timing and error checking routines (do not modify)
// Drives the CPU reference, then the CUDA version, times each stage, and
// verifies that the two result vectors agree element-wise.
int main (int argc, char **argv)
{
  //BEGIN: CPU implementation (do not modify)
  float *h_cpu_result = (float*)malloc(N*sizeof(float));
  float *h_input = (float*)malloc(N*sizeof(float));
  //Initialize data on CPU
  int i;
  for (i=0; i<N; i++)
  {
    h_input[i] = 0.1f * i;
  }
  //Execute and time the CPU version
  long long CPU_start_time = start_timer();
  sine_serial(h_input, h_cpu_result);
  long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
  //END: CPU implementation (do not modify)

  // Host buffer for the results copied back from the GPU.
  float *h_gpu_result = (float*)malloc(N*sizeof(float));
  // Device memory pointers.
  float *d_input;
  float *d_output;

  // Launch configuration.  BUG FIX: the original used N/1024 blocks; integer
  // division truncates, so the last N % 1024 elements were never touched by
  // the kernel and the comparison below read uninitialized memory.  Ceiling
  // division guarantees every element gets a thread.
  const int threadsPerBlock = 1024;
  const int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
  // Round the device allocations up to a whole grid's worth of elements so
  // that even a kernel without an idx < N guard stays inside the buffers.
  const size_t paddedBytes = (size_t)numBlocks * threadsPerBlock * sizeof(float);

  long long Memory_Allocation_start_time = start_timer();
  long long GPU_start_time = start_timer();
  //allocate GPU memory
  cudaMalloc((void **) &d_input, paddedBytes);
  cudaMalloc((void **) &d_output, paddedBytes);
  checkErrors("cudaMalloc");
  long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");

  //transfer the input array to the GPU
  long long Memory_copy_to_device_start_time = start_timer();
  cudaMemcpy(d_input, h_input, N*sizeof(float),cudaMemcpyHostToDevice);
  long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");

  //launch the kernel
  long long Kernal_run_start_time = start_timer();
  sine_parallel<<<numBlocks,threadsPerBlock>>>(d_input,d_output);
  // Kernel launches are asynchronous: checkErrors() synchronizes the device
  // and surfaces launch/execution errors, so the timer below measures the
  // actual kernel run rather than just the launch overhead.
  checkErrors("sine_parallel");
  long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");

  //copy the result array back to the CPU (cudaMemcpy is blocking)
  long long Memory_copy_to_Host_start_time = start_timer();
  cudaMemcpy(h_gpu_result,d_output,N*sizeof(float),cudaMemcpyDeviceToHost);
  long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
  long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");

  // Checking to make sure the CPU and GPU results match - Do not modify
  // NOTE(review): unqualified abs() on a float difference may bind to the
  // int overload on some toolchains, truncating sub-1.0 differences to 0 —
  // confirm whether fabsf() is intended here.
  int errorCount = 0;
  for (i=0; i<N; i++)
  {
    if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
      errorCount = errorCount + 1;
  }
  if (errorCount > 0)
    printf("Result comparison failed.\n");
  else
    printf("Result comparison passed.\n");

  // Cleaning up memory
  free(h_input);
  free(h_cpu_result);
  free(h_gpu_result);
  //Cleaning up memory for gpu pointers
  cudaFree(d_input);
  cudaFree(d_output);

  // Timings are recorded for completeness; silence unused-variable warnings.
  (void)CPU_time; (void)Memory_Allocation_end_time;
  (void)Memory_copy_to_device_end_time; (void)Kernal_run_end_time;
  (void)Memory_copy_to_Host_end_time; (void)GPU_end_time;
  return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3956:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3956:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11sine_serialPfS_
.type _Z11sine_serialPfS_, @function
_Z11sine_serialPfS_:
.LFB3949:
.cfi_startproc
endbr64
movq %rdi, %r10
movq %rsi, %r9
movl $0, %r8d
movl $6, %r11d
jmp .L5
.L9:
movss %xmm2, (%r9,%r8)
addq $4, %r8
cmpq $49382712, %r8
je .L8
.L5:
movss (%r10,%r8), %xmm2
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm4, %xmm1
mulss %xmm2, %xmm1
movl $5, %esi
movl $1, %edx
movl $-1, %ecx
movl %r11d, %eax
.L4:
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
mulss %xmm1, %xmm0
pxor %xmm3, %xmm3
cvtsi2ssl %eax, %xmm3
divss %xmm3, %xmm0
addss %xmm0, %xmm2
mulss %xmm4, %xmm1
addl $1, %edx
movl %esi, %edi
imull %edx, %edi
imull %edi, %eax
addl %eax, %eax
negl %ecx
addl $2, %esi
cmpl $7, %edx
jne .L4
jmp .L9
.L8:
ret
.cfi_endproc
.LFE3949:
.size _Z11sine_serialPfS_, .-_Z11sine_serialPfS_
.globl _Z11start_timerv
.type _Z11start_timerv, @function
_Z11start_timerv:
.LFB3950:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
imulq $1000000, (%rsp), %rax
addq 8(%rsp), %rax
movq 24(%rsp), %rdx
subq %fs:40, %rdx
jne .L13
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3950:
.size _Z11start_timerv, .-_Z11start_timerv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string ": "
.LC2:
.string " sec\n"
.text
.globl _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.type _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE, @function
_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE:
.LFB3951:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
imulq $1000000, (%rsp), %rbx
addq 8(%rsp), %rbx
leaq _ZSt4cout(%rip), %rdi
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, 8(%rdi,%rax)
movq 8(%rbp), %rdx
movq 0(%rbp), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq %rax, %rbp
movl $2, %edx
leaq .LC0(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
subq %r12, %rbx
pxor %xmm0, %xmm0
cvtsi2ssq %rbx, %xmm0
divss .LC1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $5, %edx
leaq .LC2(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L17
movq %rbx, %rax
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3951:
.size _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE, .-_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.section .rodata.str1.1
.LC3:
.string "CUDA Error: %s (at %s)"
.text
.globl _Z11checkErrorsPKc
.type _Z11checkErrorsPKc, @function
_Z11checkErrorsPKc:
.LFB3952:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaThreadSynchronize@PLT
testl %eax, %eax
jne .L22
.L19:
call cudaGetLastError@PLT
testl %eax, %eax
jne .L23
.L18:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movq %rbx, %r8
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L19
.L23:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movq %rbx, %r8
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L18
.cfi_endproc
.LFE3952:
.size _Z11checkErrorsPKc, .-_Z11checkErrorsPKc
.globl _Z35__device_stub__Z13sine_parallelPfS_PfS_
.type _Z35__device_stub__Z13sine_parallelPfS_PfS_, @function
_Z35__device_stub__Z13sine_parallelPfS_PfS_:
.LFB3978:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L28
.L24:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L29
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13sine_parallelPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L24
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3978:
.size _Z35__device_stub__Z13sine_parallelPfS_PfS_, .-_Z35__device_stub__Z13sine_parallelPfS_PfS_
.globl _Z13sine_parallelPfS_
.type _Z13sine_parallelPfS_, @function
_Z13sine_parallelPfS_:
.LFB3979:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z13sine_parallelPfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3979:
.size _Z13sine_parallelPfS_, .-_Z13sine_parallelPfS_
.section .rodata.str1.1
.LC4:
.string "_Z13sine_parallelPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3981:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z13sine_parallelPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3981:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .rodata._ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "basic_string: construction from null is not valid"
.section .text._ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_,"axG",@progbits,_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC5IS3_EEPKcRKS3_,comdat
.align 2
.weak _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_
.type _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_, @function
_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_:
.LFB4300:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
leaq 16(%rdi), %r12
movq %r12, (%rdi)
testq %rsi, %rsi
je .L43
movq %rdi, %rbx
movq %rsi, %r13
movq %rsi, %rdi
call strlen@PLT
movq %rax, %rbp
movq %rax, (%rsp)
cmpq $15, %rax
ja .L44
cmpq $1, %rax
jne .L39
movzbl 0(%r13), %eax
movb %al, 16(%rbx)
.L40:
movq (%rsp), %rax
movq %rax, 8(%rbx)
movq (%rbx), %rdx
movb $0, (%rdx,%rax)
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L45
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L46
leaq .LC5(%rip), %rdi
call _ZSt19__throw_logic_errorPKc@PLT
.L46:
call __stack_chk_fail@PLT
.L44:
movq %rsp, %rsi
movl $0, %edx
movq %rbx, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_createERmm@PLT
movq %rax, %r12
movq %rax, (%rbx)
movq (%rsp), %rax
movq %rax, 16(%rbx)
.L38:
movq %rbp, %rdx
movq %r13, %rsi
movq %r12, %rdi
call memcpy@PLT
jmp .L40
.L39:
testq %rax, %rax
je .L40
jmp .L38
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4300:
.size _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_, .-_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_
.weak _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.set _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_,_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_
.section .rodata.str1.1
.LC7:
.string "\nCPU Run Time"
.LC8:
.string "\nGPU Memory allocation time:"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC9:
.string "\nGPU Memory Copy to Device time:"
.section .rodata.str1.1
.LC10:
.string "\nGPU Kernal run Time:"
.section .rodata.str1.8
.align 8
.LC11:
.string "\nGPU Memory Copy to Host time:"
.section .rodata.str1.1
.LC12:
.string "\nTotal GPU Run time:"
.LC15:
.string "Result comparison failed.\n"
.LC16:
.string "Result comparison passed.\n"
.text
.globl main
.type main, @function
main:
.LFB3953:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA3953
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $49382712, %edi
call malloc@PLT
movq %rax, %rbp
movl $49382712, %edi
call malloc@PLT
movq %rax, %rbx
movl $0, %eax
movss .LC6(%rip), %xmm1
.L48:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq $12345678, %rax
jne .L48
call _Z11start_timerv
movq %rax, %r12
movq %rbp, %rsi
movq %rbx, %rdi
call _Z11sine_serialPfS_
leaq 36(%rsp), %rdx
leaq 48(%rsp), %r13
leaq .LC7(%rip), %rsi
movq %r13, %rdi
.LEHB0:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE0:
movq %r13, %rsi
movq %r12, %rdi
.LEHB1:
call _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.LEHE1:
movq %r13, %r15
movq %r13, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movl $49382712, %edi
call malloc@PLT
movq %rax, %r12
call _Z11start_timerv
movq %rax, %r14
call _Z11start_timerv
movq %rax, %r13
leaq 8(%rsp), %rdi
movl $49382712, %esi
.LEHB2:
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $49382712, %esi
call cudaMalloc@PLT
leaq 36(%rsp), %rdx
leaq .LC8(%rip), %rsi
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE2:
movq %r15, %rsi
movq %r14, %rdi
.LEHB3:
call _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.LEHE3:
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
call _Z11start_timerv
movq %rax, %r14
movl $1, %ecx
movl $49382712, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
.LEHB4:
call cudaMemcpy@PLT
leaq 36(%rsp), %rdx
leaq .LC9(%rip), %rsi
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE4:
movq %r15, %rsi
movq %r14, %rdi
.LEHB5:
call _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.LEHE5:
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
call _Z11start_timerv
movq %rax, %r14
movl $1024, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $12056, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
.LEHB6:
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L79
.L49:
leaq 36(%rsp), %rdx
leaq 48(%rsp), %r15
leaq .LC10(%rip), %rsi
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE6:
movq %r15, %rsi
movq %r14, %rdi
.LEHB7:
call _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.LEHE7:
jmp .L80
.L79:
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
.LEHB8:
call _Z35__device_stub__Z13sine_parallelPfS_PfS_
jmp .L49
.L80:
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
call _Z11start_timerv
movq %rax, %r14
movl $2, %ecx
movl $49382712, %edx
movq 16(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq 36(%rsp), %rdx
leaq .LC11(%rip), %rsi
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE8:
movq %r15, %rsi
movq %r14, %rdi
.LEHB9:
call _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.LEHE9:
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
leaq 36(%rsp), %rdx
leaq .LC12(%rip), %rsi
movq %r15, %rdi
.LEHB10:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE10:
movq %r15, %rsi
movq %r13, %rdi
.LEHB11:
call _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.LEHE11:
movq %r15, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movl $0, %eax
movl $0, %edx
movss .LC13(%rip), %xmm2
movsd .LC14(%rip), %xmm1
.L52:
movss 0(%rbp,%rax), %xmm0
subss (%r12,%rax), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
seta %cl
movzbl %cl, %ecx
addl %ecx, %edx
addq $4, %rax
cmpq $49382712, %rax
jne .L52
testl %edx, %edx
jle .L53
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
.LEHB12:
call __printf_chk@PLT
.L54:
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L81
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L54
.L68:
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L56
call __stack_chk_fail@PLT
.L56:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.L69:
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L58
call __stack_chk_fail@PLT
.L58:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.L70:
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L60
call __stack_chk_fail@PLT
.L60:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.L71:
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L62
call __stack_chk_fail@PLT
.L62:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.L72:
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L64
call __stack_chk_fail@PLT
.L64:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.L73:
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L66
call __stack_chk_fail@PLT
.L66:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.LEHE12:
.L81:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3953:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA3953:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE3953-.LLSDACSB3953
.LLSDACSB3953:
.uleb128 .LEHB0-.LFB3953
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB3953
.uleb128 .LEHE1-.LEHB1
.uleb128 .L68-.LFB3953
.uleb128 0
.uleb128 .LEHB2-.LFB3953
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.uleb128 .LEHB3-.LFB3953
.uleb128 .LEHE3-.LEHB3
.uleb128 .L69-.LFB3953
.uleb128 0
.uleb128 .LEHB4-.LFB3953
.uleb128 .LEHE4-.LEHB4
.uleb128 0
.uleb128 0
.uleb128 .LEHB5-.LFB3953
.uleb128 .LEHE5-.LEHB5
.uleb128 .L70-.LFB3953
.uleb128 0
.uleb128 .LEHB6-.LFB3953
.uleb128 .LEHE6-.LEHB6
.uleb128 0
.uleb128 0
.uleb128 .LEHB7-.LFB3953
.uleb128 .LEHE7-.LEHB7
.uleb128 .L71-.LFB3953
.uleb128 0
.uleb128 .LEHB8-.LFB3953
.uleb128 .LEHE8-.LEHB8
.uleb128 0
.uleb128 0
.uleb128 .LEHB9-.LFB3953
.uleb128 .LEHE9-.LEHB9
.uleb128 .L72-.LFB3953
.uleb128 0
.uleb128 .LEHB10-.LFB3953
.uleb128 .LEHE10-.LEHB10
.uleb128 0
.uleb128 0
.uleb128 .LEHB11-.LFB3953
.uleb128 .LEHE11-.LEHB11
.uleb128 .L73-.LFB3953
.uleb128 0
.uleb128 .LEHB12-.LFB3953
.uleb128 .LEHE12-.LEHB12
.uleb128 0
.uleb128 0
.LLSDACSE3953:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1232348160
.align 4
.LC6:
.long 1036831949
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC13:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC14:
.long -1598689907
.long 1051772663
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];
float numer = input[i] * input[i] * input[i];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
output[i] = value;
}
}
// kernel function (CUDA device)
// TODO: Implement your graphics kernel here. See assignment instructions for method information
__global__ void sine_parallel(float*d_input,float*d_output ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
float d_value = d_input[idx];
float d_numer = d_input[idx]*d_input[idx]*d_input[idx];
int d_denom = 6;
int d_sign = -1;
for (int d_j=1;d_j<=TERMS; d_j++){
d_value += d_sign *d_numer/d_denom;
d_numer *= d_input[idx]* d_input[idx];
d_denom *= (2*d_j+2)* (2*d_j+3);
d_sign *= -1;
}
d_output[idx] = d_value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
//TODO: Prepare and run your kernel, make sure to copy your results back into h_gpu_result and display your timing results
float *h_gpu_result = (float*)malloc(N*sizeof(float));
//declare GPU memory pointers
float *d_input;
float *d_output;
long long Memory_Allocation_start_time = start_timer();
long long GPU_start_time = start_timer();
//allocate GPU memory
cudaMalloc((void **) &d_input,N*sizeof(float));
cudaMalloc((void **) &d_output,N*sizeof(float));
long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");
//transfer the array to the GPU
long long Memory_copy_to_device_start_time = start_timer();
cudaMemcpy(d_input, h_input, N*sizeof(float),cudaMemcpyHostToDevice);
long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");
//launch the kernel
int threards = N/1024;
long long Kernal_run_start_time = start_timer();
sine_parallel<<<threards,1024>>>(d_input,d_output);
long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");
//copy back the result array to the CPU
long long Memory_copy_to_Host_start_time = start_timer();
cudaMemcpy(h_gpu_result,d_output,N*sizeof(float),cudaMemcpyDeviceToHost);
long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
//Cleaning up memory for gpu pointers
cudaFree(d_input);
cudaFree(d_output);
return 0;
} | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];
float numer = input[i] * input[i] * input[i];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
output[i] = value;
}
}
// kernel function (CUDA device)
// TODO: Implement your graphics kernel here. See assignment instructions for method information
__global__ void sine_parallel(float*d_input,float*d_output ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
float d_value = d_input[idx];
float d_numer = d_input[idx]*d_input[idx]*d_input[idx];
int d_denom = 6;
int d_sign = -1;
for (int d_j=1;d_j<=TERMS; d_j++){
d_value += d_sign *d_numer/d_denom;
d_numer *= d_input[idx]* d_input[idx];
d_denom *= (2*d_j+2)* (2*d_j+3);
d_sign *= -1;
}
d_output[idx] = d_value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
//TODO: Prepare and run your kernel, make sure to copy your results back into h_gpu_result and display your timing results
float *h_gpu_result = (float*)malloc(N*sizeof(float));
//declare GPU memory pointers
float *d_input;
float *d_output;
long long Memory_Allocation_start_time = start_timer();
long long GPU_start_time = start_timer();
//allocate GPU memory
hipMalloc((void **) &d_input,N*sizeof(float));
hipMalloc((void **) &d_output,N*sizeof(float));
long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");
//transfer the array to the GPU
long long Memory_copy_to_device_start_time = start_timer();
hipMemcpy(d_input, h_input, N*sizeof(float),hipMemcpyHostToDevice);
long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");
//launch the kernel
int threards = N/1024;
long long Kernal_run_start_time = start_timer();
sine_parallel<<<threards,1024>>>(d_input,d_output);
long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");
//copy back the result array to the CPU
long long Memory_copy_to_Host_start_time = start_timer();
hipMemcpy(h_gpu_result,d_output,N*sizeof(float),hipMemcpyDeviceToHost);
long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
//Cleaning up memory for gpu pointers
hipFree(d_input);
hipFree(d_output);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];
float numer = input[i] * input[i] * input[i];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
output[i] = value;
}
}
// kernel function (CUDA device)
// TODO: Implement your graphics kernel here. See assignment instructions for method information
__global__ void sine_parallel(float*d_input,float*d_output ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
float d_value = d_input[idx];
float d_numer = d_input[idx]*d_input[idx]*d_input[idx];
int d_denom = 6;
int d_sign = -1;
for (int d_j=1;d_j<=TERMS; d_j++){
d_value += d_sign *d_numer/d_denom;
d_numer *= d_input[idx]* d_input[idx];
d_denom *= (2*d_j+2)* (2*d_j+3);
d_sign *= -1;
}
d_output[idx] = d_value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
// GPU version: allocate, copy in, launch, copy out -- each step timed.
float *h_gpu_result = (float*)malloc(N*sizeof(float));
//declare GPU memory pointers
float *d_input;
float *d_output;
// BUG FIX: N (12345678) is not a multiple of 1024, so the previous
// truncating grid size N/1024 never computed the last N % 1024 (= 334)
// elements and the result comparison below could not pass. Round the
// block count up, and pad the device buffers to a whole number of blocks
// so the surplus threads of the last block still touch valid memory.
int numBlocks = (N + 1023) / 1024;                      // ceil(N / 1024)
size_t paddedBytes = (size_t)numBlocks * 1024 * sizeof(float);
long long Memory_Allocation_start_time = start_timer();
long long GPU_start_time = start_timer();
//allocate GPU memory (padded -- see above)
hipMalloc((void **) &d_input, paddedBytes);
hipMalloc((void **) &d_output, paddedBytes);
long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");
//transfer the input array to the GPU (only the N real elements)
long long Memory_copy_to_device_start_time = start_timer();
hipMemcpy(d_input, h_input, N*sizeof(float),hipMemcpyHostToDevice);
long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");
//launch the kernel: one thread per element, 1024 threads per block
long long Kernal_run_start_time = start_timer();
sine_parallel<<<numBlocks,1024>>>(d_input,d_output);
long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");
//copy back the result array to the CPU
long long Memory_copy_to_Host_start_time = start_timer();
hipMemcpy(h_gpu_result,d_output,N*sizeof(float),hipMemcpyDeviceToHost);
long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
// NOTE(review): abs() on a float relies on the C++ float overload being
// in scope (it is in this build); with only the C int abs the difference
// would be truncated before comparison -- consider fabsf.
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
//Cleaning up memory for gpu pointers
hipFree(d_input);
hipFree(d_output);
return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; AMD GPU (gfx1100, wavefront size 32) device code for the sine_parallel
; kernel. s[0:1] = kernarg segment pointer, s15 = workgroup id x,
; v0 = workitem id x (per the .amdhsa_kernel descriptor / metadata below).
.protected _Z13sine_parallelPfS_
.globl _Z13sine_parallelPfS_
.p2align 8
.type _Z13sine_parallelPfS_,@function
_Z13sine_parallelPfS_:
; Load hidden_group_size_x (kernarg+0x1c, low 16 bits) and d_input (kernarg+0x0).
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; v1 = idx = workgroup_id_x * blockDim.x + workitem_id_x
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_mov_b32 s4, 6
; Sign-extend idx and form the 64-bit byte address d_input + 4*idx.
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
; Loop state: s4 = denominator (6 = 3!), s2 = sign (-1), s3 = 2*j+3 counter.
s_mov_b32 s2, -1
s_mov_b32 s3, 5
; NOTE(review): no idx < N guard is emitted -- mirrors the HIP source, which
; relies on the launch grid exactly covering the buffers.
global_load_b32 v0, v[3:4], off
s_waitcnt vmcnt(0)
; v0 = value = x, v3 = x*x, v4 = numer = x^3
v_mul_f32_e32 v3, v0, v0
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v4, v0, v3
.p2align 6
.LBB0_1:
; One Taylor term per trip: value += sign*numer/denom; numer *= x*x;
; denom *= (2j+2)*(2j+3); sign = -sign. Exits when s3 reaches 17 (6 terms).
v_cvt_f32_i32_e32 v5, s2
v_cvt_f32_i32_e32 v6, s4
s_add_i32 s5, s3, -1
s_mul_i32 s4, s3, s4
s_add_i32 s3, s3, 2
v_dual_mul_f32 v5, v4, v5 :: v_dual_mul_f32 v4, v3, v4
s_sub_i32 s2, 0, s2
s_cmp_eq_u32 s3, 17
s_mul_i32 s4, s4, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; IEEE-accurate f32 divide expansion (div_scale/rcp/fma/div_fmas/div_fixup):
; v5 = v5 / v6
v_div_scale_f32 v7, null, v6, v6, v5
v_rcp_f32_e32 v8, v7
s_waitcnt_depctr 0xfff
v_fma_f32 v9, -v7, v8, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v8, v9, v8
v_div_scale_f32 v10, vcc_lo, v5, v6, v5
v_mul_f32_e32 v9, v10, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v11, -v7, v9, v10
v_fmac_f32_e32 v9, v11, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v7, -v7, v9, v10
v_div_fmas_f32 v7, v7, v8, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v5, v7, v6, v5
v_add_f32_e32 v0, v0, v5
s_cbranch_scc0 .LBB0_1
; Epilogue: load d_output (kernarg+0x8) and store value at d_output[idx].
s_load_b64 s[0:1], s[0:1], 0x8
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sine_parallelPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13sine_parallelPfS_, .Lfunc_end0-_Z13sine_parallelPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sine_parallelPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sine_parallelPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
// Serial reference: for each of the N inputs, approximate sin(x) with a
// TERMS-term alternating Taylor series x - x^3/3! + x^5/5! - ...
void sine_serial(float *input, float *output)
{
for (int idx = 0; idx < N; idx++) {
const float x = input[idx];
const float xx = x * x;
float acc = x;          // running approximation
float power = x * xx;   // x^3, the next odd power
int factorial = 6;      // 3!
int alt = -1;           // alternating sign
for (int term = 1; term <= TERMS; term++) {
acc += alt * power / factorial;
power *= xx;
factorial *= (2 * term + 2) * (2 * term + 3);
alt = -alt;
}
output[idx] = acc;
}
}
// kernel function (CUDA device)
// One thread per element: Taylor-series sine approximation (TERMS terms),
// mirroring sine_serial. Launch as a 1-D grid over the N inputs.
__global__ void sine_parallel(float*d_input,float*d_output ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// BUG FIX: guard out-of-range threads. Without this check, any launch whose
// grid*block product exceeds N reads and writes past the end of the buffers.
if (idx >= N) return;
float d_value = d_input[idx];
float d_numer = d_input[idx]*d_input[idx]*d_input[idx]; // x^3
int d_denom = 6;  // 3!
int d_sign = -1;  // alternating series sign
for (int d_j=1;d_j<=TERMS; d_j++){
d_value += d_sign *d_numer/d_denom;
d_numer *= d_input[idx]* d_input[idx];
d_denom *= (2*d_j+2)* (2*d_j+3);
d_sign *= -1;
}
d_output[idx] = d_value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current wall-clock time in microseconds
long long start_timer() {
struct timeval now;
gettimeofday(&now, NULL);
long long micros = now.tv_sec * 1000000 + now.tv_usec;
return micros;
}
// Prints the time elapsed (in seconds) since start_time under the given
// label, and returns the elapsed time in microseconds.
long long stop_timer(long long start_time, std::string name) {
struct timeval now;
gettimeofday(&now, NULL);
long long stop_time = now.tv_sec * 1000000 + now.tv_usec;
long long elapsed = stop_time - start_time;
std::cout << std::setprecision(5);
float seconds = ((float) elapsed) / (1000 * 1000);
std::cout << name << ": " << seconds << " sec\n";
return elapsed;
}
// Reports (to stderr) any pending HIP error under the given label: first
// errors surfaced by completed asynchronous work, then the sticky error
// from the most recent API call.
void checkErrors(const char label[])
{
// Synchronize first so that failures from earlier asynchronous
// operations (kernel launches, async copies) are caught here instead
// of going unnoticed.
hipError_t err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
// Also report any error left behind by the last runtime call.
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
// GPU version: allocate, copy in, launch, copy out -- each step timed.
float *h_gpu_result = (float*)malloc(N*sizeof(float));
//declare GPU memory pointers
float *d_input;
float *d_output;
// BUG FIX: N (12345678) is not a multiple of 1024, so the previous
// truncating grid size N/1024 never computed the last N % 1024 (= 334)
// elements and the result comparison below could not pass. Round the
// block count up, and pad the device buffers to a whole number of blocks
// so the surplus threads of the last block still touch valid memory.
int numBlocks = (N + 1023) / 1024;                      // ceil(N / 1024)
size_t paddedBytes = (size_t)numBlocks * 1024 * sizeof(float);
long long Memory_Allocation_start_time = start_timer();
long long GPU_start_time = start_timer();
//allocate GPU memory (padded -- see above)
hipMalloc((void **) &d_input, paddedBytes);
hipMalloc((void **) &d_output, paddedBytes);
long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");
//transfer the input array to the GPU (only the N real elements)
long long Memory_copy_to_device_start_time = start_timer();
hipMemcpy(d_input, h_input, N*sizeof(float),hipMemcpyHostToDevice);
long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");
//launch the kernel: one thread per element, 1024 threads per block
long long Kernal_run_start_time = start_timer();
sine_parallel<<<numBlocks,1024>>>(d_input,d_output);
long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");
//copy back the result array to the CPU
long long Memory_copy_to_Host_start_time = start_timer();
hipMemcpy(h_gpu_result,d_output,N*sizeof(float),hipMemcpyDeviceToHost);
long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
// NOTE(review): abs() on a float relies on the C++ float overload being
// in scope (it is in this build); with only the C int abs the difference
// would be truncated before comparison -- consider fabsf.
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
//Cleaning up memory for gpu pointers
hipFree(d_input);
hipFree(d_output);
return 0;
}
.file "ParallelSine.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
# Host x86-64 code for sine_serial(float* input=%rdi, float* output=%rsi).
# Outer loop: %rax indexes elements 0..12345677 (N-1).
.globl _Z11sine_serialPfS_ # -- Begin function _Z11sine_serialPfS_
.p2align 4, 0x90
.type _Z11sine_serialPfS_,@function
_Z11sine_serialPfS_: # @_Z11sine_serialPfS_
.cfi_startproc
# %bb.0:
# i = 0
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_1: # =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
# Per-element setup: xmm0 = value = x, xmm1 = x*x, xmm2 = numer = x^3,
# ecx = denom = 6 (3!), edx = sign = -1, r8d = 2*j+3 (5, 7, ..., 15).
movss (%rdi,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps %xmm0, %xmm1
mulss %xmm0, %xmm1
movaps %xmm0, %xmm2
mulss %xmm1, %xmm2
movl $6, %ecx
movl $-1, %edx
movl $5, %r8d
.p2align 4, 0x90
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
# value += sign * numer / denom (int operands converted to float)
xorps %xmm3, %xmm3
cvtsi2ss %edx, %xmm3
mulss %xmm2, %xmm3
xorps %xmm4, %xmm4
cvtsi2ss %ecx, %xmm4
divss %xmm4, %xmm3
addss %xmm3, %xmm0
# numer *= x*x; denom *= (2j+3)*(2j+2); sign = -sign; counter += 2
mulss %xmm1, %xmm2
leal -1(%r8), %r9d
imull %r8d, %ecx
imull %r9d, %ecx
negl %edx
addl $2, %r8d
cmpl $17, %r8d
jne .LBB0_2
# %bb.3: # in Loop: Header=BB0_1 Depth=1
# output[i] = value; next element
movss %xmm0, (%rsi,%rax,4)
incq %rax
cmpq $12345678, %rax # imm = 0xBC614E
jne .LBB0_1
# %bb.4:
retq
.Lfunc_end0:
.size _Z11sine_serialPfS_, .Lfunc_end0-_Z11sine_serialPfS_
.cfi_endproc
# -- End function
# Host-side launch stub for the sine_parallel kernel: spills the two pointer
# arguments, builds the kernel-argument pointer array, pops the pushed launch
# configuration, and forwards everything to hipLaunchKernel.
.globl _Z28__device_stub__sine_parallelPfS_ # -- Begin function _Z28__device_stub__sine_parallelPfS_
.p2align 4, 0x90
.type _Z28__device_stub__sine_parallelPfS_,@function
_Z28__device_stub__sine_parallelPfS_: # @_Z28__device_stub__sine_parallelPfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
# Spill d_input (%rdi) and d_output (%rsi) to the stack.
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
# args[] = { &d_input, &d_output } at 64(%rsp).
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream pushed by
# __hipPushCallConfiguration at the <<<...>>> call site.
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13sine_parallelPfS_, %edi
# Shared-mem size and stream go on the stack (7th/8th arguments).
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z28__device_stub__sine_parallelPfS_, .Lfunc_end1-_Z28__device_stub__sine_parallelPfS_
.cfi_endproc
# -- End function
# start_timer(): gettimeofday into a stack timeval, then
# return tv_sec * 1e6 + tv_usec in %rax.
.globl _Z11start_timerv # -- Begin function _Z11start_timerv
.p2align 4, 0x90
.type _Z11start_timerv,@function
_Z11start_timerv: # @_Z11start_timerv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
# gettimeofday(&tv /* at 8(%rsp) */, NULL)
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
# rax = tv_sec * 1000000 + tv_usec
imulq $1000000, 8(%rsp), %rax # imm = 0xF4240
addq 16(%rsp), %rax
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z11start_timerv, .Lfunc_end2-_Z11start_timerv
.cfi_endproc
# -- End function
# stop_timer(long long start_time=%rdi, std::string name=%rsi):
# reads the clock, prints "<name>: <elapsed/1e6> sec\n" to std::cout with
# precision 5, and returns the elapsed microseconds.
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.LCPI3_0:
# Divisor constant: 1000 * 1000 as a float.
.long 0x49742400 # float 1.0E+6
.text
.globl _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.p2align 4, 0x90
.type _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE,@function
_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE: # @_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $24, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
# r15 = &name, r14 = start_time
movq %rsi, %r15
movq %rdi, %r14
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
# r12 = tv_sec * 1e6, rbx = tv_usec
imulq $1000000, 8(%rsp), %r12 # imm = 0xF4240
movq 16(%rsp), %rbx
# cout.precision(5): write through the iostream vtable-adjusted this.
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, _ZSt4cout+8(%rax)
# Insert name's data/length, then ": ".
movq (%r15), %rsi
movq 8(%r15), %rdx
movl $_ZSt4cout, %edi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rax, %r15
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
# rbx = elapsed = end_time - start_time; print (float)elapsed / 1e6.
subq %r14, %rbx
addq %r12, %rbx
cvtsi2ss %rbx, %xmm0
divss .LCPI3_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movq %r15, %rdi
callq _ZNSo9_M_insertIdEERSoT_
# Trailing " sec\n", then return elapsed.
movl $.L.str.1, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, %rax
addq $24, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE, .Lfunc_end3-_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.cfi_endproc
# -- End function
# checkErrors(const char* label=%rdi): report errors from
# hipDeviceSynchronize and hipGetLastError to stderr via fprintf.
.globl _Z11checkErrorsPKc # -- Begin function _Z11checkErrorsPKc
.p2align 4, 0x90
.type _Z11checkErrorsPKc,@function
_Z11checkErrorsPKc: # @_Z11checkErrorsPKc
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
# rbx = label (preserved across calls)
movq %rdi, %rbx
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB4_1
.LBB4_2:
callq hipGetLastError
testl %eax, %eax
jne .LBB4_4
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB4_1:
.cfi_def_cfa_offset 16
# fprintf(stderr, "CUDA Error: %s (at %s)", hipGetErrorString(err), label)
movl %eax, %edi
callq hipGetErrorString
movq stderr(%rip), %rdi
movl $.L.str.2, %esi
movq %rax, %rdx
movq %rbx, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB4_2
.LBB4_4:
# Same report for the sticky last error; tail-call fprintf.
movl %eax, %edi
callq hipGetErrorString
movq stderr(%rip), %rdi
movl $.L.str.2, %esi
movq %rax, %rdx
movq %rbx, %rcx
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 8
jmp fprintf # TAILCALL
.Lfunc_end4:
.size _Z11checkErrorsPKc, .Lfunc_end4-_Z11checkErrorsPKc
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x3dcccccd # float 0.100000001
.LCPI5_1:
.long 0x49742400 # float 1.0E+6
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI5_2:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI5_3:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 384
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
.cfi_escape 0x2e, 0x00
movl $49382712, %edi # imm = 0x2F18538
callq malloc
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl $49382712, %edi # imm = 0x2F18538
callq malloc
movq %rax, %r14
xorl %eax, %eax
movss .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB5_1: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%r14,%rax,4)
incq %rax
cmpq $12345678, %rax # imm = 0xBC614E
jne .LBB5_1
# %bb.2:
.cfi_escape 0x2e, 0x00
xorl %r15d, %r15d
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %r13
movq 8(%rsp), %rax
movq %rax, 24(%rsp) # 8-byte Spill
.p2align 4, 0x90
.LBB5_3: # =>This Loop Header: Depth=1
# Child Loop BB5_4 Depth 2
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps %xmm0, %xmm1
mulss %xmm0, %xmm1
movaps %xmm0, %xmm2
mulss %xmm1, %xmm2
movl $6, %eax
movl $-1, %ecx
movl $5, %edx
.p2align 4, 0x90
.LBB5_4: # Parent Loop BB5_3 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm3, %xmm3
cvtsi2ss %ecx, %xmm3
mulss %xmm2, %xmm3
xorps %xmm4, %xmm4
cvtsi2ss %eax, %xmm4
divss %xmm4, %xmm3
addss %xmm3, %xmm0
mulss %xmm1, %xmm2
leal -1(%rdx), %esi
imull %edx, %eax
imull %esi, %eax
negl %ecx
addl $2, %edx
cmpl $17, %edx
jne .LBB5_4
# %bb.5: # in Loop: Header=BB5_3 Depth=1
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq $12345678, %r15 # imm = 0xBC614E
jne .LBB5_3
# %bb.6: # %_Z11sine_serialPfS_.exit
leaq 72(%rsp), %rax
movq %rax, 56(%rsp)
movabsq $7959358215270974218, %rax # imm = 0x6E7552205550430A
movq %rax, 72(%rsp)
movabsq $7308613580334462290, %rax # imm = 0x656D6954206E7552
movq %rax, 77(%rsp)
movq $13, 64(%rsp)
movb $0, 85(%rsp)
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %r12
movq 8(%rsp), %rbp
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, _ZSt4cout+8(%rax)
movq 56(%rsp), %rsi
movq 64(%rsp), %rdx
.Ltmp0:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp1:
# %bb.7: # %.noexc64
.Ltmp2:
movq %rax, %r15
.cfi_escape 0x2e, 0x00
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp3:
# %bb.8: # %.noexc65
subq %r13, %r12
imulq $1000000, %r12, %rax # imm = 0xF4240
subq 24(%rsp), %rbp # 8-byte Folded Reload
addq %rax, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %rbp, %xmm0
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
.Ltmp4:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp5:
# %bb.9: # %.noexc66
.Ltmp6:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp7:
# %bb.10: # %_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE.exit
movq 56(%rsp), %rdi
leaq 72(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_12
# %bb.11: # %.critedge.i.i
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB5_12: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit
.cfi_escape 0x2e, 0x00
movl $49382712, %edi # imm = 0x2F18538
callq malloc
movq %rax, %r15
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %rax
movq %rax, 32(%rsp) # 8-byte Spill
movq 8(%rsp), %rax
movq %rax, 24(%rsp) # 8-byte Spill
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %rax
movq %rax, 248(%rsp) # 8-byte Spill
movq 8(%rsp), %rax
movq %rax, 256(%rsp) # 8-byte Spill
.cfi_escape 0x2e, 0x00
leaq 48(%rsp), %rdi
movl $49382712, %esi # imm = 0x2F18538
callq hipMalloc
.cfi_escape 0x2e, 0x00
leaq 40(%rsp), %rdi
movl $49382712, %esi # imm = 0x2F18538
callq hipMalloc
leaq 232(%rsp), %rax
movq %rax, 216(%rsp)
.Ltmp9:
.cfi_escape 0x2e, 0x00
movl $29, %edi
callq _Znwm
.Ltmp10:
# %bb.13: # %.noexc73
movq %rax, 216(%rsp)
movq $28, 232(%rsp)
movups .L.str.4+12(%rip), %xmm0
movups %xmm0, 12(%rax)
movups .L.str.4(%rip), %xmm0
movups %xmm0, (%rax)
movq $28, 224(%rsp)
movb $0, 28(%rax)
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %r13
movq 8(%rsp), %rbp
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, _ZSt4cout+8(%rax)
movq 216(%rsp), %rsi
movq 224(%rsp), %rdx
.Ltmp12:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp13:
# %bb.14: # %.noexc75
.Ltmp14:
movq %rax, %r12
.cfi_escape 0x2e, 0x00
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp15:
# %bb.15: # %.noexc76
subq 32(%rsp), %r13 # 8-byte Folded Reload
imulq $1000000, %r13, %rax # imm = 0xF4240
subq 24(%rsp), %rbp # 8-byte Folded Reload
addq %rax, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %rbp, %xmm0
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
.Ltmp16:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp17:
# %bb.16: # %.noexc77
.Ltmp18:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp19:
# %bb.17: # %_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE.exit79
movq 216(%rsp), %rdi
leaq 232(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_19
# %bb.18: # %.critedge.i.i80
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB5_19: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit82
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %rax
movq %rax, 32(%rsp) # 8-byte Spill
movq 8(%rsp), %rax
movq %rax, 24(%rsp) # 8-byte Spill
movq 48(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movl $49382712, %edx # imm = 0x2F18538
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 200(%rsp), %rax
movq %rax, 184(%rsp)
.Ltmp21:
.cfi_escape 0x2e, 0x00
movl $33, %edi
callq _Znwm
.Ltmp22:
# %bb.20: # %.noexc88
movq %rax, 184(%rsp)
movq $32, 200(%rsp)
movups .L.str.5+16(%rip), %xmm0
movups %xmm0, 16(%rax)
movups .L.str.5(%rip), %xmm0
movups %xmm0, (%rax)
movq $32, 192(%rsp)
movb $0, 32(%rax)
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %r13
movq 8(%rsp), %rbp
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, _ZSt4cout+8(%rax)
movq 184(%rsp), %rsi
movq 192(%rsp), %rdx
.Ltmp24:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp25:
# %bb.21: # %.noexc90
.Ltmp26:
movq %rax, %r12
.cfi_escape 0x2e, 0x00
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp27:
# %bb.22: # %.noexc91
subq 32(%rsp), %r13 # 8-byte Folded Reload
imulq $1000000, %r13, %rax # imm = 0xF4240
subq 24(%rsp), %rbp # 8-byte Folded Reload
addq %rax, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %rbp, %xmm0
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
.Ltmp28:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp29:
# %bb.23: # %.noexc92
.Ltmp30:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp31:
# %bb.24: # %_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE.exit94
movq 184(%rsp), %rdi
leaq 200(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_26
# %bb.25: # %.critedge.i.i95
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB5_26: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit97
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %rax
movq %rax, 32(%rsp) # 8-byte Spill
movq 8(%rsp), %rax
movq %rax, 24(%rsp) # 8-byte Spill
movabsq $4294968320, %rdx # imm = 0x100000400
leaq 11032(%rdx), %rdi
.cfi_escape 0x2e, 0x00
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_28
# %bb.27:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq %rax, 320(%rsp)
movq %rcx, 312(%rsp)
leaq 320(%rsp), %rax
movq %rax, (%rsp)
leaq 312(%rsp), %rax
movq %rax, 8(%rsp)
.cfi_escape 0x2e, 0x00
leaq 296(%rsp), %rdi
leaq 280(%rsp), %rsi
leaq 272(%rsp), %rdx
leaq 264(%rsp), %rcx
callq __hipPopCallConfiguration
movq 296(%rsp), %rsi
movl 304(%rsp), %edx
movq 280(%rsp), %rcx
movl 288(%rsp), %r8d
.cfi_escape 0x2e, 0x10
movq %rsp, %r9
movl $_Z13sine_parallelPfS_, %edi
pushq 264(%rsp)
.cfi_adjust_cfa_offset 8
pushq 280(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_28: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_createERmm.exit.i.i107
leaq 168(%rsp), %rax
movq %rax, 152(%rsp)
.Ltmp33:
.cfi_escape 0x2e, 0x00
movl $22, %edi
callq _Znwm
.Ltmp34:
# %bb.29: # %.noexc112
movq %rax, 152(%rsp)
movq $21, 168(%rsp)
movups .L.str.6(%rip), %xmm0
movups %xmm0, (%rax)
movabsq $4207889725998001781, %rcx # imm = 0x3A656D6954206E75
movq %rcx, 13(%rax)
movq $21, 160(%rsp)
movb $0, 21(%rax)
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %rbp
movq 8(%rsp), %r13
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, _ZSt4cout+8(%rax)
movq 152(%rsp), %rsi
movq 160(%rsp), %rdx
.Ltmp36:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp37:
# %bb.30: # %.noexc114
.Ltmp38:
movq %rax, %r12
.cfi_escape 0x2e, 0x00
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp39:
# %bb.31: # %.noexc115
subq 32(%rsp), %rbp # 8-byte Folded Reload
imulq $1000000, %rbp, %rax # imm = 0xF4240
subq 24(%rsp), %r13 # 8-byte Folded Reload
addq %rax, %r13
xorps %xmm0, %xmm0
cvtsi2ss %r13, %xmm0
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
.Ltmp40:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp41:
# %bb.32: # %.noexc116
.Ltmp42:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp43:
# %bb.33: # %_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE.exit118
movq 152(%rsp), %rdi
leaq 168(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_35
# %bb.34: # %.critedge.i.i119
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB5_35: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit121
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %rax
movq %rax, 32(%rsp) # 8-byte Spill
movq 8(%rsp), %rax
movq %rax, 24(%rsp) # 8-byte Spill
movq 40(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl $49382712, %edx # imm = 0x2F18538
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
leaq 136(%rsp), %rax
movq %rax, 120(%rsp)
.Ltmp45:
.cfi_escape 0x2e, 0x00
movl $31, %edi
callq _Znwm
.Ltmp46:
# %bb.36: # %.noexc127
movq %rax, 120(%rsp)
movq $30, 136(%rsp)
movups .L.str.7+14(%rip), %xmm0
movups %xmm0, 14(%rax)
movups .L.str.7(%rip), %xmm0
movups %xmm0, (%rax)
movq $30, 128(%rsp)
movb $0, 30(%rax)
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %r13
movq 8(%rsp), %rbp
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, _ZSt4cout+8(%rax)
movq 120(%rsp), %rsi
movq 128(%rsp), %rdx
.Ltmp48:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp49:
# %bb.37: # %.noexc129
.Ltmp50:
movq %rax, %r12
.cfi_escape 0x2e, 0x00
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp51:
# %bb.38: # %.noexc130
subq 32(%rsp), %r13 # 8-byte Folded Reload
imulq $1000000, %r13, %rax # imm = 0xF4240
subq 24(%rsp), %rbp # 8-byte Folded Reload
addq %rax, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %rbp, %xmm0
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
.Ltmp52:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp53:
# %bb.39: # %.noexc131
.Ltmp54:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp55:
# %bb.40: # %_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE.exit133
movq 120(%rsp), %rdi
leaq 136(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_42
# %bb.41: # %.critedge.i.i134
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB5_42: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit136
leaq 104(%rsp), %rax
movq %rax, 88(%rsp)
.Ltmp57:
.cfi_escape 0x2e, 0x00
movl $21, %edi
callq _Znwm
.Ltmp58:
# %bb.43: # %.noexc142
movq %rax, 88(%rsp)
movq $20, 104(%rsp)
movups .L.str.8(%rip), %xmm0
movups %xmm0, (%rax)
movl $979725673, 16(%rax) # imm = 0x3A656D69
movq $20, 96(%rsp)
movb $0, 20(%rax)
.cfi_escape 0x2e, 0x00
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
movq (%rsp), %r13
movq 8(%rsp), %rbp
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq $5, _ZSt4cout+8(%rax)
movq 88(%rsp), %rsi
movq 96(%rsp), %rdx
.Ltmp60:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp61:
# %bb.44: # %.noexc144
.Ltmp62:
movq %rax, %r12
.cfi_escape 0x2e, 0x00
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp63:
# %bb.45: # %.noexc145
subq 248(%rsp), %r13 # 8-byte Folded Reload
imulq $1000000, %r13, %rax # imm = 0xF4240
subq 256(%rsp), %rbp # 8-byte Folded Reload
addq %rax, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %rbp, %xmm0
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
.Ltmp64:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp65:
# %bb.46: # %.noexc146
.Ltmp66:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp67:
# %bb.47: # %_Z10stop_timerxNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE.exit148
movq 88(%rsp), %rdi
leaq 104(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_49
# %bb.48: # %.critedge.i.i149
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB5_49: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit151.preheader
xorl %eax, %eax
movaps .LCPI5_2(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI5_3(%rip), %xmm1 # xmm1 = mem[0],zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_50: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit151
# =>This Inner Loop Header: Depth=1
movss (%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%r15,%rax,4), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
xorl %edx, %edx
ucomisd %xmm1, %xmm2
seta %dl
addl %edx, %ecx
incq %rax
cmpq $12345678, %rax # imm = 0xBC614E
jne .LBB5_50
# %bb.51:
testl %ecx, %ecx
movl $.Lstr, %eax
movl $.Lstr.1, %edi
cmoveq %rax, %rdi
.cfi_escape 0x2e, 0x00
callq puts@PLT
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq free
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq free
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq free
movq 48(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq hipFree
movq 40(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq hipFree
xorl %eax, %eax
addq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_52:
.cfi_def_cfa_offset 384
.Ltmp59:
jmp .LBB5_57
.LBB5_53:
.Ltmp47:
jmp .LBB5_57
.LBB5_54:
.Ltmp35:
jmp .LBB5_57
.LBB5_55:
.Ltmp23:
jmp .LBB5_57
.LBB5_56:
.Ltmp11:
.LBB5_57: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit100
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.LBB5_58:
.Ltmp68:
movq %rax, %rbx
movq 88(%rsp), %rdi
leaq 104(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_69
# %bb.59: # %.critedge.i.i158
.cfi_escape 0x2e, 0x00
jmp .LBB5_71
.LBB5_60:
.Ltmp56:
movq %rax, %rbx
movq 120(%rsp), %rdi
leaq 136(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_69
# %bb.61: # %.critedge.i.i155
.cfi_escape 0x2e, 0x00
jmp .LBB5_71
.LBB5_62:
.Ltmp44:
movq %rax, %rbx
movq 152(%rsp), %rdi
leaq 168(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_69
# %bb.63: # %.critedge.i.i152
.cfi_escape 0x2e, 0x00
jmp .LBB5_71
.LBB5_64:
.Ltmp32:
movq %rax, %rbx
movq 184(%rsp), %rdi
leaq 200(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_69
# %bb.65: # %.critedge.i.i104
.cfi_escape 0x2e, 0x00
jmp .LBB5_71
.LBB5_66:
.Ltmp20:
movq %rax, %rbx
movq 216(%rsp), %rdi
leaq 232(%rsp), %rax
cmpq %rax, %rdi
je .LBB5_69
# %bb.67: # %.critedge.i.i101
.cfi_escape 0x2e, 0x00
jmp .LBB5_71
.LBB5_68:
.Ltmp8:
movq %rax, %rbx
movq 56(%rsp), %rdi
leaq 72(%rsp), %rax
cmpq %rax, %rdi
jne .LBB5_70
.LBB5_69: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit100
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.LBB5_70: # %.critedge.i.i98
.cfi_escape 0x2e, 0x00
.LBB5_71: # %.critedge.i.i98
callq _ZdlPv
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table5:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp7-.Ltmp0 # Call between .Ltmp0 and .Ltmp7
.uleb128 .Ltmp8-.Lfunc_begin0 # jumps to .Ltmp8
.byte 0 # On action: cleanup
.uleb128 .Ltmp7-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp9-.Ltmp7 # Call between .Ltmp7 and .Ltmp9
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp9-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp10-.Ltmp9 # Call between .Ltmp9 and .Ltmp10
.uleb128 .Ltmp11-.Lfunc_begin0 # jumps to .Ltmp11
.byte 0 # On action: cleanup
.uleb128 .Ltmp12-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp19-.Ltmp12 # Call between .Ltmp12 and .Ltmp19
.uleb128 .Ltmp20-.Lfunc_begin0 # jumps to .Ltmp20
.byte 0 # On action: cleanup
.uleb128 .Ltmp19-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp21-.Ltmp19 # Call between .Ltmp19 and .Ltmp21
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp21-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp22-.Ltmp21 # Call between .Ltmp21 and .Ltmp22
.uleb128 .Ltmp23-.Lfunc_begin0 # jumps to .Ltmp23
.byte 0 # On action: cleanup
.uleb128 .Ltmp24-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp31-.Ltmp24 # Call between .Ltmp24 and .Ltmp31
.uleb128 .Ltmp32-.Lfunc_begin0 # jumps to .Ltmp32
.byte 0 # On action: cleanup
.uleb128 .Ltmp31-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Ltmp33-.Ltmp31 # Call between .Ltmp31 and .Ltmp33
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp33-.Lfunc_begin0 # >> Call Site 9 <<
.uleb128 .Ltmp34-.Ltmp33 # Call between .Ltmp33 and .Ltmp34
.uleb128 .Ltmp35-.Lfunc_begin0 # jumps to .Ltmp35
.byte 0 # On action: cleanup
.uleb128 .Ltmp36-.Lfunc_begin0 # >> Call Site 10 <<
.uleb128 .Ltmp43-.Ltmp36 # Call between .Ltmp36 and .Ltmp43
.uleb128 .Ltmp44-.Lfunc_begin0 # jumps to .Ltmp44
.byte 0 # On action: cleanup
.uleb128 .Ltmp43-.Lfunc_begin0 # >> Call Site 11 <<
.uleb128 .Ltmp45-.Ltmp43 # Call between .Ltmp43 and .Ltmp45
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp45-.Lfunc_begin0 # >> Call Site 12 <<
.uleb128 .Ltmp46-.Ltmp45 # Call between .Ltmp45 and .Ltmp46
.uleb128 .Ltmp47-.Lfunc_begin0 # jumps to .Ltmp47
.byte 0 # On action: cleanup
.uleb128 .Ltmp48-.Lfunc_begin0 # >> Call Site 13 <<
.uleb128 .Ltmp55-.Ltmp48 # Call between .Ltmp48 and .Ltmp55
.uleb128 .Ltmp56-.Lfunc_begin0 # jumps to .Ltmp56
.byte 0 # On action: cleanup
.uleb128 .Ltmp57-.Lfunc_begin0 # >> Call Site 14 <<
.uleb128 .Ltmp58-.Ltmp57 # Call between .Ltmp57 and .Ltmp58
.uleb128 .Ltmp59-.Lfunc_begin0 # jumps to .Ltmp59
.byte 0 # On action: cleanup
.uleb128 .Ltmp60-.Lfunc_begin0 # >> Call Site 15 <<
.uleb128 .Ltmp67-.Ltmp60 # Call between .Ltmp60 and .Ltmp67
.uleb128 .Ltmp68-.Lfunc_begin0 # jumps to .Ltmp68
.byte 0 # On action: cleanup
.uleb128 .Ltmp67-.Lfunc_begin0 # >> Call Site 16 <<
.uleb128 .Lfunc_end5-.Ltmp67 # Call between .Ltmp67 and .Lfunc_end5
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13sine_parallelPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13sine_parallelPfS_,@object # @_Z13sine_parallelPfS_
.section .rodata,"a",@progbits
.globl _Z13sine_parallelPfS_
.p2align 3, 0x0
_Z13sine_parallelPfS_:
.quad _Z28__device_stub__sine_parallelPfS_
.size _Z13sine_parallelPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz ": "
.size .L.str, 3
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " sec\n"
.size .L.str.1, 6
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "CUDA Error: %s (at %s)"
.size .L.str.2, 23
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\nCPU Run Time"
.size .L.str.3, 14
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nGPU Memory allocation time:"
.size .L.str.4, 29
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\nGPU Memory Copy to Device time:"
.size .L.str.5, 33
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nGPU Kernal run Time:"
.size .L.str.6, 22
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "\nGPU Memory Copy to Host time:"
.size .L.str.7, 31
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\nTotal GPU Run time:"
.size .L.str.8, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13sine_parallelPfS_"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Result comparison passed."
.size .Lstr, 26
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Result comparison failed."
.size .Lstr.1, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__sine_parallelPfS_
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z13sine_parallelPfS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13sine_parallelPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fca00078e0207 */
/*0070*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */
/* 0x000ea2000c1e1900 */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 1.541015625, -0.052093505859375 ; /* 0x3e2aaaabff097435 */
/* 0x000fe200000001ff */
/*0090*/ IMAD.MOV.U32 R4, RZ, RZ, 0x40c00000 ; /* 0x40c00000ff047424 */
/* 0x000fe200078e00ff */
/*00a0*/ BSSY B0, 0x190 ; /* 0x000000e000007945 */
/* 0x000ff00003800000 */
/*00b0*/ FFMA R4, -R9, R4, 1 ; /* 0x3f80000009047423 */
/* 0x000fc80000000104 */
/*00c0*/ FFMA R4, R4, -R9, -0.16666667163372039795 ; /* 0xbe2aaaab04047423 */
/* 0x000fe40000000809 */
/*00d0*/ FMUL R2, R3, R3 ; /* 0x0000000303027220 */
/* 0x004fc80000400000 */
/*00e0*/ FMUL R5, R3, R2 ; /* 0x0000000203057220 */
/* 0x000fc80000400000 */
/*00f0*/ FCHK P0, R5, -6 ; /* 0xc0c0000005007902 */
/* 0x000e220000000000 */
/*0100*/ FFMA R8, R5, R4, RZ ; /* 0x0000000405087223 */
/* 0x000fc800000000ff */
/*0110*/ FFMA R9, R8, 6, R5 ; /* 0x40c0000008097823 */
/* 0x000fc80000000005 */
/*0120*/ FFMA R4, R4, R9, R8 ; /* 0x0000000904047223 */
/* 0x000fe20000000008 */
/*0130*/ @!P0 BRA 0x180 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0140*/ MOV R9, 0xc0c00000 ; /* 0xc0c0000000097802 */
/* 0x000fe40000000f00 */
/*0150*/ MOV R4, 0x170 ; /* 0x0000017000047802 */
/* 0x000fe40000000f00 */
/*0160*/ CALL.REL.NOINC 0x740 ; /* 0x000005d000007944 */
/* 0x000fea0003c00000 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0009 */
/*0180*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0190*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*01a0*/ MOV R7, 0x3c088889 ; /* 0x3c08888900077802 */
/* 0x000fe20000000f00 */
/*01b0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x42f00000 ; /* 0x42f00000ff067424 */
/* 0x000fe200078e00ff */
/*01c0*/ BSSY B0, 0x2a0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*01d0*/ FCHK P0, R5, 120 ; /* 0x42f0000005007902 */
/* 0x000e220000000000 */
/*01e0*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000000000 */
/*01f0*/ FFMA R6, R7, -R6, 1 ; /* 0x3f80000007067423 */
/* 0x000fc80000000806 */
/*0200*/ FFMA R6, R6, R7, 0.0083333337679505348206 ; /* 0x3c08888906067423 */
/* 0x000fc80000000007 */
/*0210*/ FFMA R7, R5, R6, RZ ; /* 0x0000000605077223 */
/* 0x000fc800000000ff */
/*0220*/ FFMA R8, R7, -120, R5 ; /* 0xc2f0000007087823 */
/* 0x000fc80000000005 */
/*0230*/ FFMA R6, R6, R8, R7 ; /* 0x0000000806067223 */
/* 0x000fe20000000007 */
/*0240*/ @!P0 BRA 0x290 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0250*/ HFMA2.MMA R9, -RZ, RZ, 3.46875, 0 ; /* 0x42f00000ff097435 */
/* 0x000fe200000001ff */
/*0260*/ MOV R4, 0x280 ; /* 0x0000028000047802 */
/* 0x000fca0000000f00 */
/*0270*/ CALL.REL.NOINC 0x740 ; /* 0x000004c000007944 */
/* 0x000fea0003c00000 */
/*0280*/ IMAD.MOV.U32 R6, RZ, RZ, R9 ; /* 0x000000ffff067224 */
/* 0x001fe400078e0009 */
/*0290*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02a0*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*02b0*/ MOV R7, 0x39500d01 ; /* 0x39500d0100077802 */
/* 0x000fe20000000f00 */
/*02c0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x459d8000 ; /* 0x459d8000ff047424 */
/* 0x000fe200078e00ff */
/*02d0*/ BSSY B0, 0x3b0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*02e0*/ FCHK P0, R5, -5040 ; /* 0xc59d800005007902 */
/* 0x000e220000000000 */
/*02f0*/ FADD R3, R3, R6 ; /* 0x0000000603037221 */
/* 0x000fe40000000000 */
/*0300*/ FFMA R4, -R7, R4, 1 ; /* 0x3f80000007047423 */
/* 0x000fc80000000104 */
/*0310*/ FFMA R4, R4, -R7, -0.00019841270113829523325 ; /* 0xb9500d0104047423 */
/* 0x000fc80000000807 */
/*0320*/ FFMA R7, R5, R4, RZ ; /* 0x0000000405077223 */
/* 0x000fc800000000ff */
/*0330*/ FFMA R8, R7, 5040, R5 ; /* 0x459d800007087823 */
/* 0x000fc80000000005 */
/*0340*/ FFMA R4, R4, R8, R7 ; /* 0x0000000804047223 */
/* 0x000fe20000000007 */
/*0350*/ @!P0 BRA 0x3a0 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0360*/ HFMA2.MMA R9, -RZ, RZ, -5.61328125, -0.0 ; /* 0xc59d8000ff097435 */
/* 0x000fe200000001ff */
/*0370*/ MOV R4, 0x390 ; /* 0x0000039000047802 */
/* 0x000fca0000000f00 */
/*0380*/ CALL.REL.NOINC 0x740 ; /* 0x000003b000007944 */
/* 0x000fea0003c00000 */
/*0390*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0009 */
/*03a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03b0*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*03c0*/ MOV R7, 0x3638ef1d ; /* 0x3638ef1d00077802 */
/* 0x000fe20000000f00 */
/*03d0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x48b13000 ; /* 0x48b13000ff067424 */
/* 0x000fe200078e00ff */
/*03e0*/ BSSY B0, 0x4c0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*03f0*/ FCHK P0, R5, 362880 ; /* 0x48b1300005007902 */
/* 0x000e220000000000 */
/*0400*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000000000 */
/*0410*/ FFMA R6, R7, -R6, 1 ; /* 0x3f80000007067423 */
/* 0x000fc80000000806 */
/*0420*/ FFMA R6, R6, R7, 2.7557318844628753141e-06 ; /* 0x3638ef1d06067423 */
/* 0x000fc80000000007 */
/*0430*/ FFMA R7, R5, R6, RZ ; /* 0x0000000605077223 */
/* 0x000fc800000000ff */
/*0440*/ FFMA R8, R7, -362880, R5 ; /* 0xc8b1300007087823 */
/* 0x000fc80000000005 */
/*0450*/ FFMA R6, R6, R8, R7 ; /* 0x0000000806067223 */
/* 0x000fe20000000007 */
/*0460*/ @!P0 BRA 0x4b0 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0470*/ HFMA2.MMA R9, -RZ, RZ, 9.3828125, 0.125 ; /* 0x48b13000ff097435 */
/* 0x000fe200000001ff */
/*0480*/ MOV R4, 0x4a0 ; /* 0x000004a000047802 */
/* 0x000fca0000000f00 */
/*0490*/ CALL.REL.NOINC 0x740 ; /* 0x000002a000007944 */
/* 0x000fea0003c00000 */
/*04a0*/ IMAD.MOV.U32 R6, RZ, RZ, R9 ; /* 0x000000ffff067224 */
/* 0x001fe400078e0009 */
/*04b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04c0*/ FMUL R5, R2, R5 ; /* 0x0000000502057220 */
/* 0x000fe20000400000 */
/*04d0*/ MOV R7, 0x32d7322b ; /* 0x32d7322b00077802 */
/* 0x000fe20000000f00 */
/*04e0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4c184540 ; /* 0x4c184540ff047424 */
/* 0x000fe200078e00ff */
/*04f0*/ BSSY B0, 0x5d0 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*0500*/ FCHK P0, R5, -39916800 ; /* 0xcc18454005007902 */
/* 0x000e220000000000 */
/*0510*/ FADD R3, R3, R6 ; /* 0x0000000603037221 */
/* 0x000fe40000000000 */
/*0520*/ FFMA R4, -R7, R4, 1 ; /* 0x3f80000007047423 */
/* 0x000fc80000000104 */
/*0530*/ FFMA R4, R4, -R7, -2.5052107943679402524e-08 ; /* 0xb2d7322b04047423 */
/* 0x000fc80000000807 */
/*0540*/ FFMA R7, R5, R4, RZ ; /* 0x0000000405077223 */
/* 0x000fc800000000ff */
/*0550*/ FFMA R8, R7, 39916800, R5 ; /* 0x4c18454007087823 */
/* 0x000fc80000000005 */
/*0560*/ FFMA R4, R4, R8, R7 ; /* 0x0000000804047223 */
/* 0x000fe20000000007 */
/*0570*/ @!P0 BRA 0x5c0 ; /* 0x0000004000008947 */
/* 0x001fea0003800000 */
/*0580*/ HFMA2.MMA R9, -RZ, RZ, -16.375, 5.25 ; /* 0xcc184540ff097435 */
/* 0x000fe200000001ff */
/*0590*/ MOV R4, 0x5b0 ; /* 0x000005b000047802 */
/* 0x000fca0000000f00 */
/*05a0*/ CALL.REL.NOINC 0x740 ; /* 0x0000019000007944 */
/* 0x000fea0003c00000 */
/*05b0*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0009 */
/*05c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*05d0*/ FMUL R2, R2, R5 ; /* 0x0000000502027220 */
/* 0x000fe20000400000 */
/*05e0*/ MOV R7, 0x300e45bd ; /* 0x300e45bd00077802 */
/* 0x000fe20000000f00 */
/*05f0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4ee65198 ; /* 0x4ee65198ff067424 */
/* 0x000fe200078e00ff */
/*0600*/ BSSY B0, 0x6f0 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0610*/ FCHK P0, R2, 1.93205350400000000000e+09 ; /* 0x4ee6519802007902 */
/* 0x000e220000000000 */
/*0620*/ FADD R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000000000 */
/*0630*/ FFMA R6, R7, -R6, 1 ; /* 0x3f80000007067423 */
/* 0x000fc80000000806 */
/*0640*/ FFMA R5, R6, R7, 5.1758403118995488512e-10 ; /* 0x300e45bd06057423 */
/* 0x000fc80000000007 */
/*0650*/ FFMA R6, R2, R5, RZ ; /* 0x0000000502067223 */
/* 0x000fc800000000ff */
/*0660*/ FFMA R7, R6, -1.93205350400000000000e+09, R2 ; /* 0xcee6519806077823 */
/* 0x000fc80000000002 */
/*0670*/ FFMA R6, R5, R7, R6 ; /* 0x0000000705067223 */
/* 0x000fe20000000006 */
/*0680*/ @!P0 BRA 0x6e0 ; /* 0x0000005000008947 */
/* 0x001fea0003800000 */
/*0690*/ MOV R5, R2 ; /* 0x0000000200057202 */
/* 0x000fe20000000f00 */
/*06a0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4ee65198 ; /* 0x4ee65198ff097424 */
/* 0x000fe200078e00ff */
/*06b0*/ MOV R4, 0x6d0 ; /* 0x000006d000047802 */
/* 0x000fe40000000f00 */
/*06c0*/ CALL.REL.NOINC 0x740 ; /* 0x0000007000007944 */
/* 0x000fea0003c00000 */
/*06d0*/ MOV R6, R9 ; /* 0x0000000900067202 */
/* 0x001fe40000000f00 */
/*06e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*06f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe400078e00ff */
/*0700*/ FADD R5, R3, R6 ; /* 0x0000000603057221 */
/* 0x000fe40000000000 */
/*0710*/ IMAD.WIDE R2, R0, R7, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0207 */
/*0720*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0730*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0740*/ SHF.R.U32.HI R6, RZ, 0x17, R9 ; /* 0x00000017ff067819 */
/* 0x000fe20000011609 */
/*0750*/ BSSY B1, 0xd90 ; /* 0x0000063000017945 */
/* 0x000fe20003800000 */
/*0760*/ SHF.R.U32.HI R7, RZ, 0x17, R5 ; /* 0x00000017ff077819 */
/* 0x000fe40000011605 */
/*0770*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */
/* 0x000fe400078ec0ff */
/*0780*/ LOP3.LUT R13, R7, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff070d7812 */
/* 0x000fe400078ec0ff */
/*0790*/ IADD3 R11, R6, -0x1, RZ ; /* 0xffffffff060b7810 */
/* 0x000fe40007ffe0ff */
/*07a0*/ IADD3 R10, R13, -0x1, RZ ; /* 0xffffffff0d0a7810 */
/* 0x000fc40007ffe0ff */
/*07b0*/ ISETP.GT.U32.AND P0, PT, R11, 0xfd, PT ; /* 0x000000fd0b00780c */
/* 0x000fe40003f04070 */
/*07c0*/ MOV R8, R5 ; /* 0x0000000500087202 */
/* 0x000fe40000000f00 */
/*07d0*/ ISETP.GT.U32.OR P0, PT, R10, 0xfd, P0 ; /* 0x000000fd0a00780c */
/* 0x000fda0000704470 */
/*07e0*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff078224 */
/* 0x000fe200078e00ff */
/*07f0*/ @!P0 BRA 0x970 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0800*/ FSETP.GTU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fe40003f1c200 */
/*0810*/ FSETP.GTU.FTZ.AND P1, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */
/* 0x000fc80003f3c200 */
/*0820*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*0830*/ @P0 BRA 0xd70 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0840*/ LOP3.LUT P0, RZ, R9, 0x7fffffff, R8, 0xc8, !PT ; /* 0x7fffffff09ff7812 */
/* 0x000fda000780c808 */
/*0850*/ @!P0 BRA 0xd50 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0860*/ FSETP.NEU.FTZ.AND P2, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fe40003f5d200 */
/*0870*/ FSETP.NEU.FTZ.AND P1, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */
/* 0x000fe40003f3d200 */
/*0880*/ FSETP.NEU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fd60003f1d200 */
/*0890*/ @!P1 BRA !P2, 0xd50 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*08a0*/ LOP3.LUT P2, RZ, R8, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff08ff7812 */
/* 0x000fc8000784c0ff */
/*08b0*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*08c0*/ @P1 BRA 0xd30 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*08d0*/ LOP3.LUT P1, RZ, R9, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff09ff7812 */
/* 0x000fc8000782c0ff */
/*08e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*08f0*/ @P0 BRA 0xd00 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*0900*/ ISETP.GE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f06270 */
/*0910*/ ISETP.GE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fd60003f26270 */
/*0920*/ @P0 MOV R7, RZ ; /* 0x000000ff00070202 */
/* 0x000fe20000000f00 */
/*0930*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, -0x40 ; /* 0xffffffc0ff078424 */
/* 0x000fe400078e00ff */
/*0940*/ @!P0 FFMA R8, R5, 1.84467440737095516160e+19, RZ ; /* 0x5f80000005088823 */
/* 0x000fe400000000ff */
/*0950*/ @!P1 FFMA R9, R9, 1.84467440737095516160e+19, RZ ; /* 0x5f80000009099823 */
/* 0x000fe200000000ff */
/*0960*/ @!P1 IADD3 R7, R7, 0x40, RZ ; /* 0x0000004007079810 */
/* 0x000fe40007ffe0ff */
/*0970*/ LEA R10, R6, 0xc0800000, 0x17 ; /* 0xc0800000060a7811 */
/* 0x000fe200078eb8ff */
/*0980*/ BSSY B2, 0xcf0 ; /* 0x0000036000027945 */
/* 0x000fe20003800000 */
/*0990*/ IADD3 R13, R13, -0x7f, RZ ; /* 0xffffff810d0d7810 */
/* 0x000fe40007ffe0ff */
/*09a0*/ IADD3 R12, -R10, R9, RZ ; /* 0x000000090a0c7210 */
/* 0x000fc60007ffe1ff */
/*09b0*/ IMAD R10, R13.reuse, -0x800000, R8 ; /* 0xff8000000d0a7824 */
/* 0x040fe200078e0208 */
/*09c0*/ MUFU.RCP R9, R12 ; /* 0x0000000c00097308 */
/* 0x0000620000001000 */
/*09d0*/ FADD.FTZ R14, -R12, -RZ ; /* 0x800000ff0c0e7221 */
/* 0x000fe20000010100 */
/*09e0*/ IADD3 R12, R13, 0x7f, -R6 ; /* 0x0000007f0d0c7810 */
/* 0x001fca0007ffe806 */
/*09f0*/ IMAD.IADD R12, R12, 0x1, R7 ; /* 0x000000010c0c7824 */
/* 0x000fe400078e0207 */
/*0a00*/ FFMA R16, R9, R14, 1 ; /* 0x3f80000009107423 */
/* 0x002fc8000000000e */
/*0a10*/ FFMA R8, R9, R16, R9 ; /* 0x0000001009087223 */
/* 0x000fc80000000009 */
/*0a20*/ FFMA R11, R10, R8, RZ ; /* 0x000000080a0b7223 */
/* 0x000fc800000000ff */
/*0a30*/ FFMA R9, R14, R11, R10 ; /* 0x0000000b0e097223 */
/* 0x000fc8000000000a */
/*0a40*/ FFMA R11, R8, R9, R11 ; /* 0x00000009080b7223 */
/* 0x000fc8000000000b */
/*0a50*/ FFMA R10, R14, R11, R10 ; /* 0x0000000b0e0a7223 */
/* 0x000fc8000000000a */
/*0a60*/ FFMA R9, R8, R10, R11 ; /* 0x0000000a08097223 */
/* 0x000fca000000000b */
/*0a70*/ SHF.R.U32.HI R6, RZ, 0x17, R9 ; /* 0x00000017ff067819 */
/* 0x000fc80000011609 */
/*0a80*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */
/* 0x000fc800078ec0ff */
/*0a90*/ IADD3 R13, R6, R12, RZ ; /* 0x0000000c060d7210 */
/* 0x000fc80007ffe0ff */
/*0aa0*/ IADD3 R6, R13, -0x1, RZ ; /* 0xffffffff0d067810 */
/* 0x000fc80007ffe0ff */
/*0ab0*/ ISETP.GE.U32.AND P0, PT, R6, 0xfe, PT ; /* 0x000000fe0600780c */
/* 0x000fda0003f06070 */
/*0ac0*/ @!P0 BRA 0xcd0 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*0ad0*/ ISETP.GT.AND P0, PT, R13, 0xfe, PT ; /* 0x000000fe0d00780c */
/* 0x000fda0003f04270 */
/*0ae0*/ @P0 BRA 0xca0 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*0af0*/ ISETP.GE.AND P0, PT, R13, 0x1, PT ; /* 0x000000010d00780c */
/* 0x000fda0003f06270 */
/*0b00*/ @P0 BRA 0xce0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*0b10*/ ISETP.GE.AND P0, PT, R13, -0x18, PT ; /* 0xffffffe80d00780c */
/* 0x000fe40003f06270 */
/*0b20*/ LOP3.LUT R9, R9, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000009097812 */
/* 0x000fd600078ec0ff */
/*0b30*/ @!P0 BRA 0xce0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0b40*/ FFMA.RZ R6, R8, R10.reuse, R11.reuse ; /* 0x0000000a08067223 */
/* 0x180fe2000000c00b */
/*0b50*/ ISETP.NE.AND P2, PT, R13.reuse, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x040fe40003f45270 */
/*0b60*/ ISETP.NE.AND P1, PT, R13.reuse, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x040fe40003f25270 */
/*0b70*/ LOP3.LUT R7, R6, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff06077812 */
/* 0x000fe200078ec0ff */
/*0b80*/ FFMA.RP R6, R8.reuse, R10.reuse, R11.reuse ; /* 0x0000000a08067223 */
/* 0x1c0fe4000000800b */
/*0b90*/ FFMA.RM R11, R8, R10, R11 ; /* 0x0000000a080b7223 */
/* 0x000fe2000000400b */
/*0ba0*/ IADD3 R8, R13, 0x20, RZ ; /* 0x000000200d087810 */
/* 0x000fe20007ffe0ff */
/*0bb0*/ IMAD.MOV R10, RZ, RZ, -R13 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e0a0d */
/*0bc0*/ LOP3.LUT R7, R7, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000007077812 */
/* 0x000fc400078efcff */
/*0bd0*/ FSETP.NEU.FTZ.AND P0, PT, R6, R11, PT ; /* 0x0000000b0600720b */
/* 0x000fe40003f1d000 */
/*0be0*/ SHF.L.U32 R8, R7, R8, RZ ; /* 0x0000000807087219 */
/* 0x000fe400000006ff */
/*0bf0*/ SEL R6, R10, RZ, P2 ; /* 0x000000ff0a067207 */
/* 0x000fe40001000000 */
/*0c00*/ ISETP.NE.AND P1, PT, R8, RZ, P1 ; /* 0x000000ff0800720c */
/* 0x000fe40000f25270 */
/*0c10*/ SHF.R.U32.HI R6, RZ, R6, R7 ; /* 0x00000006ff067219 */
/* 0x000fe40000011607 */
/*0c20*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40000703570 */
/*0c30*/ SHF.R.U32.HI R8, RZ, 0x1, R6 ; /* 0x00000001ff087819 */
/* 0x000fe40000011606 */
/*0c40*/ SEL R7, RZ, 0x1, !P0 ; /* 0x00000001ff077807 */
/* 0x000fc80004000000 */
/*0c50*/ LOP3.LUT R7, R7, 0x1, R8, 0xf8, !PT ; /* 0x0000000107077812 */
/* 0x000fc800078ef808 */
/*0c60*/ LOP3.LUT R7, R7, R6, RZ, 0xc0, !PT ; /* 0x0000000607077212 */
/* 0x000fc800078ec0ff */
/*0c70*/ IADD3 R8, R8, R7, RZ ; /* 0x0000000708087210 */
/* 0x000fc80007ffe0ff */
/*0c80*/ LOP3.LUT R9, R8, R9, RZ, 0xfc, !PT ; /* 0x0000000908097212 */
/* 0x000fe200078efcff */
/*0c90*/ BRA 0xce0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0ca0*/ LOP3.LUT R9, R9, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000009097812 */
/* 0x000fc800078ec0ff */
/*0cb0*/ LOP3.LUT R9, R9, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000009097812 */
/* 0x000fe200078efcff */
/*0cc0*/ BRA 0xce0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0cd0*/ IMAD R9, R12, 0x800000, R9 ; /* 0x008000000c097824 */
/* 0x000fe400078e0209 */
/*0ce0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0cf0*/ BRA 0xd80 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*0d00*/ LOP3.LUT R9, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009097812 */
/* 0x000fc800078e4808 */
/*0d10*/ LOP3.LUT R9, R9, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000009097812 */
/* 0x000fe200078efcff */
/*0d20*/ BRA 0xd80 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0d30*/ LOP3.LUT R9, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009097812 */
/* 0x000fe200078e4808 */
/*0d40*/ BRA 0xd80 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0d50*/ MUFU.RSQ R9, -QNAN ; /* 0xffc0000000097908 */
/* 0x000e220000001400 */
/*0d60*/ BRA 0xd80 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0d70*/ FADD.FTZ R9, R5, R9 ; /* 0x0000000905097221 */
/* 0x000fe40000010000 */
/*0d80*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0d90*/ HFMA2.MMA R7, -RZ, RZ, 0, 0 ; /* 0x00000000ff077435 */
/* 0x000fe200000001ff */
/*0da0*/ MOV R6, R4 ; /* 0x0000000400067202 */
/* 0x000fca0000000f00 */
/*0db0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff24006007950 */
/* 0x000fea0003c3ffff */
/*0dc0*/ BRA 0xdc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0dd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0de0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0df0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13sine_parallelPfS_
.globl _Z13sine_parallelPfS_
.p2align 8
.type _Z13sine_parallelPfS_,@function
_Z13sine_parallelPfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_mov_b32 s4, 6
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_mov_b32 s2, -1
s_mov_b32 s3, 5
global_load_b32 v0, v[3:4], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v0, v0
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v4, v0, v3
.p2align 6
.LBB0_1:
v_cvt_f32_i32_e32 v5, s2
v_cvt_f32_i32_e32 v6, s4
s_add_i32 s5, s3, -1
s_mul_i32 s4, s3, s4
s_add_i32 s3, s3, 2
v_dual_mul_f32 v5, v4, v5 :: v_dual_mul_f32 v4, v3, v4
s_sub_i32 s2, 0, s2
s_cmp_eq_u32 s3, 17
s_mul_i32 s4, s4, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f32 v7, null, v6, v6, v5
v_rcp_f32_e32 v8, v7
s_waitcnt_depctr 0xfff
v_fma_f32 v9, -v7, v8, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v8, v9, v8
v_div_scale_f32 v10, vcc_lo, v5, v6, v5
v_mul_f32_e32 v9, v10, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v11, -v7, v9, v10
v_fmac_f32_e32 v9, v11, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v7, -v7, v9, v10
v_div_fmas_f32 v7, v7, v8, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v5, v7, v6, v5
v_add_f32_e32 v0, v0, v5
s_cbranch_scc0 .LBB0_1
s_load_b64 s[0:1], s[0:1], 0x8
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sine_parallelPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13sine_parallelPfS_, .Lfunc_end0-_Z13sine_parallelPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sine_parallelPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sine_parallelPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = 0.0;
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
local_means[threadIdx.x] += words[i];
__syncthreads();
means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc;
}
}
void __global__ backprop_mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = means[step*dims+threadIdx.x];
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
words[i] = local_means[threadIdx.x]/wordsInDoc;
}
}
void __global__ max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_maxes[threadIdx.x] = words[blockStarts+threadIdx.x];
local_which[threadIdx.x] = 0;
short j=1; // the word index in a doc
for (int i = blockStarts+dims+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(words[i]>local_maxes[threadIdx.x])
{
local_maxes[threadIdx.x] = words[i];
local_which[threadIdx.x] = j;
}
j++;
}
__syncthreads();
maxes[step*dims + threadIdx.x] = local_maxes[threadIdx.x];
which[step*dims + threadIdx.x] = local_which[threadIdx.x];
}
}
void __global__ backprop_max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_maxes[threadIdx.x] = maxes[step*dims+threadIdx.x];
local_which[threadIdx.x] = which[step*dims+threadIdx.x];
short j=0; // the word index in a doc
for (int i = blockStarts+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(local_which[threadIdx.x]==j)
{
words[i] = local_maxes[threadIdx.x];
}
else
words[i]=0;
j++;
}
}
} | .file "tmpxft_00119ae6_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii
.type _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii, @function
_Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii:
.LFB2081:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9mean_poolPfS_PiS0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii, .-_Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii
.globl _Z9mean_poolPfS_PiS0_ii
.type _Z9mean_poolPfS_PiS0_ii, @function
_Z9mean_poolPfS_PiS0_ii:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z9mean_poolPfS_PiS0_ii, .-_Z9mean_poolPfS_PiS0_ii
.globl _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii
.type _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii, @function
_Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z18backprop_mean_poolPfS_PiS0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii, .-_Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii
.globl _Z18backprop_mean_poolPfS_PiS0_ii
.type _Z18backprop_mean_poolPfS_PiS0_ii, @function
_Z18backprop_mean_poolPfS_PiS0_ii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z18backprop_mean_poolPfS_PiS0_ii, .-_Z18backprop_mean_poolPfS_PiS0_ii
.globl _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.type _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, @function
_Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8max_poolPfPiS_S0_S0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, .-_Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.globl _Z8max_poolPfPiS_S0_S0_ii
.type _Z8max_poolPfPiS_S0_S0_ii, @function
_Z8max_poolPfPiS_S0_S0_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z8max_poolPfPiS_S0_S0_ii, .-_Z8max_poolPfPiS_S0_S0_ii
.globl _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.type _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, @function
_Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17backprop_max_poolPfPiS_S0_S0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, .-_Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.globl _Z17backprop_max_poolPfPiS_S0_S0_ii
.type _Z17backprop_max_poolPfPiS_S0_S0_ii, @function
_Z17backprop_max_poolPfPiS_S0_S0_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z17backprop_max_poolPfPiS_S0_S0_ii, .-_Z17backprop_max_poolPfPiS_S0_S0_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z17backprop_max_poolPfPiS_S0_S0_ii"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z8max_poolPfPiS_S0_S0_ii"
.section .rodata.str1.8
.align 8
.LC2:
.string "_Z18backprop_mean_poolPfS_PiS0_ii"
.section .rodata.str1.1
.LC3:
.string "_Z9mean_poolPfS_PiS0_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17backprop_max_poolPfPiS_S0_S0_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z8max_poolPfPiS_S0_S0_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z18backprop_mean_poolPfS_PiS0_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9mean_poolPfS_PiS0_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = 0.0;
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
local_means[threadIdx.x] += words[i];
__syncthreads();
means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc;
}
}
void __global__ backprop_mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = means[step*dims+threadIdx.x];
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
words[i] = local_means[threadIdx.x]/wordsInDoc;
}
}
void __global__ max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_maxes[threadIdx.x] = words[blockStarts+threadIdx.x];
local_which[threadIdx.x] = 0;
short j=1; // the word index in a doc
for (int i = blockStarts+dims+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(words[i]>local_maxes[threadIdx.x])
{
local_maxes[threadIdx.x] = words[i];
local_which[threadIdx.x] = j;
}
j++;
}
__syncthreads();
maxes[step*dims + threadIdx.x] = local_maxes[threadIdx.x];
which[step*dims + threadIdx.x] = local_which[threadIdx.x];
}
}
void __global__ backprop_max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_maxes[threadIdx.x] = maxes[step*dims+threadIdx.x];
local_which[threadIdx.x] = which[step*dims+threadIdx.x];
short j=0; // the word index in a doc
for (int i = blockStarts+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(local_which[threadIdx.x]==j)
{
words[i] = local_maxes[threadIdx.x];
}
else
words[i]=0;
j++;
}
}
} | #include <hip/hip_runtime.h>
#include <stdio.h>
void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = 0.0;
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
local_means[threadIdx.x] += words[i];
__syncthreads();
means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc;
}
}
void __global__ backprop_mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = means[step*dims+threadIdx.x];
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
words[i] = local_means[threadIdx.x]/wordsInDoc;
}
}
void __global__ max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_maxes[threadIdx.x] = words[blockStarts+threadIdx.x];
local_which[threadIdx.x] = 0;
short j=1; // the word index in a doc
for (int i = blockStarts+dims+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(words[i]>local_maxes[threadIdx.x])
{
local_maxes[threadIdx.x] = words[i];
local_which[threadIdx.x] = j;
}
j++;
}
__syncthreads();
maxes[step*dims + threadIdx.x] = local_maxes[threadIdx.x];
which[step*dims + threadIdx.x] = local_which[threadIdx.x];
}
}
void __global__ backprop_max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_maxes[threadIdx.x] = maxes[step*dims+threadIdx.x];
local_which[threadIdx.x] = which[step*dims+threadIdx.x];
short j=0; // the word index in a doc
for (int i = blockStarts+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(local_which[threadIdx.x]==j)
{
words[i] = local_maxes[threadIdx.x];
}
else
words[i]=0;
j++;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = 0.0;
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
local_means[threadIdx.x] += words[i];
__syncthreads();
means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc;
}
}
void __global__ backprop_mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = means[step*dims+threadIdx.x];
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
words[i] = local_means[threadIdx.x]/wordsInDoc;
}
}
void __global__ max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
local_maxes[threadIdx.x] = words[blockStarts+threadIdx.x];
local_which[threadIdx.x] = 0;
short j=1; // the word index in a doc
for (int i = blockStarts+dims+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(words[i]>local_maxes[threadIdx.x])
{
local_maxes[threadIdx.x] = words[i];
local_which[threadIdx.x] = j;
}
j++;
}
__syncthreads();
maxes[step*dims + threadIdx.x] = local_maxes[threadIdx.x];
which[step*dims + threadIdx.x] = local_which[threadIdx.x];
}
}
// Backward pass of max pooling: scatters the pooled gradient in `maxes`
// back into the dense `words` buffer. For each feature dimension, only the
// word position recorded in `which` receives the gradient; every other word
// position is zeroed (max pooling routes gradient solely to the argmax).
// Indexing contract mirrors max_pool: one block per document (grid-stride),
// threadIdx.x = feature dimension — assumes blockDim.x == dims, dims <= 256,
// TODO confirm.
void __global__ backprop_max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
// Cached gradient value and winning word index for this dimension.
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
// Each thread reads/writes only its own slots, so no barrier is needed.
local_maxes[threadIdx.x] = maxes[step*dims+threadIdx.x];
// int from `which` narrowed into a short slot (written as short by max_pool).
local_which[threadIdx.x] = which[step*dims+threadIdx.x];
short j=0; // the word index in a doc
for (int i = blockStarts+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(local_which[threadIdx.x]==j)
{
words[i] = local_maxes[threadIdx.x];
}
else
words[i]=0;
j++;
}
}
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9mean_poolPfS_PiS0_ii
.globl _Z9mean_poolPfS_PiS0_ii
.p2align 8
.type _Z9mean_poolPfS_PiS0_ii,@function
_Z9mean_poolPfS_PiS0_ii:
s_load_b32 s16, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s15, s16
s_cbranch_scc1 .LBB0_7
s_clause 0x1
s_load_b64 s[12:13], s[0:1], 0x24
s_load_b256 s[4:11], s[0:1], 0x0
v_dual_mov_b32 v5, 0 :: v_dual_lshlrev_b32 v4, 2, v0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_ashr_i32 s1, s12, 31
s_mov_b32 s0, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[14:15], s[0:1], 2
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s1
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
ds_load_b32 v3, v4
v_cvt_f32_i32_e32 v6, v6
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v7, null, v6, v6, v3
v_div_scale_f32 v9, vcc_lo, v3, v6, v3
v_rcp_f32_e32 v8, v7
s_waitcnt_depctr 0xfff
v_fma_f32 v1, -v7, v8, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v8, v1, v8
v_mul_f32_e32 v10, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v1, -v7, v10, v9
v_fmac_f32_e32 v10, v1, v8
v_mad_u64_u32 v[1:2], null, s2, s12, v[0:1]
v_mov_b32_e32 v2, v5
s_add_i32 s2, s13, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f32 v7, -v7, v10, v9
s_cmp_ge_i32 s2, s16
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_div_fmas_f32 v7, v7, v8, v10
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_div_fixup_f32 v3, v7, v6, v3
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_store_b32 v[1:2], v3, off
s_cbranch_scc1 .LBB0_7
.LBB0_3:
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[0:1], s[2:3], 2
s_add_u32 s18, s8, s0
s_addc_u32 s19, s9, s1
s_add_u32 s0, s10, s0
s_addc_u32 s1, s11, s1
s_clause 0x1
global_load_b32 v6, v5, s[18:19]
global_load_b32 v3, v5, s[0:1]
s_mov_b32 s1, exec_lo
ds_store_b32 v4, v5
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v7, v3, v6
v_mad_u64_u32 v[1:2], null, v3, s12, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v7, v7, s12
v_cmpx_lt_i32_e64 v1, v7
s_cbranch_execz .LBB0_2
ds_load_b32 v8, v4
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s6, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
.LBB0_5:
global_load_b32 v9, v[2:3], off
v_add_nc_u32_e32 v1, s12, v1
v_add_co_u32 v2, vcc_lo, v2, s14
v_add_co_ci_u32_e32 v3, vcc_lo, s15, v3, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f32_e32 v8, v9, v8
v_cmp_ge_i32_e64 s0, v1, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s3, s0, s3
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_5
s_or_b32 exec_lo, exec_lo, s3
ds_store_b32 v4, v8
s_branch .LBB0_2
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9mean_poolPfS_PiS0_ii
.amdhsa_group_segment_fixed_size 1024
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9mean_poolPfS_PiS0_ii, .Lfunc_end0-_Z9mean_poolPfS_PiS0_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z18backprop_mean_poolPfS_PiS0_ii
.globl _Z18backprop_mean_poolPfS_PiS0_ii
.p2align 8
.type _Z18backprop_mean_poolPfS_PiS0_ii,@function
_Z18backprop_mean_poolPfS_PiS0_ii:
s_load_b32 s16, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s15, s16
s_cbranch_scc1 .LBB1_6
s_clause 0x1
s_load_b64 s[12:13], s[0:1], 0x24
s_load_b256 s[4:11], s[0:1], 0x0
v_mov_b32_e32 v4, 0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_ashr_i32 s1, s12, 31
s_mov_b32 s0, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[14:15], s[0:1], 2
s_branch .LBB1_3
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s3
s_add_i32 s2, s13, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s2, s16
s_cbranch_scc1 .LBB1_6
.LBB1_3:
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[0:1], s[2:3], 2
s_mov_b32 s3, exec_lo
s_add_u32 s18, s8, s0
s_addc_u32 s19, s9, s1
s_add_u32 s0, s10, s0
s_addc_u32 s1, s11, s1
s_load_b32 s1, s[0:1], 0x0
s_load_b32 s0, s[18:19], 0x0
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[1:2], null, s1, s12, v[0:1]
s_add_i32 s1, s1, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_mul_i32 s1, s1, s12
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmpx_gt_i32_e64 s1, v1
s_cbranch_execz .LBB1_2
v_mad_u64_u32 v[2:3], null, s2, s12, v[0:1]
v_mov_b32_e32 v3, v4
v_cvt_f32_i32_e32 v6, s0
s_mov_b32 s17, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v5, v[2:3], off
s_waitcnt vmcnt(0)
v_div_scale_f32 v2, null, v6, v6, v5
v_div_scale_f32 v8, vcc_lo, v5, v6, v5
v_rcp_f32_e32 v3, v2
s_waitcnt_depctr 0xfff
v_fma_f32 v7, -v2, v3, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v3, v7, v3
v_mul_f32_e32 v7, v8, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v9, -v2, v7, v8
v_fmac_f32_e32 v7, v9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f32 v8, -v2, v7, v8
v_ashrrev_i32_e32 v2, 31, v1
v_div_fmas_f32 v7, v8, v3, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_div_fixup_f32 v5, v7, v6, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
.LBB1_5:
v_add_nc_u32_e32 v1, s12, v1
global_store_b32 v[2:3], v5, off
v_add_co_u32 v2, s0, v2, s14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_co_ci_u32_e64 v3, s0, s15, v3, s0
v_cmp_le_i32_e32 vcc_lo, s1, v1
s_or_b32 s17, vcc_lo, s17
s_and_not1_b32 exec_lo, exec_lo, s17
s_cbranch_execnz .LBB1_5
s_branch .LBB1_2
.LBB1_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18backprop_mean_poolPfS_PiS0_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z18backprop_mean_poolPfS_PiS0_ii, .Lfunc_end1-_Z18backprop_mean_poolPfS_PiS0_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8max_poolPfPiS_S0_S0_ii
.globl _Z8max_poolPfPiS_S0_S0_ii
.p2align 8
.type _Z8max_poolPfPiS_S0_S0_ii,@function
_Z8max_poolPfPiS_S0_S0_ii:
s_load_b32 s18, s[0:1], 0x28
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s15, s18
s_cbranch_scc1 .LBB2_8
s_mov_b32 s2, s15
s_clause 0x2
s_load_b64 s[12:13], s[0:1], 0x2c
s_load_b64 s[14:15], s[0:1], 0x20
s_load_b256 s[4:11], s[0:1], 0x0
v_dual_mov_b32 v9, 0 :: v_dual_lshlrev_b32 v6, 2, v0
v_lshl_add_u32 v7, v0, 1, 0x400
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, s12, v0
s_ashr_i32 s1, s12, 31
s_mov_b32 s0, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[16:17], s[0:1], 2
s_branch .LBB2_3
.LBB2_2:
s_or_b32 exec_lo, exec_lo, s1
v_mad_u64_u32 v[3:4], null, s2, s12, v[0:1]
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
v_mov_b32_e32 v4, v2
ds_load_b32 v1, v6
ds_load_i16 v5, v7
s_add_i32 s2, s13, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_cmp_ge_i32 s2, s18
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_add_co_u32 v10, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v11, vcc_lo, s5, v4, vcc_lo
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
s_waitcnt lgkmcnt(1)
global_store_b32 v[10:11], v1, off
s_waitcnt lgkmcnt(0)
global_store_b32 v[3:4], v5, off
s_cbranch_scc1 .LBB2_8
.LBB2_3:
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[0:1], s[2:3], 2
s_add_u32 s20, s10, s0
s_addc_u32 s21, s11, s1
s_add_u32 s0, s14, s0
s_addc_u32 s1, s15, s1
s_clause 0x1
global_load_b32 v11, v2, s[20:21]
global_load_b32 v5, v2, s[0:1]
s_mov_b32 s1, exec_lo
s_waitcnt vmcnt(0)
v_mul_lo_u32 v10, v5, s12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v10, v0
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_nc_u32_e32 v1, v5, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_lo_u32 v1, v1, s12
v_add_co_u32 v3, vcc_lo, s8, v3
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v4, vcc_lo
global_load_b32 v4, v[3:4], off
v_add_nc_u32_e32 v3, v8, v10
s_waitcnt vmcnt(0)
ds_store_b32 v6, v4
ds_store_b16 v7, v9
v_cmpx_lt_i32_e64 v3, v1
s_cbranch_execz .LBB2_2
ds_load_b32 v10, v6
v_ashrrev_i32_e32 v4, 31, v3
v_mov_b32_e32 v11, 1
s_mov_b32 s3, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[3:4]
v_add_co_u32 v4, vcc_lo, s8, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
s_branch .LBB2_6
.p2align 6
.LBB2_5:
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v3, s12, v3
v_add_co_u32 v4, s0, v4, s16
v_add_nc_u16 v11, v11, 1
v_add_co_ci_u32_e64 v5, s0, s17, v5, s0
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ge_i32_e32 vcc_lo, v3, v1
s_or_b32 s3, vcc_lo, s3
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execz .LBB2_2
.LBB2_6:
global_load_b32 v12, v[4:5], off
s_mov_b32 s0, exec_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmpx_gt_f32_e32 v12, v10
s_cbranch_execz .LBB2_5
v_mov_b32_e32 v10, v12
ds_store_b32 v6, v12
ds_store_b16 v7, v11
s_branch .LBB2_5
.LBB2_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8max_poolPfPiS_S0_S0_ii
.amdhsa_group_segment_fixed_size 1536
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z8max_poolPfPiS_S0_S0_ii, .Lfunc_end2-_Z8max_poolPfPiS_S0_S0_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17backprop_max_poolPfPiS_S0_S0_ii
.globl _Z17backprop_max_poolPfPiS_S0_S0_ii
.p2align 8
.type _Z17backprop_max_poolPfPiS_S0_S0_ii,@function
_Z17backprop_max_poolPfPiS_S0_S0_ii:
s_load_b32 s18, s[0:1], 0x28
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s15, s18
s_cbranch_scc1 .LBB3_6
s_mov_b32 s2, s15
s_clause 0x2
s_load_b64 s[12:13], s[0:1], 0x2c
s_load_b64 s[14:15], s[0:1], 0x20
s_load_b256 s[4:11], s[0:1], 0x0
v_mov_b32_e32 v4, 0
s_waitcnt lgkmcnt(0)
s_ashr_i32 s1, s12, 31
s_mov_b32 s0, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[16:17], s[0:1], 2
s_branch .LBB3_3
.LBB3_2:
s_or_b32 exec_lo, exec_lo, s3
s_add_i32 s2, s13, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s2, s18
s_cbranch_scc1 .LBB3_6
.LBB3_3:
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[0:1], s[2:3], 2
s_mov_b32 s3, exec_lo
s_add_u32 s20, s10, s0
s_addc_u32 s21, s11, s1
s_add_u32 s0, s14, s0
s_addc_u32 s1, s15, s1
s_load_b32 s0, s[0:1], 0x0
s_load_b32 s1, s[20:21], 0x0
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[1:2], null, s0, s12, v[0:1]
s_add_i32 s1, s0, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_mul_i32 s1, s1, s12
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmpx_gt_i32_e64 s1, v1
s_cbranch_execz .LBB3_2
v_mad_u64_u32 v[2:3], null, s2, s12, v[0:1]
v_mov_b32_e32 v3, v4
v_mov_b32_e32 v7, 0
s_mov_b32 s19, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v5, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s8, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
.LBB3_5:
s_waitcnt vmcnt(0)
v_cmp_eq_u16_e32 vcc_lo, v7, v6
v_add_nc_u32_e32 v1, s12, v1
v_add_nc_u16 v7, v7, 1
v_cndmask_b32_e32 v8, 0, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_le_i32_e32 vcc_lo, s1, v1
global_store_b32 v[2:3], v8, off
v_add_co_u32 v2, s0, v2, s16
v_add_co_ci_u32_e64 v3, s0, s17, v3, s0
s_or_b32 s19, vcc_lo, s19
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s19
s_cbranch_execnz .LBB3_5
s_branch .LBB3_2
.LBB3_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17backprop_max_poolPfPiS_S0_S0_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z17backprop_max_poolPfPiS_S0_S0_ii, .Lfunc_end3-_Z17backprop_max_poolPfPiS_S0_S0_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1024
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9mean_poolPfS_PiS0_ii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z9mean_poolPfS_PiS0_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18backprop_mean_poolPfS_PiS0_ii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z18backprop_mean_poolPfS_PiS0_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1536
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8max_poolPfPiS_S0_S0_ii
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z8max_poolPfPiS_S0_S0_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17backprop_max_poolPfPiS_S0_S0_ii
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z17backprop_max_poolPfPiS_S0_S0_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
// Mean pooling over each document's word vectors: averages the per-dimension
// values of all words in a document into one vector per document in `means`.
// Indexing contract (inferred from `means[step*dims + threadIdx.x]`): one
// block per document via a grid-stride loop over documents; threadIdx.x is
// the feature dimension — assumes blockDim.x == dims and dims <= 256, TODO confirm.
void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
// Per-block accumulator, one slot per feature dimension (capacity 256).
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
// prevLengths[step] is presumably the cumulative word offset of this
// document; times dims gives the float offset into `words`.
int blockStarts = prevLengths[step]*dims;
// 0.0 is a double literal narrowed to float on assignment (harmless here).
local_means[threadIdx.x] = 0.0;
// Stride-dims walk accumulates this thread's dimension across all words.
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
local_means[threadIdx.x] += words[i];
// Barrier is uniform (same trip counts block-wide); each thread reads only
// its own slot afterwards, so it mainly orders writes before the store.
__syncthreads();
// NOTE(review): wordsInDoc == 0 yields 0/0 = NaN — confirm empty
// documents cannot occur.
means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc;
}
}
// Backward pass of mean pooling: broadcasts each document's pooled gradient
// (read from `means`) to every word position of that document in `words`,
// scaled by 1/wordsInDoc (derivative of the mean).
// Indexing contract mirrors mean_pool: one block per document (grid-stride),
// threadIdx.x = feature dimension — assumes blockDim.x == dims, dims <= 256,
// TODO confirm.
void __global__ backprop_mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
// Per-block staging of the pooled gradient, one slot per dimension.
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
// No __syncthreads() needed: each thread reads back only the slot it
// wrote itself (both indexed by threadIdx.x).
local_means[threadIdx.x] = means[step*dims+threadIdx.x];
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
// float / int promotes to float division; wordsInDoc == 0 skips the
// loop, so no divide-by-zero is written.
words[i] = local_means[threadIdx.x]/wordsInDoc;
}
}
// Max pooling over each document's word vectors: writes the per-dimension
// maximum into `maxes` and the winning word's position within the document
// into `which` (consumed by backprop_max_pool).
// Indexing contract (inferred from `maxes[step*dims + threadIdx.x]`): one
// block per document (grid-stride); threadIdx.x = feature dimension —
// assumes blockDim.x == dims and dims <= 256, TODO confirm.
void __global__ max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
// Running maximum and its word index, one slot per feature dimension.
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
// Seed with word 0. NOTE(review): if wordsInDoc == 0 this still reads
// words[blockStarts+threadIdx.x] (the next document's data) — confirm
// empty documents cannot occur.
local_maxes[threadIdx.x] = words[blockStarts+threadIdx.x];
local_which[threadIdx.x] = 0;
short j=1; // the word index in a doc (word 0 already consumed as seed)
// NOTE(review): `short` overflows for documents with > 32767 words.
for (int i = blockStarts+dims+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(words[i]>local_maxes[threadIdx.x])
{
local_maxes[threadIdx.x] = words[i];
local_which[threadIdx.x] = j;
}
j++;
}
// Uniform barrier; each thread then reads only its own slot, so this
// mainly orders the shared-memory writes before the global stores.
__syncthreads();
maxes[step*dims + threadIdx.x] = local_maxes[threadIdx.x];
// short value widened to int on store into `which`.
which[step*dims + threadIdx.x] = local_which[threadIdx.x];
}
}
// Backward pass of max pooling: scatters the pooled gradient in `maxes`
// back into the dense `words` buffer. Only the word position recorded in
// `which` receives the gradient for a given dimension; all other word
// positions are zeroed (max pooling routes gradient solely to the argmax).
// Indexing contract mirrors max_pool: one block per document (grid-stride),
// threadIdx.x = feature dimension — assumes blockDim.x == dims, dims <= 256,
// TODO confirm.
void __global__ backprop_max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
// Cached gradient value and winning word index for this dimension.
__shared__ float local_maxes[256];
__shared__ short local_which[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;
// Each thread reads/writes only its own slots, so no barrier is needed.
local_maxes[threadIdx.x] = maxes[step*dims+threadIdx.x];
// int from `which` narrowed into a short slot (written as short by max_pool).
local_which[threadIdx.x] = which[step*dims+threadIdx.x];
short j=0; // the word index in a doc
for (int i = blockStarts+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(local_which[threadIdx.x]==j)
{
words[i] = local_maxes[threadIdx.x];
}
else
words[i]=0;
j++;
}
}
}
.file "kernel.hip"
.globl _Z24__device_stub__mean_poolPfS_PiS0_ii # -- Begin function _Z24__device_stub__mean_poolPfS_PiS0_ii
.p2align 4, 0x90
.type _Z24__device_stub__mean_poolPfS_PiS0_ii,@function
_Z24__device_stub__mean_poolPfS_PiS0_ii: # @_Z24__device_stub__mean_poolPfS_PiS0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9mean_poolPfS_PiS0_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z24__device_stub__mean_poolPfS_PiS0_ii, .Lfunc_end0-_Z24__device_stub__mean_poolPfS_PiS0_ii
.cfi_endproc
# -- End function
.globl _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii # -- Begin function _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.p2align 4, 0x90
.type _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii,@function
_Z33__device_stub__backprop_mean_poolPfS_PiS0_ii: # @_Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z18backprop_mean_poolPfS_PiS0_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii, .Lfunc_end1-_Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.cfi_endproc
# -- End function
.globl _Z23__device_stub__max_poolPfPiS_S0_S0_ii # -- Begin function _Z23__device_stub__max_poolPfPiS_S0_S0_ii
.p2align 4, 0x90
.type _Z23__device_stub__max_poolPfPiS_S0_S0_ii,@function
_Z23__device_stub__max_poolPfPiS_S0_S0_ii: # @_Z23__device_stub__max_poolPfPiS_S0_S0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8max_poolPfPiS_S0_S0_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z23__device_stub__max_poolPfPiS_S0_S0_ii, .Lfunc_end2-_Z23__device_stub__max_poolPfPiS_S0_S0_ii
.cfi_endproc
# -- End function
.globl _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii # -- Begin function _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.p2align 4, 0x90
.type _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii,@function
_Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii: # @_Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z17backprop_max_poolPfPiS_S0_S0_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii, .Lfunc_end3-_Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9mean_poolPfS_PiS0_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18backprop_mean_poolPfS_PiS0_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8max_poolPfPiS_S0_S0_ii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17backprop_max_poolPfPiS_S0_S0_ii, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9mean_poolPfS_PiS0_ii,@object # @_Z9mean_poolPfS_PiS0_ii
.section .rodata,"a",@progbits
.globl _Z9mean_poolPfS_PiS0_ii
.p2align 3, 0x0
_Z9mean_poolPfS_PiS0_ii:
.quad _Z24__device_stub__mean_poolPfS_PiS0_ii
.size _Z9mean_poolPfS_PiS0_ii, 8
.type _Z18backprop_mean_poolPfS_PiS0_ii,@object # @_Z18backprop_mean_poolPfS_PiS0_ii
.globl _Z18backprop_mean_poolPfS_PiS0_ii
.p2align 3, 0x0
_Z18backprop_mean_poolPfS_PiS0_ii:
.quad _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.size _Z18backprop_mean_poolPfS_PiS0_ii, 8
.type _Z8max_poolPfPiS_S0_S0_ii,@object # @_Z8max_poolPfPiS_S0_S0_ii
.globl _Z8max_poolPfPiS_S0_S0_ii
.p2align 3, 0x0
_Z8max_poolPfPiS_S0_S0_ii:
.quad _Z23__device_stub__max_poolPfPiS_S0_S0_ii
.size _Z8max_poolPfPiS_S0_S0_ii, 8
.type _Z17backprop_max_poolPfPiS_S0_S0_ii,@object # @_Z17backprop_max_poolPfPiS_S0_S0_ii
.globl _Z17backprop_max_poolPfPiS_S0_S0_ii
.p2align 3, 0x0
_Z17backprop_max_poolPfPiS_S0_S0_ii:
.quad _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.size _Z17backprop_max_poolPfPiS_S0_S0_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9mean_poolPfS_PiS0_ii"
.size .L__unnamed_1, 24
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z18backprop_mean_poolPfS_PiS0_ii"
.size .L__unnamed_2, 34
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z8max_poolPfPiS_S0_S0_ii"
.size .L__unnamed_3, 26
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z17backprop_max_poolPfPiS_S0_S0_ii"
.size .L__unnamed_4, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__mean_poolPfS_PiS0_ii
.addrsig_sym _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.addrsig_sym _Z23__device_stub__max_poolPfPiS_S0_S0_ii
.addrsig_sym _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9mean_poolPfS_PiS0_ii
.addrsig_sym _Z18backprop_mean_poolPfS_PiS0_ii
.addrsig_sym _Z8max_poolPfPiS_S0_S0_ii
.addrsig_sym _Z17backprop_max_poolPfPiS_S0_S0_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00119ae6_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii
.type _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii, @function
_Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii:
.LFB2081:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9mean_poolPfS_PiS0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii, .-_Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii
.globl _Z9mean_poolPfS_PiS0_ii
.type _Z9mean_poolPfS_PiS0_ii, @function
_Z9mean_poolPfS_PiS0_ii:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z9mean_poolPfS_PiS0_iiPfS_PiS0_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z9mean_poolPfS_PiS0_ii, .-_Z9mean_poolPfS_PiS0_ii
.globl _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii
.type _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii, @function
_Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z18backprop_mean_poolPfS_PiS0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii, .-_Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii
.globl _Z18backprop_mean_poolPfS_PiS0_ii
.type _Z18backprop_mean_poolPfS_PiS0_ii, @function
_Z18backprop_mean_poolPfS_PiS0_ii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z18backprop_mean_poolPfS_PiS0_iiPfS_PiS0_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z18backprop_mean_poolPfS_PiS0_ii, .-_Z18backprop_mean_poolPfS_PiS0_ii
.globl _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.type _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, @function
_Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8max_poolPfPiS_S0_S0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, .-_Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.globl _Z8max_poolPfPiS_S0_S0_ii
.type _Z8max_poolPfPiS_S0_S0_ii, @function
_Z8max_poolPfPiS_S0_S0_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z39__device_stub__Z8max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z8max_poolPfPiS_S0_S0_ii, .-_Z8max_poolPfPiS_S0_S0_ii
.globl _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.type _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, @function
_Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17backprop_max_poolPfPiS_S0_S0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii, .-_Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
.globl _Z17backprop_max_poolPfPiS_S0_S0_ii
.type _Z17backprop_max_poolPfPiS_S0_S0_ii, @function
_Z17backprop_max_poolPfPiS_S0_S0_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z49__device_stub__Z17backprop_max_poolPfPiS_S0_S0_iiPfPiS_S0_S0_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z17backprop_max_poolPfPiS_S0_S0_ii, .-_Z17backprop_max_poolPfPiS_S0_S0_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z17backprop_max_poolPfPiS_S0_S0_ii"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z8max_poolPfPiS_S0_S0_ii"
.section .rodata.str1.8
.align 8
.LC2:
.string "_Z18backprop_mean_poolPfS_PiS0_ii"
.section .rodata.str1.1
.LC3:
.string "_Z9mean_poolPfS_PiS0_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17backprop_max_poolPfPiS_S0_S0_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z8max_poolPfPiS_S0_S0_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z18backprop_mean_poolPfS_PiS0_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9mean_poolPfS_PiS0_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z24__device_stub__mean_poolPfS_PiS0_ii # -- Begin function _Z24__device_stub__mean_poolPfS_PiS0_ii
.p2align 4, 0x90
.type _Z24__device_stub__mean_poolPfS_PiS0_ii,@function
_Z24__device_stub__mean_poolPfS_PiS0_ii: # @_Z24__device_stub__mean_poolPfS_PiS0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9mean_poolPfS_PiS0_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z24__device_stub__mean_poolPfS_PiS0_ii, .Lfunc_end0-_Z24__device_stub__mean_poolPfS_PiS0_ii
.cfi_endproc
# -- End function
.globl _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii # -- Begin function _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.p2align 4, 0x90
.type _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii,@function
_Z33__device_stub__backprop_mean_poolPfS_PiS0_ii: # @_Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z18backprop_mean_poolPfS_PiS0_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii, .Lfunc_end1-_Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.cfi_endproc
# -- End function
.globl _Z23__device_stub__max_poolPfPiS_S0_S0_ii # -- Begin function _Z23__device_stub__max_poolPfPiS_S0_S0_ii
.p2align 4, 0x90
.type _Z23__device_stub__max_poolPfPiS_S0_S0_ii,@function
_Z23__device_stub__max_poolPfPiS_S0_S0_ii: # @_Z23__device_stub__max_poolPfPiS_S0_S0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8max_poolPfPiS_S0_S0_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z23__device_stub__max_poolPfPiS_S0_S0_ii, .Lfunc_end2-_Z23__device_stub__max_poolPfPiS_S0_S0_ii
.cfi_endproc
# -- End function
.globl _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii # -- Begin function _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.p2align 4, 0x90
.type _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii,@function
_Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii: # @_Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z17backprop_max_poolPfPiS_S0_S0_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii, .Lfunc_end3-_Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9mean_poolPfS_PiS0_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18backprop_mean_poolPfS_PiS0_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8max_poolPfPiS_S0_S0_ii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17backprop_max_poolPfPiS_S0_S0_ii, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9mean_poolPfS_PiS0_ii,@object # @_Z9mean_poolPfS_PiS0_ii
.section .rodata,"a",@progbits
.globl _Z9mean_poolPfS_PiS0_ii
.p2align 3, 0x0
_Z9mean_poolPfS_PiS0_ii:
.quad _Z24__device_stub__mean_poolPfS_PiS0_ii
.size _Z9mean_poolPfS_PiS0_ii, 8
.type _Z18backprop_mean_poolPfS_PiS0_ii,@object # @_Z18backprop_mean_poolPfS_PiS0_ii
.globl _Z18backprop_mean_poolPfS_PiS0_ii
.p2align 3, 0x0
_Z18backprop_mean_poolPfS_PiS0_ii:
.quad _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.size _Z18backprop_mean_poolPfS_PiS0_ii, 8
.type _Z8max_poolPfPiS_S0_S0_ii,@object # @_Z8max_poolPfPiS_S0_S0_ii
.globl _Z8max_poolPfPiS_S0_S0_ii
.p2align 3, 0x0
_Z8max_poolPfPiS_S0_S0_ii:
.quad _Z23__device_stub__max_poolPfPiS_S0_S0_ii
.size _Z8max_poolPfPiS_S0_S0_ii, 8
.type _Z17backprop_max_poolPfPiS_S0_S0_ii,@object # @_Z17backprop_max_poolPfPiS_S0_S0_ii
.globl _Z17backprop_max_poolPfPiS_S0_S0_ii
.p2align 3, 0x0
_Z17backprop_max_poolPfPiS_S0_S0_ii:
.quad _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.size _Z17backprop_max_poolPfPiS_S0_S0_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9mean_poolPfS_PiS0_ii"
.size .L__unnamed_1, 24
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z18backprop_mean_poolPfS_PiS0_ii"
.size .L__unnamed_2, 34
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z8max_poolPfPiS_S0_S0_ii"
.size .L__unnamed_3, 26
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z17backprop_max_poolPfPiS_S0_S0_ii"
.size .L__unnamed_4, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__mean_poolPfS_PiS0_ii
.addrsig_sym _Z33__device_stub__backprop_mean_poolPfS_PiS0_ii
.addrsig_sym _Z23__device_stub__max_poolPfPiS_S0_S0_ii
.addrsig_sym _Z32__device_stub__backprop_max_poolPfPiS_S0_S0_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9mean_poolPfS_PiS0_ii
.addrsig_sym _Z18backprop_mean_poolPfS_PiS0_ii
.addrsig_sym _Z8max_poolPfPiS_S0_S0_ii
.addrsig_sym _Z17backprop_max_poolPfPiS_S0_S0_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* Example showing the use of CUFFT for fast 1D-convolution using FFT. -KERNEL part separated from original source*/
#include <vector_types.h>
// Complex data type
typedef float2 Complex;
static __device__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
extern "C"
// Complex pointwise multiplication
// Note the static function cannot be applicable to extern "C"
/*static*/ __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
    // Grid-stride setup: numThreads is the total thread count of the launch,
    // so each thread processes every numThreads-th element and any grid size
    // covers all `size` elements.
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    // In-place update: a[i] = scale * (a[i] * b[i]) (complex product); b is read-only.
    for (int i = threadID; i < size; i += numThreads)
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
// Complex scale
static __device__ inline Complex ComplexScale(Complex a, float s)
{
    // Component-wise scaling: returns (s*a.x, s*a.y).
    Complex c;
    c.x = s * a.x;
    c.y = s * a.y;
    return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    // Complex product of (a.x + i*a.y) and (b.x + i*b.y):
    // real part a.x*b.x - a.y*b.y, imaginary part a.x*b.y + a.y*b.x.
    Complex c;
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;
    return c;
} | code for sm_80
Function : ComplexPointwiseMulAndScale
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x370 ; /* 0x000002e000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IADD3 R9, RZ, -R0, RZ ; /* 0x80000000ff097210 */
/* 0x000fe40007ffe0ff */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x170], R0 ; /* 0x00005c0007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fca00078e00ff */
/*0170*/ IADD3 R4, -R2, RZ, RZ ; /* 0x000000ff02047210 */
/* 0x000fca0007ffe1ff */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x360 ; /* 0x0000013000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x8 ; /* 0x0000000800067802 */
/* 0x000fe40000000f00 */
/*0240*/ MOV R2, R4 ; /* 0x0000000400027202 */
/* 0x000fc60000000f00 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x160] ; /* 0x0000580003067625 */
/* 0x000fc800078e0206 */
/*0270*/ LDG.E.64 R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x0000a8000c1e1b00 */
/*0280*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */
/* 0x000ea2000c1e1b00 */
/*0290*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ IMAD.WIDE R4, R0, 0x8, R4 ; /* 0x0000000800047825 */
/* 0x001fc800078e0204 */
/*02d0*/ FMUL R13, R9.reuse, R11.reuse ; /* 0x0000000b090d7220 */
/* 0x0c4fe40000400000 */
/*02e0*/ FMUL R9, R9, R10.reuse ; /* 0x0000000a09097220 */
/* 0x080fe40000400000 */
/*02f0*/ FFMA R13, R8.reuse, R10, -R13 ; /* 0x0000000a080d7223 */
/* 0x040fe4000000080d */
/*0300*/ FFMA R9, R8, R11, R9 ; /* 0x0000000b08097223 */
/* 0x000fe40000000009 */
/*0310*/ FMUL R8, R13, c[0x0][0x174] ; /* 0x00005d000d087a20 */
/* 0x000fe40000400000 */
/*0320*/ FMUL R9, R9, c[0x0][0x174] ; /* 0x00005d0009097a20 */
/* 0x000fca0000400000 */
/*0330*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x0001e4000c101b04 */
/*0340*/ IMAD.WIDE R6, R0, 0x8, R6 ; /* 0x0000000800067825 */
/* 0x001fe200078e0206 */
/*0350*/ @P0 BRA 0x270 ; /* 0xffffff1000000947 */
/* 0x000fea000383ffff */
/*0360*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0370*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0380*/ HFMA2.MMA R8, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff087435 */
/* 0x001fd400000001ff */
/*0390*/ IMAD.WIDE R6, R3, R8, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0208 */
/*03a0*/ IMAD.WIDE R8, R3, R8, c[0x0][0x160] ; /* 0x0000580003087625 */
/* 0x000fe200078e0208 */
/*03b0*/ LDG.E.64 R10, [R6.64] ; /* 0x00000004060a7981 */
/* 0x000ea8000c1e1b00 */
/*03c0*/ LDG.E.64 R4, [R8.64] ; /* 0x0000000408047981 */
/* 0x000ea4000c1e1b00 */
/*03d0*/ FMUL R13, R5.reuse, R11 ; /* 0x0000000b050d7220 */
/* 0x044fe40000400000 */
/*03e0*/ FMUL R5, R5, R10.reuse ; /* 0x0000000a05057220 */
/* 0x080fe40000400000 */
/*03f0*/ FFMA R13, R4, R10, -R13 ; /* 0x0000000a040d7223 */
/* 0x000fc4000000080d */
/*0400*/ FFMA R5, R4, R11, R5 ; /* 0x0000000b04057223 */
/* 0x000fe40000000005 */
/*0410*/ FMUL R14, R13, c[0x0][0x174] ; /* 0x00005d000d0e7a20 */
/* 0x000fe40000400000 */
/*0420*/ FMUL R15, R5, c[0x0][0x174] ; /* 0x00005d00050f7a20 */
/* 0x000fe40000400000 */
/*0430*/ IMAD.WIDE R10, R0, 0x8, R6 ; /* 0x00000008000a7825 */
/* 0x000fc600078e0206 */
/*0440*/ STG.E.64 [R8.64], R14 ; /* 0x0000000e08007986 */
/* 0x0001e2000c101b04 */
/*0450*/ IMAD.WIDE R4, R0, 0x8, R8 ; /* 0x0000000800047825 */
/* 0x000fc600078e0208 */
/*0460*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea8000c1e1b00 */
/*0470*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea4000c1e1b00 */
/*0480*/ FMUL R17, R7.reuse, R13 ; /* 0x0000000d07117220 */
/* 0x044fe40000400000 */
/*0490*/ FMUL R7, R7, R12.reuse ; /* 0x0000000c07077220 */
/* 0x080fe40000400000 */
/*04a0*/ FFMA R17, R6, R12, -R17 ; /* 0x0000000c06117223 */
/* 0x000fc40000000811 */
/*04b0*/ FFMA R7, R6, R13, R7 ; /* 0x0000000d06077223 */
/* 0x000fe40000000007 */
/*04c0*/ FMUL R16, R17, c[0x0][0x174] ; /* 0x00005d0011107a20 */
/* 0x000fe40000400000 */
/*04d0*/ FMUL R17, R7, c[0x0][0x174] ; /* 0x00005d0007117a20 */
/* 0x000fe40000400000 */
/*04e0*/ IMAD.WIDE R12, R0, 0x8, R10 ; /* 0x00000008000c7825 */
/* 0x000fc600078e020a */
/*04f0*/ STG.E.64 [R4.64], R16 ; /* 0x0000001004007986 */
/* 0x0003e2000c101b04 */
/*0500*/ IMAD.WIDE R6, R0, 0x8, R4 ; /* 0x0000000800067825 */
/* 0x000fc600078e0204 */
/*0510*/ LDG.E.64 R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea8000c1e1b00 */
/*0520*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */
/* 0x001ea4000c1e1b00 */
/*0530*/ FMUL R15, R9.reuse, R11 ; /* 0x0000000b090f7220 */
/* 0x044fe40000400000 */
/*0540*/ FMUL R9, R9, R10.reuse ; /* 0x0000000a09097220 */
/* 0x080fe40000400000 */
/*0550*/ FFMA R15, R8, R10, -R15 ; /* 0x0000000a080f7223 */
/* 0x000fc4000000080f */
/*0560*/ FFMA R9, R8, R11, R9 ; /* 0x0000000b08097223 */
/* 0x000fe40000000009 */
/*0570*/ FMUL R14, R15, c[0x0][0x174] ; /* 0x00005d000f0e7a20 */
/* 0x000fe40000400000 */
/*0580*/ FMUL R15, R9, c[0x0][0x174] ; /* 0x00005d00090f7a20 */
/* 0x000fe40000400000 */
/*0590*/ IMAD.WIDE R10, R0, 0x8, R12 ; /* 0x00000008000a7825 */
/* 0x000fc600078e020c */
/*05a0*/ STG.E.64 [R6.64], R14 ; /* 0x0000000e06007986 */
/* 0x0001e2000c101b04 */
/*05b0*/ IMAD.WIDE R8, R0, 0x8, R6 ; /* 0x0000000800087825 */
/* 0x000fc600078e0206 */
/*05c0*/ LDG.E.64 R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000ea8000c1e1b00 */
/*05d0*/ LDG.E.64 R4, [R8.64] ; /* 0x0000000408047981 */
/* 0x002ea2000c1e1b00 */
/*05e0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*05f0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*0600*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fe20003f06270 */
/*0610*/ FMUL R13, R5.reuse, R11.reuse ; /* 0x0000000b050d7220 */
/* 0x0c4fe40000400000 */
/*0620*/ FMUL R5, R5, R10.reuse ; /* 0x0000000a05057220 */
/* 0x080fe40000400000 */
/*0630*/ FFMA R13, R4.reuse, R10, -R13 ; /* 0x0000000a040d7223 */
/* 0x040fe4000000080d */
/*0640*/ FFMA R5, R4, R11, R5 ; /* 0x0000000b04057223 */
/* 0x000fe40000000005 */
/*0650*/ FMUL R4, R13, c[0x0][0x174] ; /* 0x00005d000d047a20 */
/* 0x000fe40000400000 */
/*0660*/ FMUL R5, R5, c[0x0][0x174] ; /* 0x00005d0005057a20 */
/* 0x000fca0000400000 */
/*0670*/ STG.E.64 [R8.64], R4 ; /* 0x0000000408007986 */
/* 0x0001e2000c101b04 */
/*0680*/ @!P0 BRA 0x380 ; /* 0xfffffcf000008947 */
/* 0x000fea000383ffff */
/*0690*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*06a0*/ BRA 0x6a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0700*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0710*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0720*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0730*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0740*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0750*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0760*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0770*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Example showing the use of CUFFT for fast 1D-convolution using FFT. -KERNEL part separated from original source*/
#include <vector_types.h>
// Complex data type
typedef float2 Complex;
static __device__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
extern "C"
// Complex pointwise multiplication
// Note the static function cannot be applicable to extern "C"
/*static*/ __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
    // Grid-stride setup: numThreads is the total thread count of the launch,
    // so each thread processes every numThreads-th element and any grid size
    // covers all `size` elements.
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    // In-place update: a[i] = scale * (a[i] * b[i]) (complex product); b is read-only.
    for (int i = threadID; i < size; i += numThreads)
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
// Complex scale
static __device__ inline Complex ComplexScale(Complex a, float s)
{
    // Component-wise scaling: returns (s*a.x, s*a.y).
    Complex c;
    c.x = s * a.x;
    c.y = s * a.y;
    return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    // Complex product of (a.x + i*a.y) and (b.x + i*b.y):
    // real part a.x*b.x - a.y*b.y, imaginary part a.x*b.y + a.y*b.x.
    Complex c;
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;
    return c;
} | .file "tmpxft_00125d24_00000000-6_simpleCUFFT_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if
.type _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if, @function
_Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq ComplexPointwiseMulAndScale(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if, .-_Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if
.globl ComplexPointwiseMulAndScale
.type ComplexPointwiseMulAndScale, @function
ComplexPointwiseMulAndScale:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size ComplexPointwiseMulAndScale, .-ComplexPointwiseMulAndScale
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "ComplexPointwiseMulAndScale"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq ComplexPointwiseMulAndScale(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Example showing the use of CUFFT for fast 1D-convolution using FFT. -KERNEL part separated from original source*/
#include <vector_types.h>
// Complex data type
typedef float2 Complex;
static __device__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
extern "C"
// Complex pointwise multiplication
// Note the static function cannot be applicable to extern "C"
/*static*/ __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
    // Grid-stride setup: numThreads is the total thread count of the launch,
    // so each thread processes every numThreads-th element and any grid size
    // covers all `size` elements.
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    // In-place update: a[i] = scale * (a[i] * b[i]) (complex product); b is read-only.
    for (int i = threadID; i < size; i += numThreads)
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
// Complex scale
static __device__ inline Complex ComplexScale(Complex a, float s)
{
    // Component-wise scaling: returns (s*a.x, s*a.y).
    Complex c;
    c.x = s * a.x;
    c.y = s * a.y;
    return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    // Complex product of (a.x + i*a.y) and (b.x + i*b.y):
    // real part a.x*b.x - a.y*b.y, imaginary part a.x*b.y + a.y*b.x.
    Complex c;
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;
    return c;
} | /* Example showing the use of CUFFT for fast 1D-convolution using FFT. -KERNEL part separated from original source*/
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
// Complex data type
typedef float2 Complex;
static __device__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
extern "C"
// Complex pointwise multiplication
// Note the static function cannot be applicable to extern "C"
/*static*/ __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
    // Grid-stride setup: numThreads is the total thread count of the launch,
    // so each thread processes every numThreads-th element and any grid size
    // covers all `size` elements.
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    // In-place update: a[i] = scale * (a[i] * b[i]) (complex product); b is read-only.
    for (int i = threadID; i < size; i += numThreads)
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
// Complex scale
static __device__ inline Complex ComplexScale(Complex a, float s)
{
    // Component-wise scaling: returns (s*a.x, s*a.y).
    Complex c;
    c.x = s * a.x;
    c.y = s * a.y;
    return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    // Complex product of (a.x + i*a.y) and (b.x + i*b.y):
    // real part a.x*b.x - a.y*b.y, imaginary part a.x*b.y + a.y*b.x.
    Complex c;
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;
    return c;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* Example showing the use of CUFFT for fast 1D-convolution using FFT. -KERNEL part separated from original source*/
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
// Complex data type
typedef float2 Complex;
static __device__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
extern "C"
// Complex pointwise multiplication
// Note the static function cannot be applicable to extern "C"
/*static*/ __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
    // Grid-stride setup: numThreads is the total thread count of the launch,
    // so each thread processes every numThreads-th element and any grid size
    // covers all `size` elements.
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    // In-place update: a[i] = scale * (a[i] * b[i]) (complex product); b is read-only.
    for (int i = threadID; i < size; i += numThreads)
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
// Complex scale
static __device__ inline Complex ComplexScale(Complex a, float s)
{
    // Component-wise scaling: returns (s*a.x, s*a.y).
    Complex c;
    c.x = s * a.x;
    c.y = s * a.y;
    return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    // Complex product of (a.x + i*a.y) and (b.x + i*b.y):
    // real part a.x*b.x - a.y*b.y, imaginary part a.x*b.y + a.y*b.x.
    Complex c;
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;
    return c;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected ComplexPointwiseMulAndScale
.globl ComplexPointwiseMulAndScale
.p2align 8
.type ComplexPointwiseMulAndScale,@function
ComplexPointwiseMulAndScale:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x14
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 3, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 3
s_mov_b32 s3, 0
.p2align 6
.LBB0_2:
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s8
global_load_b64 v[8:9], v[4:5], off
global_load_b64 v[6:7], v[6:7], off
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
v_add_nc_u32_e32 v1, s2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_i32_e64 s0, s10, v1
s_or_b32 s3, s0, s3
s_waitcnt vmcnt(0)
v_mul_f32_e32 v0, v9, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f32 v0, v8, v6, -v0
v_mul_f32_e32 v7, v8, v7
v_dual_fmac_f32 v7, v9, v6 :: v_dual_mul_f32 v6, s1, v0
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v7, s1, v7
global_store_b64 v[4:5], v[6:7], off
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel ComplexPointwiseMulAndScale
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size ComplexPointwiseMulAndScale, .Lfunc_end0-ComplexPointwiseMulAndScale
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: ComplexPointwiseMulAndScale
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: ComplexPointwiseMulAndScale.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* Example showing the use of CUFFT for fast 1D-convolution using FFT. -KERNEL part separated from original source*/
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
// Complex data type
typedef float2 Complex;
static __device__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
extern "C"
// Complex pointwise multiplication
// Note the static function cannot be applicable to extern "C"
/*static*/ __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
    // Grid-stride setup: numThreads is the total thread count of the launch,
    // so each thread processes every numThreads-th element and any grid size
    // covers all `size` elements.
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    // In-place update: a[i] = scale * (a[i] * b[i]) (complex product); b is read-only.
    for (int i = threadID; i < size; i += numThreads)
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
// Complex scale
static __device__ inline Complex ComplexScale(Complex a, float s)
{
    // Component-wise scaling: returns (s*a.x, s*a.y).
    Complex c;
    c.x = s * a.x;
    c.y = s * a.y;
    return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
} | .text
.file "simpleCUFFT_kernel.hip"
.globl __device_stub__ComplexPointwiseMulAndScale # -- Begin function __device_stub__ComplexPointwiseMulAndScale
.p2align 4, 0x90
.type __device_stub__ComplexPointwiseMulAndScale,@function
__device_stub__ComplexPointwiseMulAndScale: # @__device_stub__ComplexPointwiseMulAndScale
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $ComplexPointwiseMulAndScale, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__ComplexPointwiseMulAndScale, .Lfunc_end0-__device_stub__ComplexPointwiseMulAndScale
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $ComplexPointwiseMulAndScale, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type ComplexPointwiseMulAndScale,@object # @ComplexPointwiseMulAndScale
.section .rodata,"a",@progbits
.globl ComplexPointwiseMulAndScale
.p2align 3, 0x0
ComplexPointwiseMulAndScale:
.quad __device_stub__ComplexPointwiseMulAndScale
.size ComplexPointwiseMulAndScale, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "ComplexPointwiseMulAndScale"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__ComplexPointwiseMulAndScale
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym ComplexPointwiseMulAndScale
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : ComplexPointwiseMulAndScale
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x370 ; /* 0x000002e000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IADD3 R9, RZ, -R0, RZ ; /* 0x80000000ff097210 */
/* 0x000fe40007ffe0ff */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x170], R0 ; /* 0x00005c0007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fca00078e00ff */
/*0170*/ IADD3 R4, -R2, RZ, RZ ; /* 0x000000ff02047210 */
/* 0x000fca0007ffe1ff */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x360 ; /* 0x0000013000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x8 ; /* 0x0000000800067802 */
/* 0x000fe40000000f00 */
/*0240*/ MOV R2, R4 ; /* 0x0000000400027202 */
/* 0x000fc60000000f00 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x160] ; /* 0x0000580003067625 */
/* 0x000fc800078e0206 */
/*0270*/ LDG.E.64 R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x0000a8000c1e1b00 */
/*0280*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */
/* 0x000ea2000c1e1b00 */
/*0290*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ IMAD.WIDE R4, R0, 0x8, R4 ; /* 0x0000000800047825 */
/* 0x001fc800078e0204 */
/*02d0*/ FMUL R13, R9.reuse, R11.reuse ; /* 0x0000000b090d7220 */
/* 0x0c4fe40000400000 */
/*02e0*/ FMUL R9, R9, R10.reuse ; /* 0x0000000a09097220 */
/* 0x080fe40000400000 */
/*02f0*/ FFMA R13, R8.reuse, R10, -R13 ; /* 0x0000000a080d7223 */
/* 0x040fe4000000080d */
/*0300*/ FFMA R9, R8, R11, R9 ; /* 0x0000000b08097223 */
/* 0x000fe40000000009 */
/*0310*/ FMUL R8, R13, c[0x0][0x174] ; /* 0x00005d000d087a20 */
/* 0x000fe40000400000 */
/*0320*/ FMUL R9, R9, c[0x0][0x174] ; /* 0x00005d0009097a20 */
/* 0x000fca0000400000 */
/*0330*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x0001e4000c101b04 */
/*0340*/ IMAD.WIDE R6, R0, 0x8, R6 ; /* 0x0000000800067825 */
/* 0x001fe200078e0206 */
/*0350*/ @P0 BRA 0x270 ; /* 0xffffff1000000947 */
/* 0x000fea000383ffff */
/*0360*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0370*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0380*/ HFMA2.MMA R8, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff087435 */
/* 0x001fd400000001ff */
/*0390*/ IMAD.WIDE R6, R3, R8, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0208 */
/*03a0*/ IMAD.WIDE R8, R3, R8, c[0x0][0x160] ; /* 0x0000580003087625 */
/* 0x000fe200078e0208 */
/*03b0*/ LDG.E.64 R10, [R6.64] ; /* 0x00000004060a7981 */
/* 0x000ea8000c1e1b00 */
/*03c0*/ LDG.E.64 R4, [R8.64] ; /* 0x0000000408047981 */
/* 0x000ea4000c1e1b00 */
/*03d0*/ FMUL R13, R5.reuse, R11 ; /* 0x0000000b050d7220 */
/* 0x044fe40000400000 */
/*03e0*/ FMUL R5, R5, R10.reuse ; /* 0x0000000a05057220 */
/* 0x080fe40000400000 */
/*03f0*/ FFMA R13, R4, R10, -R13 ; /* 0x0000000a040d7223 */
/* 0x000fc4000000080d */
/*0400*/ FFMA R5, R4, R11, R5 ; /* 0x0000000b04057223 */
/* 0x000fe40000000005 */
/*0410*/ FMUL R14, R13, c[0x0][0x174] ; /* 0x00005d000d0e7a20 */
/* 0x000fe40000400000 */
/*0420*/ FMUL R15, R5, c[0x0][0x174] ; /* 0x00005d00050f7a20 */
/* 0x000fe40000400000 */
/*0430*/ IMAD.WIDE R10, R0, 0x8, R6 ; /* 0x00000008000a7825 */
/* 0x000fc600078e0206 */
/*0440*/ STG.E.64 [R8.64], R14 ; /* 0x0000000e08007986 */
/* 0x0001e2000c101b04 */
/*0450*/ IMAD.WIDE R4, R0, 0x8, R8 ; /* 0x0000000800047825 */
/* 0x000fc600078e0208 */
/*0460*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea8000c1e1b00 */
/*0470*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea4000c1e1b00 */
/*0480*/ FMUL R17, R7.reuse, R13 ; /* 0x0000000d07117220 */
/* 0x044fe40000400000 */
/*0490*/ FMUL R7, R7, R12.reuse ; /* 0x0000000c07077220 */
/* 0x080fe40000400000 */
/*04a0*/ FFMA R17, R6, R12, -R17 ; /* 0x0000000c06117223 */
/* 0x000fc40000000811 */
/*04b0*/ FFMA R7, R6, R13, R7 ; /* 0x0000000d06077223 */
/* 0x000fe40000000007 */
/*04c0*/ FMUL R16, R17, c[0x0][0x174] ; /* 0x00005d0011107a20 */
/* 0x000fe40000400000 */
/*04d0*/ FMUL R17, R7, c[0x0][0x174] ; /* 0x00005d0007117a20 */
/* 0x000fe40000400000 */
/*04e0*/ IMAD.WIDE R12, R0, 0x8, R10 ; /* 0x00000008000c7825 */
/* 0x000fc600078e020a */
/*04f0*/ STG.E.64 [R4.64], R16 ; /* 0x0000001004007986 */
/* 0x0003e2000c101b04 */
/*0500*/ IMAD.WIDE R6, R0, 0x8, R4 ; /* 0x0000000800067825 */
/* 0x000fc600078e0204 */
/*0510*/ LDG.E.64 R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea8000c1e1b00 */
/*0520*/ LDG.E.64 R8, [R6.64] ; /* 0x0000000406087981 */
/* 0x001ea4000c1e1b00 */
/*0530*/ FMUL R15, R9.reuse, R11 ; /* 0x0000000b090f7220 */
/* 0x044fe40000400000 */
/*0540*/ FMUL R9, R9, R10.reuse ; /* 0x0000000a09097220 */
/* 0x080fe40000400000 */
/*0550*/ FFMA R15, R8, R10, -R15 ; /* 0x0000000a080f7223 */
/* 0x000fc4000000080f */
/*0560*/ FFMA R9, R8, R11, R9 ; /* 0x0000000b08097223 */
/* 0x000fe40000000009 */
/*0570*/ FMUL R14, R15, c[0x0][0x174] ; /* 0x00005d000f0e7a20 */
/* 0x000fe40000400000 */
/*0580*/ FMUL R15, R9, c[0x0][0x174] ; /* 0x00005d00090f7a20 */
/* 0x000fe40000400000 */
/*0590*/ IMAD.WIDE R10, R0, 0x8, R12 ; /* 0x00000008000a7825 */
/* 0x000fc600078e020c */
/*05a0*/ STG.E.64 [R6.64], R14 ; /* 0x0000000e06007986 */
/* 0x0001e2000c101b04 */
/*05b0*/ IMAD.WIDE R8, R0, 0x8, R6 ; /* 0x0000000800087825 */
/* 0x000fc600078e0206 */
/*05c0*/ LDG.E.64 R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000ea8000c1e1b00 */
/*05d0*/ LDG.E.64 R4, [R8.64] ; /* 0x0000000408047981 */
/* 0x002ea2000c1e1b00 */
/*05e0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*05f0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*0600*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fe20003f06270 */
/*0610*/ FMUL R13, R5.reuse, R11.reuse ; /* 0x0000000b050d7220 */
/* 0x0c4fe40000400000 */
/*0620*/ FMUL R5, R5, R10.reuse ; /* 0x0000000a05057220 */
/* 0x080fe40000400000 */
/*0630*/ FFMA R13, R4.reuse, R10, -R13 ; /* 0x0000000a040d7223 */
/* 0x040fe4000000080d */
/*0640*/ FFMA R5, R4, R11, R5 ; /* 0x0000000b04057223 */
/* 0x000fe40000000005 */
/*0650*/ FMUL R4, R13, c[0x0][0x174] ; /* 0x00005d000d047a20 */
/* 0x000fe40000400000 */
/*0660*/ FMUL R5, R5, c[0x0][0x174] ; /* 0x00005d0005057a20 */
/* 0x000fca0000400000 */
/*0670*/ STG.E.64 [R8.64], R4 ; /* 0x0000000408007986 */
/* 0x0001e2000c101b04 */
/*0680*/ @!P0 BRA 0x380 ; /* 0xfffffcf000008947 */
/* 0x000fea000383ffff */
/*0690*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*06a0*/ BRA 0x6a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0700*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0710*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0720*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0730*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0740*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0750*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0760*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0770*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected ComplexPointwiseMulAndScale
.globl ComplexPointwiseMulAndScale
.p2align 8
.type ComplexPointwiseMulAndScale,@function
ComplexPointwiseMulAndScale:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x14
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 3, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 3
s_mov_b32 s3, 0
.p2align 6
.LBB0_2:
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s8
global_load_b64 v[8:9], v[4:5], off
global_load_b64 v[6:7], v[6:7], off
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
v_add_nc_u32_e32 v1, s2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_i32_e64 s0, s10, v1
s_or_b32 s3, s0, s3
s_waitcnt vmcnt(0)
v_mul_f32_e32 v0, v9, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f32 v0, v8, v6, -v0
v_mul_f32_e32 v7, v8, v7
v_dual_fmac_f32 v7, v9, v6 :: v_dual_mul_f32 v6, s1, v0
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v7, s1, v7
global_store_b64 v[4:5], v[6:7], off
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel ComplexPointwiseMulAndScale
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size ComplexPointwiseMulAndScale, .Lfunc_end0-ComplexPointwiseMulAndScale
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: ComplexPointwiseMulAndScale
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: ComplexPointwiseMulAndScale.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00125d24_00000000-6_simpleCUFFT_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if
.type _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if, @function
_Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq ComplexPointwiseMulAndScale(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if, .-_Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if
.globl ComplexPointwiseMulAndScale
.type ComplexPointwiseMulAndScale, @function
ComplexPointwiseMulAndScale:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z27ComplexPointwiseMulAndScaleP6float2PKS_ifP6float2PKS_if
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size ComplexPointwiseMulAndScale, .-ComplexPointwiseMulAndScale
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "ComplexPointwiseMulAndScale"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq ComplexPointwiseMulAndScale(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "simpleCUFFT_kernel.hip"
.globl __device_stub__ComplexPointwiseMulAndScale # -- Begin function __device_stub__ComplexPointwiseMulAndScale
.p2align 4, 0x90
.type __device_stub__ComplexPointwiseMulAndScale,@function
__device_stub__ComplexPointwiseMulAndScale: # @__device_stub__ComplexPointwiseMulAndScale
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $ComplexPointwiseMulAndScale, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__ComplexPointwiseMulAndScale, .Lfunc_end0-__device_stub__ComplexPointwiseMulAndScale
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $ComplexPointwiseMulAndScale, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type ComplexPointwiseMulAndScale,@object # @ComplexPointwiseMulAndScale
.section .rodata,"a",@progbits
.globl ComplexPointwiseMulAndScale
.p2align 3, 0x0
ComplexPointwiseMulAndScale:
.quad __device_stub__ComplexPointwiseMulAndScale
.size ComplexPointwiseMulAndScale, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "ComplexPointwiseMulAndScale"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__ComplexPointwiseMulAndScale
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym ComplexPointwiseMulAndScale
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define ROWS 32
#define COLS 16
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
// Fills the device matrix (addressed through the row-pointer table da) so that
// da[r][c] == r*cols + c. Expects a 2D launch; out-of-range threads do nothing.
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
	const unsigned int r = blockIdx.y * blockDim.y + threadIdx.y;
	const unsigned int c = blockIdx.x * blockDim.x + threadIdx.x;
	// Guard: the grid may overhang the rows x cols domain.
	if (r >= rows || c >= cols)
		return;
	da[r][c] = r * cols + c;
}
int main(int argc, char **argv)
{
int **da = NULL;
int **ha = NULL;
int *dc = NULL;
int *hc = NULL;
cudaError_t res;
int r, c;
bool is_right=true;
res = cudaMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
res = cudaMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
ha = (int**)malloc(ROWS*sizeof(int*));
hc = (int*)malloc(ROWS*COLS*sizeof(int));
for (r = 0; r < ROWS; r++)
{
ha[r] = dc + r*COLS;
}
res = cudaMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), cudaMemcpyHostToDevice);CHECK(res)
dim3 dimBlock(16,16);
dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
Kerneltest<<<dimGrid, dimBlock>>>(da, ROWS, COLS);
res = cudaMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), cudaMemcpyDeviceToHost);CHECK(res)
for (r = 0; r < ROWS; r++)
{
for (c = 0; c < COLS; c++)
{
printf("%4d ", hc[r*COLS+c]);
if (hc[r*COLS+c] != (r*COLS+c))
{
is_right = false;
}
}
printf("\n");
}
printf("the result is %s!\n", is_right? "right":"false");
cudaFree((void*)da);
cudaFree((void*)dc);
free(ha);
free(hc);
getchar();
return 0;
} | code for sm_80
Function : _Z10KerneltestPPijj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0020*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e280000002100 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R5, R5, c[0x0][0x0], R2 ; /* 0x0000000005057a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x16c], PT ; /* 0x00005b0005007a0c */
/* 0x000fe20003f06070 */
/*0070*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x002fca00078e0203 */
/*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x168], P0 ; /* 0x00005a0000007a0c */
/* 0x000fda0000706470 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff037435 */
/* 0x000fe200000001ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00c0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0003 */
/*00d0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00e0*/ IMAD R7, R0, c[0x0][0x16c], R5 ; /* 0x00005b0000077a24 */
/* 0x000fe400078e0205 */
/*00f0*/ IMAD.WIDE.U32 R4, R5, 0x4, R2 ; /* 0x0000000405047825 */
/* 0x004fca00078e0002 */
/*0100*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define ROWS 32
#define COLS 16
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
// Writes the linearised index y*cols + x into da[y][x] for every in-bounds thread.
// da is a device array of per-row pointers, so the store indexes through it twice.
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
	unsigned int y = threadIdx.y + blockDim.y * blockIdx.y;
	unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
	if (y < rows && x < cols)
	{
		da[y][x] = y * cols + x;
	}
}
int main(int argc, char **argv)
{
int **da = NULL;
int **ha = NULL;
int *dc = NULL;
int *hc = NULL;
cudaError_t res;
int r, c;
bool is_right=true;
res = cudaMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
res = cudaMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
ha = (int**)malloc(ROWS*sizeof(int*));
hc = (int*)malloc(ROWS*COLS*sizeof(int));
for (r = 0; r < ROWS; r++)
{
ha[r] = dc + r*COLS;
}
res = cudaMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), cudaMemcpyHostToDevice);CHECK(res)
dim3 dimBlock(16,16);
dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
Kerneltest<<<dimGrid, dimBlock>>>(da, ROWS, COLS);
res = cudaMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), cudaMemcpyDeviceToHost);CHECK(res)
for (r = 0; r < ROWS; r++)
{
for (c = 0; c < COLS; c++)
{
printf("%4d ", hc[r*COLS+c]);
if (hc[r*COLS+c] != (r*COLS+c))
{
is_right = false;
}
}
printf("\n");
}
printf("the result is %s!\n", is_right? "right":"false");
cudaFree((void*)da);
cudaFree((void*)dc);
free(ha);
free(hc);
getchar();
return 0;
} | .file "tmpxft_001367da_00000000-6_2Dvec_add.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z10KerneltestPPijjPPijj
.type _Z33__device_stub__Z10KerneltestPPijjPPijj, @function
_Z33__device_stub__Z10KerneltestPPijjPPijj:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z10KerneltestPPijj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z33__device_stub__Z10KerneltestPPijjPPijj, .-_Z33__device_stub__Z10KerneltestPPijjPPijj
.globl _Z10KerneltestPPijj
.type _Z10KerneltestPPijj, @function
_Z10KerneltestPPijj:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z10KerneltestPPijjPPijj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10KerneltestPPijj, .-_Z10KerneltestPPijj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "right"
.LC1:
.string "false"
.LC2:
.string "%4d "
.LC3:
.string "\n"
.LC4:
.string "the result is %s!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movq $0, 32(%rsp)
movq $0, 40(%rsp)
leaq 32(%rsp), %rdi
movl $256, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L29
leaq 40(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L30
movl $256, %edi
call malloc@PLT
movq %rax, %rbx
movq %rax, 16(%rsp)
movl $2048, %edi
call malloc@PLT
movq %rax, 24(%rsp)
movq 40(%rsp), %rax
movq %rbx, %rdx
leaq 2048(%rax), %rcx
.L14:
movq %rax, (%rdx)
addq $64, %rax
addq $8, %rdx
cmpq %rcx, %rax
jne .L14
movl $1, %ecx
movl $256, %edx
movq 16(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L31
movl $1, 60(%rsp)
movl $2, 64(%rsp)
movl $16, 48(%rsp)
movl $16, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movl $1, %ecx
movq 60(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L32
.L16:
movl $2, %ecx
movl $2048, %edx
movq 40(%rsp), %rsi
movq 24(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
movl %eax, %r15d
testl %eax, %eax
jne .L17
movq %rbx, %rax
leaq 64(%rbx), %r13
addq $2112, %rax
movq %rax, 8(%rsp)
movl $1, %r12d
leaq .LC2(%rip), %r14
jmp .L18
.L29:
movl $-1, %edi
call exit@PLT
.L30:
movl $-1, %edi
call exit@PLT
.L31:
movl $-1, %edi
call exit@PLT
.L32:
movl $16, %edx
movl $32, %esi
movq 32(%rsp), %rdi
call _Z33__device_stub__Z10KerneltestPPijjPPijj
jmp .L16
.L17:
movl $-1, %edi
call exit@PLT
.L33:
leaq .LC3(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
addq $64, %r13
addl $16, %r15d
movq 8(%rsp), %rax
cmpq %rax, %r13
je .L21
.L18:
leaq -64(%r13), %rbx
movl %r15d, %ebp
.L20:
movl (%rbx), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl %ebp, (%rbx)
movl $0, %eax
cmovne %eax, %r12d
addq $4, %rbx
addl $1, %ebp
cmpq %r13, %rbx
jne .L20
jmp .L33
.L21:
testb %r12b, %r12b
leaq .LC1(%rip), %rdx
leaq .LC0(%rip), %rax
cmovne %rax, %rdx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq stdin(%rip), %rdi
call getc@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L34
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z10KerneltestPPijj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z10KerneltestPPijj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define ROWS 32
#define COLS 16
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
unsigned int row = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x*blockIdx.x + threadIdx.x;
if (row < rows && col < cols)
{
da[row][col] = row*cols + col;
}
}
int main(int argc, char **argv)
{
int **da = NULL;
int **ha = NULL;
int *dc = NULL;
int *hc = NULL;
cudaError_t res;
int r, c;
bool is_right=true;
res = cudaMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
res = cudaMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
ha = (int**)malloc(ROWS*sizeof(int*));
hc = (int*)malloc(ROWS*COLS*sizeof(int));
for (r = 0; r < ROWS; r++)
{
ha[r] = dc + r*COLS;
}
res = cudaMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), cudaMemcpyHostToDevice);CHECK(res)
dim3 dimBlock(16,16);
dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
Kerneltest<<<dimGrid, dimBlock>>>(da, ROWS, COLS);
res = cudaMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), cudaMemcpyDeviceToHost);CHECK(res)
for (r = 0; r < ROWS; r++)
{
for (c = 0; c < COLS; c++)
{
printf("%4d ", hc[r*COLS+c]);
if (hc[r*COLS+c] != (r*COLS+c))
{
is_right = false;
}
}
printf("\n");
}
printf("the result is %s!\n", is_right? "right":"false");
cudaFree((void*)da);
cudaFree((void*)dc);
free(ha);
free(hc);
getchar();
return 0;
} | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define ROWS 32
#define COLS 16
#define CHECK(res) if(res!=hipSuccess){exit(-1);}
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
unsigned int row = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x*blockIdx.x + threadIdx.x;
if (row < rows && col < cols)
{
da[row][col] = row*cols + col;
}
}
int main(int argc, char **argv)
{
int **da = NULL;
int **ha = NULL;
int *dc = NULL;
int *hc = NULL;
hipError_t res;
int r, c;
bool is_right=true;
res = hipMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
res = hipMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
ha = (int**)malloc(ROWS*sizeof(int*));
hc = (int*)malloc(ROWS*COLS*sizeof(int));
for (r = 0; r < ROWS; r++)
{
ha[r] = dc + r*COLS;
}
res = hipMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), hipMemcpyHostToDevice);CHECK(res)
dim3 dimBlock(16,16);
dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
Kerneltest<<<dimGrid, dimBlock>>>(da, ROWS, COLS);
res = hipMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), hipMemcpyDeviceToHost);CHECK(res)
for (r = 0; r < ROWS; r++)
{
for (c = 0; c < COLS; c++)
{
printf("%4d ", hc[r*COLS+c]);
if (hc[r*COLS+c] != (r*COLS+c))
{
is_right = false;
}
}
printf("\n");
}
printf("the result is %s!\n", is_right? "right":"false");
hipFree((void*)da);
hipFree((void*)dc);
free(ha);
free(hc);
getchar();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define ROWS 32
#define COLS 16
#define CHECK(res) if(res!=hipSuccess){exit(-1);}
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
unsigned int row = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x*blockIdx.x + threadIdx.x;
if (row < rows && col < cols)
{
da[row][col] = row*cols + col;
}
}
int main(int argc, char **argv)
{
int **da = NULL;
int **ha = NULL;
int *dc = NULL;
int *hc = NULL;
hipError_t res;
int r, c;
bool is_right=true;
res = hipMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
res = hipMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
ha = (int**)malloc(ROWS*sizeof(int*));
hc = (int*)malloc(ROWS*COLS*sizeof(int));
for (r = 0; r < ROWS; r++)
{
ha[r] = dc + r*COLS;
}
res = hipMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), hipMemcpyHostToDevice);CHECK(res)
dim3 dimBlock(16,16);
dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
Kerneltest<<<dimGrid, dimBlock>>>(da, ROWS, COLS);
res = hipMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), hipMemcpyDeviceToHost);CHECK(res)
for (r = 0; r < ROWS; r++)
{
for (c = 0; c < COLS; c++)
{
printf("%4d ", hc[r*COLS+c]);
if (hc[r*COLS+c] != (r*COLS+c))
{
is_right = false;
}
}
printf("\n");
}
printf("the result is %s!\n", is_right? "right":"false");
hipFree((void*)da);
hipFree((void*)dc);
free(ha);
free(hc);
getchar();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10KerneltestPPijj
.globl _Z10KerneltestPPijj
.p2align 8
.type _Z10KerneltestPPijj,@function
_Z10KerneltestPPijj:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v4, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s4, 16
s_and_b32 s4, s4, 0xffff
v_mad_u64_u32 v[0:1], null, s15, s5, v[2:3]
v_mad_u64_u32 v[2:3], null, s14, s4, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_u32_e32 vcc_lo, s2, v0
v_cmp_gt_u32_e64 s2, s3, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 3, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s0, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
v_mad_u64_u32 v[6:7], null, v0, s3, v[2:3]
global_load_b64 v[4:5], v[3:4], off
v_mov_b32_e32 v3, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt vmcnt(0)
v_add_co_u32 v0, vcc_lo, v4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, v5, v1, vcc_lo
flat_store_b32 v[0:1], v6
.LBB0_2:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10KerneltestPPijj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10KerneltestPPijj, .Lfunc_end0-_Z10KerneltestPPijj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10KerneltestPPijj
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10KerneltestPPijj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.