system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7kernExpPdS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ MOV R10, c[0x0][0x168] ; /* 0x00005a00000a7a02 */
/* 0x000fe20000000f00 */
/*0030*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff0b7624 */
/* 0x000fe200078e00ff */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0050*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0060*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0070*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fe400078e0203 */
/*0080*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fc400078e00ff */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fe400078e0205 */
/*00a0*/ LDG.E.64 R4, [R10.64] ; /* 0x000000040a047981 */
/* 0x000ea4000c1e1b00 */
/*00b0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0203 */
/*00c0*/ LDG.E.64 R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea2000c1e1b00 */
/*00d0*/ HFMA2.MMA R8, -RZ, RZ, 1323, -4.565715789794921875e-05 ; /* 0x652b82feff087435 */
/* 0x000fe200000001ff */
/*00e0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x3ff71547 ; /* 0x3ff71547ff097424 */
/* 0x000fe200078e00ff */
/*00f0*/ MOV R14, 0x69ce2bdf ; /* 0x69ce2bdf000e7802 */
/* 0x000fe20000000f00 */
/*0100*/ IMAD.MOV.U32 R15, RZ, RZ, 0x3e5ade15 ; /* 0x3e5ade15ff0f7424 */
/* 0x000fe200078e00ff */
/*0110*/ BSSY B0, 0x330 ; /* 0x0000021000007945 */
/* 0x000fe20003800000 */
/*0120*/ DADD R4, -R4, R6 ; /* 0x0000000004047229 */
/* 0x004e0c0000000106 */
/*0130*/ DFMA R6, R4, R8, 6.75539944105574400000e+15 ; /* 0x433800000406742b */
/* 0x001e0c0000000008 */
/*0140*/ DADD R8, R6, -6.75539944105574400000e+15 ; /* 0xc338000006087429 */
/* 0x001e0c0000000000 */
/*0150*/ DFMA R12, R8, c[0x2][0x0], R4 ; /* 0x00800000080c7a2b */
/* 0x001e0c0000000004 */
/*0160*/ DFMA R8, R8, c[0x2][0x8], R12 ; /* 0x0080020008087a2b */
/* 0x001e0c000000000c */
/*0170*/ DFMA R12, R8, R14, c[0x2][0x10] ; /* 0x00800400080c762b */
/* 0x001e0c000000000e */
/*0180*/ DFMA R12, R8, R12, c[0x2][0x18] ; /* 0x00800600080c762b */
/* 0x001e0c000000000c */
/*0190*/ DFMA R12, R8, R12, c[0x2][0x20] ; /* 0x00800800080c762b */
/* 0x001e0c000000000c */
/*01a0*/ DFMA R12, R8, R12, c[0x2][0x28] ; /* 0x00800a00080c762b */
/* 0x001e0c000000000c */
/*01b0*/ DFMA R12, R8, R12, c[0x2][0x30] ; /* 0x00800c00080c762b */
/* 0x001e0c000000000c */
/*01c0*/ DFMA R12, R8, R12, c[0x2][0x38] ; /* 0x00800e00080c762b */
/* 0x001e0c000000000c */
/*01d0*/ DFMA R12, R8, R12, c[0x2][0x40] ; /* 0x00801000080c762b */
/* 0x001e0c000000000c */
/*01e0*/ DFMA R12, R8, R12, c[0x2][0x48] ; /* 0x00801200080c762b */
/* 0x001e0c000000000c */
/*01f0*/ DFMA R12, R8, R12, c[0x2][0x50] ; /* 0x00801400080c762b */
/* 0x001e22000000000c */
/*0200*/ FSETP.GEU.AND P0, PT, |R5|, 4.1917929649353027344, PT ; /* 0x4086232b0500780b */
/* 0x000fca0003f0e200 */
/*0210*/ DFMA R12, R8, R12, 1 ; /* 0x3ff00000080c742b */
/* 0x001e0c000000000c */
/*0220*/ DFMA R12, R8, R12, 1 ; /* 0x3ff00000080c742b */
/* 0x001e14000000000c */
/*0230*/ LEA R9, R6, R13, 0x14 ; /* 0x0000000d06097211 */
/* 0x001fe200078ea0ff */
/*0240*/ IMAD.MOV.U32 R8, RZ, RZ, R12 ; /* 0x000000ffff087224 */
/* 0x000fe200078e000c */
/*0250*/ @!P0 BRA 0x320 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*0260*/ FSETP.GEU.AND P1, PT, |R5|, 4.2275390625, PT ; /* 0x408748000500780b */
/* 0x000fe20003f2e200 */
/*0270*/ DADD R8, R4, +INF ; /* 0x7ff0000004087429 */
/* 0x000fc80000000000 */
/*0280*/ DSETP.GEU.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400722a */
/* 0x000e0c0003f0e000 */
/*0290*/ FSEL R8, R8, RZ, P0 ; /* 0x000000ff08087208 */
/* 0x001fe40000000000 */
/*02a0*/ @!P1 LEA.HI R0, R6, R6, RZ, 0x1 ; /* 0x0000000606009211 */
/* 0x000fe400078f08ff */
/*02b0*/ FSEL R9, R9, RZ, P0 ; /* 0x000000ff09097208 */
/* 0x000fe40000000000 */
/*02c0*/ @!P1 SHF.R.S32.HI R5, RZ, 0x1, R0 ; /* 0x00000001ff059819 */
/* 0x000fc80000011400 */
/*02d0*/ @!P1 IADD3 R4, R6, -R5, RZ ; /* 0x8000000506049210 */
/* 0x000fe20007ffe0ff */
/*02e0*/ @!P1 IMAD R13, R5, 0x100000, R13 ; /* 0x00100000050d9824 */
/* 0x000fc600078e020d */
/*02f0*/ @!P1 LEA R5, R4, 0x3ff00000, 0x14 ; /* 0x3ff0000004059811 */
/* 0x000fe400078ea0ff */
/*0300*/ @!P1 MOV R4, RZ ; /* 0x000000ff00049202 */
/* 0x000fcc0000000f00 */
/*0310*/ @!P1 DMUL R8, R12, R4 ; /* 0x000000040c089228 */
/* 0x00004c0000000000 */
/*0320*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0330*/ STG.E.64 [R2.64], R8 ; /* 0x0000000802007986 */
/* 0x002fe2000c101b04 */
/*0340*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0350*/ BRA 0x350; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7kernExpPdS_
.globl _Z7kernExpPdS_
.p2align 8
.type _Z7kernExpPdS_,@function
_Z7kernExpPdS_:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x10
s_load_b32 s5, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s4, s15
s_and_b32 s5, s5, 0xffff
s_add_i32 s4, s4, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_load_b64 s[0:1], s[2:3], 0x0
s_mov_b32 s3, 0x3e5ade15
s_mov_b32 s2, 0x6a5dcb37
global_load_b64 v[2:3], v[0:1], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f64 v[2:3], v[2:3], -s[0:1]
s_mov_b32 s1, 0x3ff71547
s_mov_b32 s0, 0x652b82fe
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mul_f64 v[4:5], v[2:3], s[0:1]
s_mov_b32 s1, 0xbfe62e42
s_mov_b32 s0, 0xfefa39ef
v_cmp_nlt_f64_e32 vcc_lo, 0x40900000, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rndne_f64_e32 v[4:5], v[4:5]
v_fma_f64 v[6:7], v[4:5], s[0:1], v[2:3]
s_mov_b32 s1, 0xbc7abc9e
s_mov_b32 s0, 0x3b39803f
v_cvt_i32_f64_e32 v10, v[4:5]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[6:7], v[4:5], s[0:1], v[6:7]
s_mov_b32 s1, 0x3e928af3
s_mov_b32 s0, 0xfca7ab0c
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], s[2:3], s[0:1]
s_mov_b32 s1, 0x3ec71dee
s_mov_b32 s0, 0x623fde64
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3efa0199
s_mov_b32 s0, 0x7c89e6b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f2a01a0
s_mov_b32 s0, 0x14761f6e
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f56c16c
s_mov_b32 s0, 0x1852b7b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f811111
s_mov_b32 s0, 0x11122322
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fa55555
s_mov_b32 s0, 0x555502a1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fc55555
s_mov_b32 s0, 0x55555511
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fe00000
s_mov_b32 s0, 11
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
v_cmp_ngt_f64_e64 s0, 0xc090cc00, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], 1.0
v_fma_f64 v[4:5], v[6:7], v[8:9], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ldexp_f64 v[4:5], v[4:5], v10
v_cndmask_b32_e32 v5, 0x7ff00000, v5, vcc_lo
s_and_b32 vcc_lo, s0, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v2, 0, v4, vcc_lo
v_cndmask_b32_e64 v3, 0, v5, s0
global_store_b64 v[0:1], v[2:3], off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7kernExpPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7kernExpPdS_, .Lfunc_end0-_Z7kernExpPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7kernExpPdS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7kernExpPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00044c48_00000000-6_kernExp.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z7kernExpPdS_PdS_
.type _Z28__device_stub__Z7kernExpPdS_PdS_, @function
_Z28__device_stub__Z7kernExpPdS_PdS_:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7kernExpPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z28__device_stub__Z7kernExpPdS_PdS_, .-_Z28__device_stub__Z7kernExpPdS_PdS_
.globl _Z7kernExpPdS_
.type _Z7kernExpPdS_, @function
_Z7kernExpPdS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z7kernExpPdS_PdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z7kernExpPdS_, .-_Z7kernExpPdS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7kernExpPdS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7kernExpPdS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernExp.hip"
.globl _Z22__device_stub__kernExpPdS_ # -- Begin function _Z22__device_stub__kernExpPdS_
.p2align 4, 0x90
.type _Z22__device_stub__kernExpPdS_,@function
_Z22__device_stub__kernExpPdS_: # @_Z22__device_stub__kernExpPdS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z7kernExpPdS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z22__device_stub__kernExpPdS_, .Lfunc_end0-_Z22__device_stub__kernExpPdS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7kernExpPdS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7kernExpPdS_,@object # @_Z7kernExpPdS_
.section .rodata,"a",@progbits
.globl _Z7kernExpPdS_
.p2align 3, 0x0
_Z7kernExpPdS_:
.quad _Z22__device_stub__kernExpPdS_
.size _Z7kernExpPdS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7kernExpPdS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__kernExpPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7kernExpPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <map>
#include <iterator>
#include <algorithm>
#include <assert.h>
//#include <time.h>
using namespace std;
const int N = 2394385;
const int M = 5021410;
const int BLOCK_SIZE = 256;
const unsigned MAX_NUM = 4294967295;
const int NUM_BANKS = 16;
const int LOG_NUM_BANKS = 4;
//const string fn("test.txt");
const string fn("WikiTalk.txt");
//const string fn("soc-LiveJournal1.txt");
unsigned **scanBlockSums;
unsigned numEltsAllocated = 0;
unsigned numLevelsAllocated = 0;
__device__ unsigned Mterminate;
__device__ unsigned numActiveThreads;
__device__ unsigned *range;
__device__ unsigned *pivot;
#define FALSE 0u
#define TRUE 1u
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "Error: %s\nFile %s, line %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
inline
bool isPowerOfTwo(int n) {
return (n & (n - 1)) == 0;
}
inline
int floorPow2(int n) {
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
}
template <bool isNP2>
__device__
void loadSharedChunkFromMem(unsigned *s_data, const unsigned *idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB);
template <bool isNP2>
__device__
void storeSharedChunkToMem(unsigned* odata, const unsigned* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB);
template <bool storeSum>
__device__
void clearLastElement(unsigned* s_data, unsigned *blockSums, int blockIndex);
__device__
unsigned int buildSum(unsigned *s_data);
__device__
void scanRootToLeaves(unsigned *s_data, unsigned int stride);
template <bool storeSum>
__device__
void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums);
template <bool storeSum, bool isNP2>
__global__
void prescan(unsigned *odata, const unsigned *idata, unsigned *blockSums, int n, int blockIndex, int baseIndex);
__global__
void uniformAdd(unsigned *data, unsigned *uniforms, int n, int blockOffset, int baseIndex);
__global__
void BFSKernel1(
size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E,
unsigned *F, unsigned *X,unsigned *Fu);
__global__
void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu);
__global__
void TRIMKernel(size_t graphSize,unsigned *VF,unsigned *EF,unsigned *VB,unsigned *EB);
/*__global__
void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask);*/
__global__
void compactSIMD(size_t N, unsigned *prefixSums, unsigned *activeMask, size_t blockSize);
__global__
void PIVOTS_SEL_Kernel(size_t graphSize);
__global__
void UpdateKernel(size_t graphSize, unsigned *visF,unsigned *visB);
__global__
void UpdateKernel1(size_t graphSize,unsigned *visF,unsigned *visB,unsigned *ac);
__global__
void RenumberKernel(size_t graphSize,unsigned *prefixsum);
__global__
void initpivot(size_t graphSize);
__global__
void ReadPivot(size_t graphSize, unsigned *pivot_);
__host__
void setUInt(unsigned *address, unsigned value);
__host__
void Graphpreproc(const string filename, vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB);
__host__
void BFS(vector<unsigned> &V, vector<unsigned> &E, unsigned index_s, unsigned index_e, vector<unsigned> &visited);
__host__
void TRIMMING(vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB);
__host__
void PIVOTS_SEL();
__host__
void Update(vector<unsigned> &visF,vector<unsigned> &visB,unsigned &termin);
__host__
void Update1(vector<unsigned> &visF,vector<unsigned> &visB,vector<unsigned> &prefixsum, unsigned &termin);
__host__
void preallocBlockSums(unsigned maxNumElements);
__host__
void deallocBlockSums();
__host__
void prescanArrayRecursive(unsigned *outArray, const unsigned *inArray, int numElements, int level);
__host__
void prescanArray(unsigned *outArray, unsigned *inArray, int numElements);
int main()
{
vector<unsigned> VF, EF, VB, EB, visF, visB,range_,pivot_,prefixsum_;
vector<unsigned>::iterator itr;
/*ofstream out("out1.txt");
if(!out){
cout << "Unable to open outfile";
exit(1); // terminate with error
}*/
unsigned termin;
unsigned *d_r,*d_p;
unsigned *d_prefixsum;
range_.resize(N);
pivot_.resize(N);
//prefixsum_.resize(N+1);
VF.reserve(N + 1);
EF.reserve(M);
VB.reserve(N + 1);
EB.reserve(M);
Graphpreproc(fn, VF, EF, VB, EB);
unsigned index_s = 0;
unsigned index_e = 0;
unsigned loop = 0;
const size_t gridSizeK2 =
(N + BLOCK_SIZE - 1) / BLOCK_SIZE;
gpuErrchk(cudaMalloc(&d_r, N*sizeof(unsigned)));
gpuErrchk(cudaMemset(d_r, FALSE, N*sizeof(unsigned)));
//gpuErrchk(cudaMemcpyToSymbol(range, &d_r, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(range, &d_r, sizeof(d_r)));
gpuErrchk(cudaMalloc(&d_p, N*sizeof(unsigned)));
gpuErrchk(cudaMemset(d_p, 255, N*sizeof(unsigned)));
//gpuErrchk(cudaMemcpyToSymbol(pivot, &d_p, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(pivot, &d_p, sizeof(d_p)));
gpuErrchk(cudaMalloc(&d_prefixsum, (N+1)*sizeof(unsigned)));
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
while(true){
loop++;
TRIMMING(VF,EF,VB,EB);
PIVOTS_SEL();
BFS(VF, EF, index_s, index_e, visF);
BFS(VB, EB, index_s, index_e ,visB);
if(true){
Update1(visF,visB,prefixsum_,termin);
if(termin == FALSE){
gpuErrchk(cudaMemcpy(d_prefixsum, prefixsum_.data(), (N+1)*sizeof(unsigned), cudaMemcpyHostToDevice));
RenumberKernel<<<gridSizeK2,BLOCK_SIZE>>>(N,d_prefixsum);
gpuErrchk(cudaDeviceSynchronize());
index_e = prefixsum_.at(N) - 1;
}
}
else{
Update(visF,visB,termin);
index_e = 3 * index_e + 2;
}
if(termin)
break;
initpivot<<<gridSizeK2,BLOCK_SIZE>>>(N);
gpuErrchk(cudaDeviceSynchronize());
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time,start,stop);
printf("time is %f\n",time);
printf("%u\n", loop);
gpuErrchk(cudaFree(d_r));
gpuErrchk(cudaFree(d_p));
gpuErrchk(cudaFree(d_prefixsum));
//out.close();
return 0;
}
__host__
void setUInt(unsigned *address, unsigned value) {
gpuErrchk(cudaMemcpy(address, &value, sizeof(unsigned), cudaMemcpyHostToDevice));
}
__host__
void Graphpreproc(const string filename, vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB)
{
ifstream in_f;
vector<unsigned> t;
vector<unsigned>::iterator itr;
multimap<const unsigned, unsigned> m;
multimap<const unsigned, unsigned>::iterator mitr;
unsigned count = 0;
in_f.open(filename.c_str(), ios::in);
while (!in_f.eof()){
string temp, s1, s2;
stringstream ss1, ss2;
unsigned t1, t2;
getline(in_f, temp);
if(temp.length() == 0)
continue;
if (*(temp.begin()) == '#')
continue;
s1 = string(temp, 0, temp.find_first_of('\t'));
s2 = string(temp, temp.find_first_not_of('\t', temp.find_first_of('\t')), temp.find_last_not_of('\t'));
ss1 << s1;
ss1 >> t1;
ss2 << s2;
ss2 >> t2;
t.push_back(t1);
m.insert(make_pair(t2, t1));
EF.push_back(t2);
}
itr = t.begin();
VF.push_back(0);
for (int i = 0; i < N - 1; i++){
while ((itr != t.end()) && (*itr == i)){
count++;
itr++;
}
count += VF.at(i);
VF.push_back(count);
count = 0;
}
VF.push_back(M);
mitr = m.begin();
VB.push_back(0);
for (int i = 0; i < N - 1; i++){
while ((mitr != m.end()) && ((mitr->first) == i)){
count++;
mitr++;
}
count += VB.at(i);
VB.push_back(count);
count = 0;
}
VB.push_back(M);
for (mitr = m.begin(); mitr != m.end(); mitr++){
EB.push_back(mitr->second);
}
}
__global__
void BFSKernel1(
size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E,
unsigned *F, unsigned *X,unsigned *Fu) {
unsigned activeMaskIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// If vertex is active at current iteration
if (activeMaskIdx < numActiveThreads) {
unsigned v = activeMask[activeMaskIdx];
// Remove v from current frontier
F[v] = FALSE;
// Iterate over v's neighbors
for (unsigned edge = V[v]; edge < V[v + 1]; ++edge) {
unsigned neighbor = E[edge];
// If neighbor wasn't visited
if(range[v] == range[neighbor]){
if (X[neighbor] == FALSE){
//C[neighbor] = C[v] + 1;
Fu[neighbor] = TRUE;
}
}
}
}
}
__global__
void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu) {
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// If vertex v exists and has recently joined the frontier
if (v < graphSize && Fu[v]) {
// Copy the new frontier into F
F[v] = TRUE;
// Set v as visited
X[v] = TRUE;
// Clean up the new frontier
Fu[v] = FALSE;
Mterminate = FALSE;
}
}
__global__
void TRIMKernel(size_t graphSize,unsigned *VF,unsigned *EF,unsigned *VB,unsigned *EB)
{
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
bool elim;
if(v < graphSize){
if(range[v] != MAX_NUM){
elim = true;
for (unsigned edge = VF[v]; edge < VF[v + 1]; ++edge) {
unsigned neighbor = EF[edge];
if(range[neighbor] == range[v]){
elim = false;
break;
}
}
if(elim == false){
elim = true;
for (unsigned edge = VB[v]; edge < VB[v + 1]; ++edge) {
unsigned neighbor = EB[edge];
if(range[neighbor] == range[v]){
elim = false;
break;
}
}
}
if(elim == true){
range[v] = MAX_NUM;
Mterminate = FALSE;
}
}
}
}
__global__
void PIVOTS_SEL_Kernel(size_t graphSize)
{
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(v < graphSize){
if(range[v] != MAX_NUM)
pivot[range[v]] = v;
}
}
__global__
void UpdateKernel(size_t graphSize,unsigned *visF,unsigned *visB)
{
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(v < graphSize){
if(range[v] != MAX_NUM){
if(visF[v] == TRUE){
if(visB[v] == TRUE){
range[v] = MAX_NUM;
}
else if(visB[v] == FALSE){
range[v] = 3*range[v];
Mterminate = FALSE;
}
}
else if(visF[v] == FALSE){
if(visB[v] == TRUE){
range[v] = 3*range[v] + 1;
Mterminate = FALSE;
}
else if(visB[v] == FALSE){
range[v] = 3*range[v] + 2;
Mterminate = FALSE;
}
}
}
}
}
__global__
void UpdateKernel1(size_t graphSize,unsigned *visF,unsigned *visB,unsigned *ac)
{
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(v < graphSize){
if(range[v] != MAX_NUM){
if(visF[v] == TRUE){
if(visB[v] == TRUE){
range[v] = MAX_NUM;
}
else if(visB[v] == FALSE){
range[v] = 3*range[v];
ac[range[v]] = TRUE;
Mterminate = FALSE;
}
}
else if(visF[v] == FALSE){
if(visB[v] == TRUE){
range[v] = 3*range[v] + 1;
ac[range[v]] = TRUE;
Mterminate = FALSE;
}
else if(visB[v] == FALSE){
range[v] = 3*range[v] + 2;
ac[range[v]] = TRUE;
Mterminate = FALSE;
}
}
}
}
}
__global__
void RenumberKernel(size_t graphSize,unsigned *prefixsum)
{
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(v < graphSize){
if(range[v] != MAX_NUM)
range[v] = prefixsum[range[v]];
}
}
__global__
void initpivot(size_t graphSize)
{
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(v < graphSize){
pivot[v] = MAX_NUM;
}
}
__global__
void ReadPivot(size_t graphSize ,unsigned *pivot_)
{
int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if(v < graphSize){
pivot_[v] = pivot[v];
}
}
// Very slow but correct "active mask" calculation; for debugging
/*__global__
void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask) {
numActiveThreads = 0;
for (int i = 0; i < graphSize; ++i) {
if (F[i]) {
activeMask[numActiveThreads] = i;
++numActiveThreads;
}
}
}*/
__global__
void compactSIMD(size_t N, unsigned *prefixSums, unsigned *activeMask, size_t blockSize) {
size_t v = blockIdx.x * blockSize + threadIdx.x;
if (v < N) {
// Can possibly be accelerated by using shared memory
if (prefixSums[v + 1] != prefixSums[v]) {
activeMask[prefixSums[v]] = v;
}
}
}
__host__
// Frontier-based GPU BFS over the CSR graph (V = row offsets, E = adjacency),
// launched simultaneously from every pivot whose range index lies in
// [index_s, index_e].  On return, visited[v] == TRUE iff v was reached; the
// traversal kernel only follows edges whose endpoints share the same range
// partition (see BFSKernel1).
void BFS(vector<unsigned> &V, vector<unsigned> &E, unsigned index_s, unsigned index_e, vector<unsigned> &visited)
{
    assert(sizeof(unsigned) == 4);
    unsigned size_sub = index_e - index_s + 1;   // number of pivot slots to read
    visited.clear();
    visited.resize(N);
    vector<unsigned> pivot_h;                    // host copy of the pivot table
    pivot_h.resize(size_sub);
    unsigned *d_V, *d_E, *d_p;
    unsigned *d_F, *d_X, *d_Fu;                  // frontier, visited, next-frontier
    unsigned *activeMask, *prefixSums;
    //unsigned **prefixSums;
    size_t memSize = (N + 1) * sizeof(unsigned);
    size_t memSizeE = M * sizeof(unsigned);
    // Allocate and zero the three per-vertex flag arrays.
    gpuErrchk(cudaMalloc(&d_F, memSize));
    gpuErrchk(cudaMemset(d_F, FALSE, memSize));
    //setUInt(d_F + piv, TRUE); // add source to frontier
    gpuErrchk(cudaMalloc(&d_X, memSize));
    gpuErrchk(cudaMemset(d_X, FALSE, memSize));
    //setUInt(d_X + sourceVertex, TRUE); // set source as visited
    gpuErrchk(cudaMalloc(&d_Fu, memSize));
    gpuErrchk(cudaMemset(d_Fu, FALSE, memSize));
    // Upload the CSR graph.
    gpuErrchk(cudaMalloc(&d_V, memSize));
    gpuErrchk(cudaMemcpy(d_V, V.data(), memSize, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_E, memSizeE));
    gpuErrchk(cudaMemcpy(d_E, E.data(), memSizeE, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&activeMask, memSize));
    //setUInt(activeMask + 0, sourceVertex); // set thread #source as active
    gpuErrchk(cudaMalloc(&d_p, size_sub*sizeof(unsigned)));
    //gpuErrchk(cudaMemset(d_p, FALSE, N*sizeof(unsigned)));
    gpuErrchk(cudaMalloc(&prefixSums, memSize));
    preallocBlockSums(N + 1);                    // scratch for the prefix-scan
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const size_t gridSizeK3 =
        (size_sub + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // Snapshot the device pivot table to the host.
    ReadPivot<<<gridSizeK3,BLOCK_SIZE>>>(size_sub,d_p);
    gpuErrchk(cudaDeviceSynchronize());
    cudaMemcpy(pivot_h.data(), d_p, size_sub*sizeof(unsigned), cudaMemcpyDeviceToHost);
    unsigned numActiveThreadsHost = 0;
    // Seed frontier/visited/activeMask with every valid pivot, one memcpy per
    // word (setUInt).  NOTE(review): pivot_h has size_sub entries but is
    // indexed with i in [index_s, index_e]; this only lines up when
    // index_s == 0 — confirm against callers.
    for(int i = index_s;i <= index_e;i++){
        if(pivot_h.at(i) == MAX_NUM)
            continue;
        setUInt(d_F + pivot_h.at(i), TRUE);
        setUInt(d_X + pivot_h.at(i), TRUE);
        setUInt(activeMask + numActiveThreadsHost, pivot_h.at(i));
        numActiveThreadsHost++;
    }
    gpuErrchk(cudaMemcpyToSymbol(numActiveThreads, &numActiveThreadsHost, sizeof(unsigned)));
    // Main loop
    /*const size_t prefixSumGridSize =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;*/
    while (true) {
        // Terminate <- TRUE
        unsigned terminateHost = TRUE;
        gpuErrchk(cudaMemcpyToSymbol(Mterminate, &terminateHost, sizeof(unsigned)));
        // Kernel 1: need to assign ACTIVE vertices to SIMD lanes (threads)
        //gpuErrchk(cudaMemcpyFromSymbol(&numActiveThreadsHost, numActiveThreads, sizeof(unsigned)));
        const size_t gridSizeK1 =
            (numActiveThreadsHost + BLOCK_SIZE - 1) / BLOCK_SIZE;
        // launch kernel 1
        BFSKernel1 <<<gridSizeK1, BLOCK_SIZE >>> (N,activeMask, d_V, d_E, d_F, d_X,d_Fu);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        // Kernel 2: need to assign ALL vertices to SIMD lanes
        // launch kernel 2
        BFSKernel2 <<<gridSizeK2, BLOCK_SIZE >>> (N, d_F, d_X, d_Fu);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        gpuErrchk(cudaMemcpyFromSymbol(&terminateHost, Mterminate, sizeof(unsigned)));
        if (terminateHost) {
            break;                     // frontier empty: BFS converged
        }
        else {
            // Get prefix sums of F
            prescanArray(prefixSums, d_F, N + 1);
            //cudaMemcpy(&numActiveThreads, prefixSums + N, sizeof(unsigned), cudaMemcpyDeviceToDevice);
            // Total of F == number of active threads for the next iteration.
            cudaMemcpy(&numActiveThreadsHost, prefixSums + N, sizeof(unsigned), cudaMemcpyDeviceToHost);
            gpuErrchk(cudaMemcpyToSymbol(numActiveThreads, &numActiveThreadsHost, sizeof(unsigned)));
            compactSIMD <<<gridSizeK2, BLOCK_SIZE>>> (N, prefixSums, activeMask, BLOCK_SIZE);
            //gpuErrchk(cudaPeekAtLastError());
            //getActiveMaskTemp<<<1,1>>>(N,d_F,activeMask);
            gpuErrchk(cudaDeviceSynchronize());
            //gpuErrchk(cudaPeekAtLastError());
        }
    }
    // Download result
    //gpuErrchk(cudaMemcpy(distances.data(), d_C, memSize - sizeof(unsigned), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(visited.data(), d_X, memSize - sizeof(unsigned), cudaMemcpyDeviceToHost));
    // Free memory
    gpuErrchk(cudaFree(d_F));
    gpuErrchk(cudaFree(d_X));
    gpuErrchk(cudaFree(d_Fu));
    gpuErrchk(cudaFree(d_V));
    gpuErrchk(cudaFree(d_E));
    gpuErrchk(cudaFree(d_p));
    gpuErrchk(cudaFree(activeMask));
    deallocBlockSums();
    gpuErrchk(cudaFree(prefixSums));
}
__host__
// Iterated trimming: uploads both edge directions of the graph and repeatedly
// runs TRIMKernel until a full pass eliminates nothing (Mterminate stays TRUE).
// TRIMKernel removes vertices with no same-range in- or out-neighbor, i.e.
// trivial SCCs.
void TRIMMING(vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB)
{
    unsigned *d_VF, *d_EF, *d_VB, *d_EB;   // forward/backward CSR on device
    size_t memSize = (N + 1) * sizeof(unsigned);
    size_t memSizeE = M * sizeof(unsigned);
    gpuErrchk(cudaMalloc(&d_VF, memSize));
    gpuErrchk(cudaMemcpy(d_VF, VF.data(), memSize, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_EF, memSizeE));
    gpuErrchk(cudaMemcpy(d_EF, EF.data(), memSizeE, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_VB, memSize));
    gpuErrchk(cudaMemcpy(d_VB, VB.data(), memSize, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_EB, memSizeE));
    gpuErrchk(cudaMemcpy(d_EB, EB.data(), memSizeE, cudaMemcpyHostToDevice));
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    while (true) {
        // Terminate <- TRUE
        unsigned terminateHost = TRUE;
        gpuErrchk(cudaMemcpyToSymbol(Mterminate, &terminateHost, sizeof(unsigned)));
        // launch kernel 2
        TRIMKernel <<<gridSizeK2, BLOCK_SIZE >>> (N, d_VF, d_EF, d_VB, d_EB);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        gpuErrchk(cudaMemcpyFromSymbol(&terminateHost, Mterminate, sizeof(unsigned)));
        if (terminateHost) {
            break;                     // fixed point reached: nothing trimmed
        }
    }
    gpuErrchk(cudaFree(d_VF));
    gpuErrchk(cudaFree(d_EF));
    gpuErrchk(cudaFree(d_VB));
    gpuErrchk(cudaFree(d_EB));
}
__host__
// Host wrapper: launch one PIVOTS_SEL_Kernel pass over all N vertices to
// (re)select a pivot for every live range partition.
void PIVOTS_SEL()
{
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    PIVOTS_SEL_Kernel<<<gridSizeK2, BLOCK_SIZE>>>(N);
    //gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaDeviceSynchronize());
}
__host__
// One UpdateKernel pass: uploads the forward/backward visitation flags, splits
// each range into the 3 sub-ranges (3r / 3r+1 / 3r+2) on the device, and
// reports via `termin` whether any vertex actually changed (FALSE = changed).
void Update(vector<unsigned> &visF,vector<unsigned> &visB,unsigned &termin)
{
    unsigned *d_vf,*d_vb;
    size_t memSize = N * sizeof(unsigned);
    gpuErrchk(cudaMalloc(&d_vf, memSize));
    gpuErrchk(cudaMemcpy(d_vf, visF.data(), memSize, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_vb, memSize));
    gpuErrchk(cudaMemcpy(d_vb, visB.data(), memSize, cudaMemcpyHostToDevice));
    // Terminate <- TRUE
    termin = TRUE;
    gpuErrchk(cudaMemcpyToSymbol(Mterminate, &termin, sizeof(unsigned)));
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    UpdateKernel<<<gridSizeK2, BLOCK_SIZE>>>(N,d_vf,d_vb);
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpyFromSymbol(&termin, Mterminate, sizeof(unsigned)));
    gpuErrchk(cudaFree(d_vf));
    gpuErrchk(cudaFree(d_vb));
}
__host__
// Like Update(), but additionally marks which new range ids are occupied
// (d_ac) and, when anything changed, returns their exclusive prefix sums in
// `prefixsum` so the caller can renumber ranges densely (RenumberKernel).
void Update1(vector<unsigned> &visF,vector<unsigned> &visB,vector<unsigned> &prefixsum, unsigned &termin)
{
    prefixsum.clear();
    prefixsum.resize(N+1);
    unsigned *d_vf,*d_vb,*d_ac;        // d_ac: "range id in use" flags
    unsigned *prefixSums;
    size_t memSize = (N + 1) * sizeof(unsigned);
    gpuErrchk(cudaMalloc(&d_ac, memSize));
    gpuErrchk(cudaMemset(d_ac, FALSE, memSize));
    gpuErrchk(cudaMalloc(&d_vf, memSize));
    // Only N entries are meaningful; the extra slot pads for the (N+1)-scan.
    gpuErrchk(cudaMemcpy(d_vf, visF.data(), memSize - sizeof(unsigned), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_vb, memSize));
    gpuErrchk(cudaMemcpy(d_vb, visB.data(), memSize - sizeof(unsigned), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&prefixSums, memSize));
    preallocBlockSums(N + 1);
    // Terminate <- TRUE
    termin = TRUE;
    gpuErrchk(cudaMemcpyToSymbol(Mterminate, &termin, sizeof(unsigned)));
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    UpdateKernel1<<<gridSizeK2, BLOCK_SIZE>>>(N,d_vf,d_vb,d_ac);
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpyFromSymbol(&termin, Mterminate, sizeof(unsigned)));
    if(termin == FALSE){
        prescanArray(prefixSums, d_ac, N + 1);
        //gpuErrchk(cudaMemcpyToSymbol(prefixsum, &prefixSums, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(prefixsum.data(), prefixSums, memSize, cudaMemcpyDeviceToHost));
    }
    gpuErrchk(cudaFree(d_vf));
    gpuErrchk(cudaFree(d_vb));
    gpuErrchk(cudaFree(d_ac));
    deallocBlockSums();
    gpuErrchk(cudaFree(prefixSums));
}
__host__
// Allocate the per-level block-sum scratch buffers used by the recursive
// prefix scan.  First pass counts how many levels a scan of maxNumElements
// needs (each level scans numBlocks partial sums, blocks cover 2*blockSize
// elements); second pass allocates one device buffer per level.
void preallocBlockSums(unsigned maxNumElements) {
    numEltsAllocated = maxNumElements;
    unsigned blockSize = BLOCK_SIZE;
    unsigned numElts = maxNumElements;
    int level = 0;
    do {
        unsigned numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1) {
            level++;                   // another level of block sums needed
        }
        numElts = numBlocks;
    } while (numElts > 1);
    scanBlockSums = (unsigned**)malloc(level * sizeof(unsigned*));
    numLevelsAllocated = level;
    numElts = maxNumElements;
    level = 0;
    do {
        unsigned numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1) {
            gpuErrchk(cudaMalloc(&scanBlockSums[level++], numBlocks * sizeof(unsigned)));
        }
        numElts = numBlocks;
    } while (numElts > 1);
}
__host__
// Release every per-level block-sum buffer and reset the scan bookkeeping
// globals; safe to call preallocBlockSums() again afterwards.
void deallocBlockSums() {
    for (unsigned i = 0; i < numLevelsAllocated; i++) {
        cudaFree(scanBlockSums[i]);
    }
    free(scanBlockSums);
    scanBlockSums = 0;
    numEltsAllocated = 0;
    numLevelsAllocated = 0;
}
__host__
// Recursive exclusive prefix scan (work-efficient Blelloch scan, after the
// classic GPU scan sample).  Each block scans 2*numThreads elements; per-block
// totals are written to scanBlockSums[level], scanned recursively, then added
// back by uniformAdd.  A non-power-of-two tail is handled by one extra block.
void prescanArrayRecursive(unsigned *outArray,
    const unsigned *inArray,
    int numElements,
    int level) {
    unsigned blockSize = BLOCK_SIZE;
    unsigned numBlocks =
        max(1, (int)ceil((float)numElements / (2.f * blockSize)));
    unsigned numThreads;
    if (numBlocks > 1)
        numThreads = blockSize;
    else if (isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = floorPow2(numElements);   // round down; tail handled as np2
    unsigned numEltsPerBlock = numThreads * 2;
    // Last block may cover fewer elements than a full block.
    unsigned numEltsLastBlock =
        numElements - (numBlocks - 1) * numEltsPerBlock;
    unsigned numThreadsLastBlock = max(1u, numEltsLastBlock / 2);
    unsigned np2LastBlock = 0;
    unsigned sharedMemLastBlock = 0;
    if (numEltsLastBlock != numEltsPerBlock) {
        np2LastBlock = 1;                      // launch a separate tail block
        if (!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);
        unsigned extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock =
            sizeof(unsigned)* (2 * numThreadsLastBlock + extraSpace);
    }
    // Avoid shared memory bank conflicts
    unsigned extraSpace = numEltsPerBlock / NUM_BANKS;
    unsigned sharedMemSize =
        sizeof(unsigned)* (numEltsPerBlock + extraSpace);
    dim3 grid(max(1u, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);
    // Main action
    if (numBlocks > 1) {
        // Multi-block: scan full blocks, storing each block's total.
        prescan<true, false> <<< grid, threads, sharedMemSize >>> (
            outArray, inArray, scanBlockSums[level], numThreads * 2, 0, 0);
        if (np2LastBlock) {
            prescan<true, true> <<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (
                outArray, inArray, scanBlockSums[level], numEltsLastBlock,
                numBlocks - 1, numElements - numEltsLastBlock);
        }
        // Scan the block totals in place, then distribute them.
        prescanArrayRecursive(scanBlockSums[level], scanBlockSums[level], numBlocks, level + 1);
        uniformAdd <<< grid, threads >>> (
            outArray, scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
        if (np2LastBlock) {
            uniformAdd <<<1, numThreadsLastBlock >>> (
                outArray, scanBlockSums[level], numEltsLastBlock,
                numBlocks - 1, numElements - numEltsLastBlock);
        }
    }
    else if (isPowerOfTwo(numElements)) {
        // Single power-of-two block: no block sums needed.
        prescan<false, false> <<<grid, threads, sharedMemSize >>> (
            outArray, inArray, 0, numThreads * 2, 0, 0);
    }
    else {
        // Single non-power-of-two block: pad logically via isNP2 handling.
        prescan<false, true> <<<grid, threads, sharedMemSize >>> (
            outArray, inArray, 0, numElements, 0, 0);
    }
}
__host__
// Public entry point: exclusive prefix scan of inArray into outArray.
// Requires preallocBlockSums(numElements) to have been called first.
void prescanArray(unsigned *outArray, unsigned *inArray, int numElements) {
    prescanArrayRecursive(outArray, inArray, numElements, 0);
}
template <bool isNP2>
// Load two elements per thread from global memory into padded shared memory.
// Outputs (by reference): ai/bi are the thread's two shared-memory slots,
// mem_ai/mem_bi the corresponding global indices, bankOffset* the padding
// added every NUM_BANKS entries to avoid shared-memory bank conflicts.
// When isNP2, the second element is zero-filled past the end of the chunk.
__device__ void loadSharedChunkFromMem(unsigned *s_data,
    const unsigned *idata,
    int n, int baseIndex,
    int& ai, int& bi,
    int& mem_ai, int& mem_bi,
    int& bankOffsetA, int& bankOffsetB) {
    int thid = threadIdx.x;
    mem_ai = baseIndex + threadIdx.x;
    mem_bi = mem_ai + blockDim.x;
    ai = thid;
    bi = thid + blockDim.x;
    bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    s_data[ai + bankOffsetA] = idata[mem_ai];
    if (isNP2) {
        // Pad the non-power-of-two tail with zeros (identity for +).
        s_data[bi + bankOffsetB] = (bi < n) ? idata[mem_bi] : 0;
    }
    else {
        s_data[bi + bankOffsetB] = idata[mem_bi];
    }
}
template <bool isNP2>
__device__
// Mirror of loadSharedChunkFromMem: write the scanned shared-memory chunk
// back to global memory.  The barrier makes every thread's scan results
// visible before any store; with isNP2 the padded tail is not written out.
void storeSharedChunkToMem(unsigned* odata,
    const unsigned* s_data,
    int n,
    int ai, int bi,
    int mem_ai, int mem_bi,
    int bankOffsetA, int bankOffsetB) {
    __syncthreads();
    odata[mem_ai] = s_data[ai + bankOffsetA];
    if (isNP2) {
        if (bi < n)
            odata[mem_bi] = s_data[bi + bankOffsetB];
    }
    else {
        odata[mem_bi] = s_data[bi + bankOffsetB];
    }
}
template <bool storeSum>
__device__
// Between the up-sweep and down-sweep of the Blelloch scan: thread 0 saves
// the block's total (the last, bank-padded element) into blockSums when
// storeSum is set, then zeroes it so the down-sweep produces an exclusive scan.
void clearLastElement(unsigned* s_data,
    unsigned *blockSums,
    int blockIndex) {
    if (threadIdx.x == 0) {
        int index = (blockDim.x << 1) - 1;     // last of the 2*blockDim elements
        index += CONFLICT_FREE_OFFSET(index);  // account for bank padding
        if (storeSum) {
            blockSums[blockIndex] = s_data[index];
        }
        s_data[index] = 0;
    }
}
__device__
// Up-sweep (reduce) phase of the Blelloch scan over shared memory: builds
// partial sums in place in a binary-tree pattern.  Returns the final stride
// (2 * blockDim.x rounded to the tree top) for the subsequent down-sweep.
unsigned int buildSum(unsigned *s_data) {
    unsigned int thid = threadIdx.x;
    unsigned int stride = 1;
    for (int d = blockDim.x; d > 0; d >>= 1) {
        __syncthreads();                       // level d must be complete first
        if (thid < d) {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            ai += CONFLICT_FREE_OFFSET(ai);    // bank-conflict padding
            bi += CONFLICT_FREE_OFFSET(bi);
            s_data[bi] += s_data[ai];
        }
        stride *= 2;
    }
    return stride;
}
__device__
// Down-sweep phase of the Blelloch scan: starting from the cleared root,
// traverse the tree back to the leaves swapping and accumulating so that
// s_data ends up holding the exclusive prefix sums.  `stride` is the value
// returned by buildSum.
void scanRootToLeaves(unsigned *s_data, unsigned int stride) {
    unsigned int thid = threadIdx.x;
    for (int d = 1; d <= blockDim.x; d *= 2) {
        stride >>= 1;
        __syncthreads();                       // previous tree level must finish
        if (thid < d)
        {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            ai += CONFLICT_FREE_OFFSET(ai);    // bank-conflict padding
            bi += CONFLICT_FREE_OFFSET(bi);
            unsigned t = s_data[ai];
            s_data[ai] = s_data[bi];           // left child gets parent's prefix
            s_data[bi] += t;                   // right child adds left subtree sum
        }
    }
}
template <bool storeSum>
__device__
// Scan one block's chunk in shared memory: up-sweep, save/clear the total
// (into blockSums[blockIdx.x] unless an explicit blockIndex is given), then
// down-sweep to finish the exclusive scan.
void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums) {
    int stride = buildSum(data);
    clearLastElement<storeSum>(data, blockSums,
        (blockIndex == 0) ? blockIdx.x : blockIndex);
    scanRootToLeaves(data, stride);
}
template <bool storeSum, bool isNP2>
__global__
// Per-block exclusive scan kernel: load a 2*blockDim chunk into (dynamically
// sized) shared memory, scan it with prescanBlock, and store it back.
// storeSum: also emit the block total for the recursive multi-block scan.
// isNP2:    chunk length n is not a power of two; tail is zero-padded.
void prescan(unsigned *odata,
    const unsigned *idata,
    unsigned *blockSums,
    int n,
    int blockIndex,
    int baseIndex) {
    int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
    extern __shared__ unsigned s_data[];
    // baseIndex == 0 means "derive my chunk offset from blockIdx".
    loadSharedChunkFromMem<isNP2>(s_data, idata, n,
        (baseIndex == 0) ?
        __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex,
        ai, bi, mem_ai, mem_bi,
        bankOffsetA, bankOffsetB);
    prescanBlock<storeSum>(s_data, blockIndex, blockSums);
    storeSharedChunkToMem<isNP2>(odata, s_data, n,
        ai, bi, mem_ai, mem_bi,
        bankOffsetA, bankOffsetB);
}
__global__
void uniformAdd(unsigned *data,
unsigned *uniforms,
int n,
int blockOffset,
int baseIndex) {
__shared__ unsigned uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
data[address] += uni;
data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
} | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <map>
#include <iterator>
#include <algorithm>
#include <assert.h>
//#include <time.h>
using namespace std;
const int N = 2394385;
const int M = 5021410;
const int BLOCK_SIZE = 256;
const unsigned MAX_NUM = 4294967295;
const int NUM_BANKS = 16;
const int LOG_NUM_BANKS = 4;
//const string fn("test.txt");
const string fn("WikiTalk.txt");
//const string fn("soc-LiveJournal1.txt");
unsigned **scanBlockSums;
unsigned numEltsAllocated = 0;
unsigned numLevelsAllocated = 0;
__device__ unsigned Mterminate;
__device__ unsigned numActiveThreads;
__device__ unsigned *range;
__device__ unsigned *pivot;
#define FALSE 0u
#define TRUE 1u
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Abort-on-error helper behind the gpuErrchk macro: prints the HIP error
// string with the failing file/line, then exits with the error code.
// NOTE(review): the `abort` parameter is accepted but never consulted —
// every error exits unconditionally; confirm whether that is intended.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
    if (code != hipSuccess) {
        fprintf(stderr, "Error: %s\nFile %s, line %d\n", hipGetErrorString(code), file, line);
        exit(code);
    }
}
inline
// True iff n is a positive power of two.  The bare (n & (n-1)) == 0 trick
// misclassifies 0 (and, on two's complement, INT_MIN) as a power of two, so
// guard for n > 0 first.  All call sites here pass n >= 1, so this fix is
// backward-compatible for this program.
bool isPowerOfTwo(int n) {
    return n > 0 && (n & (n - 1)) == 0;
}
inline
// Largest power of two <= n (for n >= 1): frexp decomposes (float)n as
// m * 2^exp with m in [0.5, 1), so 1 << (exp - 1) is the floor power of two.
// NOTE(review): relies on float having enough precision for the n used here;
// exact only while n fits in a float's 24-bit mantissa — fine for block counts.
int floorPow2(int n) {
    int exp;
    frexp((float)n, &exp);
    return 1 << (exp - 1);
}
template <bool isNP2>
__device__
void loadSharedChunkFromMem(unsigned *s_data, const unsigned *idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB);
template <bool isNP2>
__device__
void storeSharedChunkToMem(unsigned* odata, const unsigned* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB);
template <bool storeSum>
__device__
void clearLastElement(unsigned* s_data, unsigned *blockSums, int blockIndex);
__device__
unsigned int buildSum(unsigned *s_data);
__device__
void scanRootToLeaves(unsigned *s_data, unsigned int stride);
template <bool storeSum>
__device__
void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums);
template <bool storeSum, bool isNP2>
__global__
void prescan(unsigned *odata, const unsigned *idata, unsigned *blockSums, int n, int blockIndex, int baseIndex);
__global__
void uniformAdd(unsigned *data, unsigned *uniforms, int n, int blockOffset, int baseIndex);
__global__
void BFSKernel1(
size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E,
unsigned *F, unsigned *X,unsigned *Fu);
__global__
void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu);
__global__
void TRIMKernel(size_t graphSize,unsigned *VF,unsigned *EF,unsigned *VB,unsigned *EB);
/*__global__
void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask);*/
__global__
void compactSIMD(size_t N, unsigned *prefixSums, unsigned *activeMask, size_t blockSize);
__global__
void PIVOTS_SEL_Kernel(size_t graphSize);
__global__
void UpdateKernel(size_t graphSize, unsigned *visF,unsigned *visB);
__global__
void UpdateKernel1(size_t graphSize,unsigned *visF,unsigned *visB,unsigned *ac);
__global__
void RenumberKernel(size_t graphSize,unsigned *prefixsum);
__global__
void initpivot(size_t graphSize);
__global__
void ReadPivot(size_t graphSize, unsigned *pivot_);
__host__
void setUInt(unsigned *address, unsigned value);
__host__
void Graphpreproc(const string filename, vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB);
__host__
void BFS(vector<unsigned> &V, vector<unsigned> &E, unsigned index_s, unsigned index_e, vector<unsigned> &visited);
__host__
void TRIMMING(vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB);
__host__
void PIVOTS_SEL();
__host__
void Update(vector<unsigned> &visF,vector<unsigned> &visB,unsigned &termin);
__host__
void Update1(vector<unsigned> &visF,vector<unsigned> &visB,vector<unsigned> &prefixsum, unsigned &termin);
__host__
void preallocBlockSums(unsigned maxNumElements);
__host__
void deallocBlockSums();
__host__
void prescanArrayRecursive(unsigned *outArray, const unsigned *inArray, int numElements, int level);
__host__
void prescanArray(unsigned *outArray, unsigned *inArray, int numElements);
// Driver for the forward-backward SCC decomposition: loads the graph, then
// iterates TRIM -> pivot selection -> forward/backward BFS -> range update
// until the update pass reports no change, timing the loop with HIP events.
int main()
{
    vector<unsigned> VF, EF, VB, EB, visF, visB,range_,pivot_,prefixsum_;
    vector<unsigned>::iterator itr;
    /*ofstream out("out1.txt");
    if(!out){
        cout << "Unable to open outfile";
        exit(1); // terminate with error
    }*/
    unsigned termin;
    unsigned *d_r,*d_p;                 // backing storage for the device globals
    unsigned *d_prefixsum;
    range_.resize(N);
    pivot_.resize(N);
    //prefixsum_.resize(N+1);
    VF.reserve(N + 1);
    EF.reserve(M);
    VB.reserve(N + 1);
    EB.reserve(M);
    Graphpreproc(fn, VF, EF, VB, EB);   // build forward + backward CSR
    unsigned index_s = 0;
    unsigned index_e = 0;               // current highest live range index
    unsigned loop = 0;
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // range[] starts all-zero (every vertex in range 0); pivot[] all MAX_NUM.
    gpuErrchk(hipMalloc(&d_r, N*sizeof(unsigned)));
    gpuErrchk(hipMemset(d_r, FALSE, N*sizeof(unsigned)));
    //gpuErrchk(cudaMemcpyToSymbol(range, &d_r, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
    gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(range), &d_r, sizeof(d_r)));
    gpuErrchk(hipMalloc(&d_p, N*sizeof(unsigned)));
    gpuErrchk(hipMemset(d_p, 255, N*sizeof(unsigned)));   // 0xFF bytes == MAX_NUM words
    //gpuErrchk(cudaMemcpyToSymbol(pivot, &d_p, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
    gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(pivot), &d_p, sizeof(d_p)));
    gpuErrchk(hipMalloc(&d_prefixsum, (N+1)*sizeof(unsigned)));
    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    while(true){
        loop++;
        TRIMMING(VF,EF,VB,EB);          // peel trivial SCCs
        PIVOTS_SEL();                   // one pivot per live range
        BFS(VF, EF, index_s, index_e, visF);   // forward reachability
        BFS(VB, EB, index_s, index_e ,visB);   // backward reachability
        if(true){
            // Renumbering variant: compact the 3x-expanded range ids.
            Update1(visF,visB,prefixsum_,termin);
            if(termin == FALSE){
                gpuErrchk(hipMemcpy(d_prefixsum, prefixsum_.data(), (N+1)*sizeof(unsigned), hipMemcpyHostToDevice));
                RenumberKernel<<<gridSizeK2,BLOCK_SIZE>>>(N,d_prefixsum);
                gpuErrchk(hipDeviceSynchronize());
                index_e = prefixsum_.at(N) - 1;   // total live ranges - 1
            }
        }
        else{
            // Non-renumbering variant (dead branch kept for comparison).
            Update(visF,visB,termin);
            index_e = 3 * index_e + 2;
        }
        if(termin)
            break;                      // no vertex changed range: converged
        initpivot<<<gridSizeK2,BLOCK_SIZE>>>(N);
        gpuErrchk(hipDeviceSynchronize());
    }
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    float time;
    hipEventElapsedTime(&time,start,stop);
    printf("time is %f\n",time);
    printf("%u\n", loop);
    gpuErrchk(hipFree(d_r));
    gpuErrchk(hipFree(d_p));
    gpuErrchk(hipFree(d_prefixsum));
    //out.close();
    return 0;
}
__host__
// Write a single unsigned word to device memory (one H2D memcpy per call).
void setUInt(unsigned *address, unsigned value) {
    gpuErrchk(hipMemcpy(address, &value, sizeof(unsigned), hipMemcpyHostToDevice));
}
__host__
// Parse a tab-separated edge list ("src<TAB>dst", '#' lines are comments) and
// build both CSR directions: VF/EF forward (out-edges) and VB/EB backward
// (in-edges, via a multimap keyed on destination).  Assumes the input edges
// are sorted by source vertex so the counting loops below line up.
// NOTE(review): the `while (!in_f.eof())` pattern processes the stream state
// before checking the last getline succeeded; blank-line skipping masks the
// usual duplicate-last-line symptom here — confirm on malformed inputs.
void Graphpreproc(const string filename, vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB)
{
    ifstream in_f;
    vector<unsigned> t;                 // source vertex of each edge, in file order
    vector<unsigned>::iterator itr;
    multimap<const unsigned, unsigned> m;   // dst -> src, auto-sorted by dst
    multimap<const unsigned, unsigned>::iterator mitr;
    unsigned count = 0;
    in_f.open(filename.c_str(), ios::in);
    while (!in_f.eof()){
        string temp, s1, s2;
        stringstream ss1, ss2;
        unsigned t1, t2;
        getline(in_f, temp);
        if(temp.length() == 0)
            continue;
        if (*(temp.begin()) == '#')
            continue;                   // comment line
        s1 = string(temp, 0, temp.find_first_of('\t'));
        s2 = string(temp, temp.find_first_not_of('\t', temp.find_first_of('\t')), temp.find_last_not_of('\t'));
        ss1 << s1;
        ss1 >> t1;
        ss2 << s2;
        ss2 >> t2;
        t.push_back(t1);
        m.insert(make_pair(t2, t1));
        EF.push_back(t2);
    }
    // Build forward row offsets: count out-degree of each vertex i.
    itr = t.begin();
    VF.push_back(0);
    for (int i = 0; i < N - 1; i++){
        while ((itr != t.end()) && (*itr == i)){
            count++;
            itr++;
        }
        count += VF.at(i);              // running (prefix) offset
        VF.push_back(count);
        count = 0;
    }
    VF.push_back(M);                    // sentinel: total edge count
    // Build backward row offsets from the dst-sorted multimap.
    mitr = m.begin();
    VB.push_back(0);
    for (int i = 0; i < N - 1; i++){
        while ((mitr != m.end()) && ((mitr->first) == i)){
            count++;
            mitr++;
        }
        count += VB.at(i);
        VB.push_back(count);
        count = 0;
    }
    VB.push_back(M);
    // Backward adjacency: sources grouped by destination.
    for (mitr = m.begin(); mitr != m.end(); mitr++){
        EB.push_back(mitr->second);
    }
}
__global__
// BFS expansion step: each active thread takes one frontier vertex v from
// activeMask, removes it from F, and marks every unvisited neighbor in the
// SAME range partition in the next-frontier array Fu.  Concurrent TRUE
// writes to Fu[neighbor] are benign (same value).
void BFSKernel1(
    size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E,
    unsigned *F, unsigned *X,unsigned *Fu) {
    unsigned activeMaskIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // If vertex is active at current iteration
    if (activeMaskIdx < numActiveThreads) {
        unsigned v = activeMask[activeMaskIdx];
        // Remove v from current frontier
        F[v] = FALSE;
        // Iterate over v's neighbors
        for (unsigned edge = V[v]; edge < V[v + 1]; ++edge) {
            unsigned neighbor = E[edge];
            // If neighbor wasn't visited
            if(range[v] == range[neighbor]){      // stay inside v's partition
                if (X[neighbor] == FALSE){
                    //C[neighbor] = C[v] + 1;
                    Fu[neighbor] = TRUE;
                }
            }
        }
    }
}
__global__
// BFS commit step: promote the tentative next frontier Fu into F, mark those
// vertices visited in X, clear Fu for the next round, and flag non-termination
// (any thread reaching here means the frontier is not empty).
void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu) {
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // If vertex v exists and has recently joined the frontier
    if (v < graphSize && Fu[v]) {
        // Copy the new frontier into F
        F[v] = TRUE;
        // Set v as visited
        X[v] = TRUE;
        // Clean up the new frontier
        Fu[v] = FALSE;
        Mterminate = FALSE;            // racy but monotone: any writer sets FALSE
    }
}
__global__
// One trimming pass: a live vertex with no forward neighbor OR no backward
// neighbor inside its own range partition forms a trivial SCC; eliminate it
// by setting range[v] = MAX_NUM and request another pass via Mterminate.
void TRIMKernel(size_t graphSize,unsigned *VF,unsigned *EF,unsigned *VB,unsigned *EB)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    bool elim;                          // only meaningful when range[v] != MAX_NUM
    if(v < graphSize){
        if(range[v] != MAX_NUM){
            elim = true;
            // Look for any same-range out-neighbor.
            for (unsigned edge = VF[v]; edge < VF[v + 1]; ++edge) {
                unsigned neighbor = EF[edge];
                if(range[neighbor] == range[v]){
                    elim = false;
                    break;
                }
            }
            if(elim == false){
                elim = true;
                // Has an out-neighbor; also require a same-range in-neighbor.
                for (unsigned edge = VB[v]; edge < VB[v + 1]; ++edge) {
                    unsigned neighbor = EB[edge];
                    if(range[neighbor] == range[v]){
                        elim = false;
                        break;
                    }
                }
            }
            if(elim == true){
                range[v] = MAX_NUM;     // v is a singleton SCC: remove it
                Mterminate = FALSE;     // something changed; run another pass
            }
        }
    }
}
__global__
void PIVOTS_SEL_Kernel(size_t graphSize)
{
    // Every live vertex races to publish itself as the pivot of its range;
    // whichever write lands last wins — any same-range vertex is acceptable.
    const int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (v >= graphSize)
        return;
    const unsigned r = range[v];
    if (r != MAX_NUM)
        pivot[r] = v;
}
__global__
// Range-update step of forward-backward SCC: classify each live vertex by its
// (forward, backward) reachability from its range's pivot and split the range
// into three sub-ranges.  Reached both ways => vertex is in the pivot's SCC
// (range becomes MAX_NUM, i.e. done); otherwise move to 3r, 3r+1 or 3r+2 and
// signal that another iteration is needed.
void UpdateKernel(size_t graphSize,unsigned *visF,unsigned *visB)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        if(range[v] != MAX_NUM){
            if(visF[v] == TRUE){
                if(visB[v] == TRUE){
                    range[v] = MAX_NUM;           // in the pivot's SCC: finished
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v];        // forward-only subgraph
                    Mterminate = FALSE;
                }
            }
            else if(visF[v] == FALSE){
                if(visB[v] == TRUE){
                    range[v] = 3*range[v] + 1;    // backward-only subgraph
                    Mterminate = FALSE;
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v] + 2;    // unreached remainder
                    Mterminate = FALSE;
                }
            }
        }
    }
}
__global__
// Same classification as UpdateKernel, but additionally records which new
// range ids are occupied (ac[new range] = TRUE) so the host can prefix-scan
// ac and renumber ranges densely (see Update1/RenumberKernel).
void UpdateKernel1(size_t graphSize,unsigned *visF,unsigned *visB,unsigned *ac)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        if(range[v] != MAX_NUM){
            if(visF[v] == TRUE){
                if(visB[v] == TRUE){
                    range[v] = MAX_NUM;           // in the pivot's SCC: finished
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v];        // forward-only subgraph
                    ac[range[v]] = TRUE;          // mark new range id as in use
                    Mterminate = FALSE;
                }
            }
            else if(visF[v] == FALSE){
                if(visB[v] == TRUE){
                    range[v] = 3*range[v] + 1;    // backward-only subgraph
                    ac[range[v]] = TRUE;
                    Mterminate = FALSE;
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v] + 2;    // unreached remainder
                    ac[range[v]] = TRUE;
                    Mterminate = FALSE;
                }
            }
        }
    }
}
__global__
// Remap each surviving vertex's range id to its compacted id from the
// exclusive prefix sums of the "range id in use" flags (see UpdateKernel1).
void RenumberKernel(size_t graphSize,unsigned *prefixsum)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        if(range[v] != MAX_NUM)        // MAX_NUM marks an eliminated vertex
            range[v] = prefixsum[range[v]];
    }
}
__global__
// Reset the pivot table: every slot back to "no pivot" (MAX_NUM).
void initpivot(size_t graphSize)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        pivot[v] = MAX_NUM;
    }
}
__global__
// Copy the device-global pivot table into a plain device buffer so the host
// can download it with an ordinary memcpy.
void ReadPivot(size_t graphSize ,unsigned *pivot_)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        pivot_[v] = pivot[v];
    }
}
// Very slow but correct "active mask" calculation; for debugging
/*__global__
void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask) {
numActiveThreads = 0;
for (int i = 0; i < graphSize; ++i) {
if (F[i]) {
activeMask[numActiveThreads] = i;
++numActiveThreads;
}
}
}*/
__global__
// Stream compaction: vertex v is in the new frontier iff the scanned flag
// array increased at position v; write its id at dense slot prefixSums[v].
void compactSIMD(size_t N, unsigned *prefixSums, unsigned *activeMask, size_t blockSize) {
    size_t v = blockIdx.x * blockSize + threadIdx.x;
    if (v < N) {
        // Can possibly be accelerated by using shared memory
        if (prefixSums[v + 1] != prefixSums[v]) {
            activeMask[prefixSums[v]] = v;
        }
    }
}
__host__
// HIP port of the frontier-based GPU BFS over the CSR graph (V = row offsets,
// E = adjacency), launched simultaneously from every pivot whose range index
// lies in [index_s, index_e].  On return, visited[v] == TRUE iff v was
// reached; BFSKernel1 only follows edges within the same range partition.
void BFS(vector<unsigned> &V, vector<unsigned> &E, unsigned index_s, unsigned index_e, vector<unsigned> &visited)
{
    assert(sizeof(unsigned) == 4);
    unsigned size_sub = index_e - index_s + 1;   // number of pivot slots to read
    visited.clear();
    visited.resize(N);
    vector<unsigned> pivot_h;                    // host copy of the pivot table
    pivot_h.resize(size_sub);
    unsigned *d_V, *d_E, *d_p;
    unsigned *d_F, *d_X, *d_Fu;                  // frontier, visited, next-frontier
    unsigned *activeMask, *prefixSums;
    //unsigned **prefixSums;
    size_t memSize = (N + 1) * sizeof(unsigned);
    size_t memSizeE = M * sizeof(unsigned);
    // Allocate and zero the three per-vertex flag arrays.
    gpuErrchk(hipMalloc(&d_F, memSize));
    gpuErrchk(hipMemset(d_F, FALSE, memSize));
    //setUInt(d_F + piv, TRUE); // add source to frontier
    gpuErrchk(hipMalloc(&d_X, memSize));
    gpuErrchk(hipMemset(d_X, FALSE, memSize));
    //setUInt(d_X + sourceVertex, TRUE); // set source as visited
    gpuErrchk(hipMalloc(&d_Fu, memSize));
    gpuErrchk(hipMemset(d_Fu, FALSE, memSize));
    // Upload the CSR graph.
    gpuErrchk(hipMalloc(&d_V, memSize));
    gpuErrchk(hipMemcpy(d_V, V.data(), memSize, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_E, memSizeE));
    gpuErrchk(hipMemcpy(d_E, E.data(), memSizeE, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&activeMask, memSize));
    //setUInt(activeMask + 0, sourceVertex); // set thread #source as active
    gpuErrchk(hipMalloc(&d_p, size_sub*sizeof(unsigned)));
    //gpuErrchk(cudaMemset(d_p, FALSE, N*sizeof(unsigned)));
    gpuErrchk(hipMalloc(&prefixSums, memSize));
    preallocBlockSums(N + 1);                    // scratch for the prefix-scan
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const size_t gridSizeK3 =
        (size_sub + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // Snapshot the device pivot table to the host.
    ReadPivot<<<gridSizeK3,BLOCK_SIZE>>>(size_sub,d_p);
    gpuErrchk(hipDeviceSynchronize());
    hipMemcpy(pivot_h.data(), d_p, size_sub*sizeof(unsigned), hipMemcpyDeviceToHost);
    unsigned numActiveThreadsHost = 0;
    // Seed frontier/visited/activeMask with every valid pivot, one memcpy per
    // word (setUInt).  NOTE(review): pivot_h has size_sub entries but is
    // indexed with i in [index_s, index_e]; this only lines up when
    // index_s == 0 — confirm against callers.
    for(int i = index_s;i <= index_e;i++){
        if(pivot_h.at(i) == MAX_NUM)
            continue;
        setUInt(d_F + pivot_h.at(i), TRUE);
        setUInt(d_X + pivot_h.at(i), TRUE);
        setUInt(activeMask + numActiveThreadsHost, pivot_h.at(i));
        numActiveThreadsHost++;
    }
    gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(numActiveThreads), &numActiveThreadsHost, sizeof(unsigned)));
    // Main loop
    /*const size_t prefixSumGridSize =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;*/
    while (true) {
        // Terminate <- TRUE
        unsigned terminateHost = TRUE;
        gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(Mterminate), &terminateHost, sizeof(unsigned)));
        // Kernel 1: need to assign ACTIVE vertices to SIMD lanes (threads)
        //gpuErrchk(cudaMemcpyFromSymbol(&numActiveThreadsHost, numActiveThreads, sizeof(unsigned)));
        const size_t gridSizeK1 =
            (numActiveThreadsHost + BLOCK_SIZE - 1) / BLOCK_SIZE;
        // launch kernel 1
        BFSKernel1 <<<gridSizeK1, BLOCK_SIZE >>> (N,activeMask, d_V, d_E, d_F, d_X,d_Fu);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(hipDeviceSynchronize());
        // Kernel 2: need to assign ALL vertices to SIMD lanes
        // launch kernel 2
        BFSKernel2 <<<gridSizeK2, BLOCK_SIZE >>> (N, d_F, d_X, d_Fu);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(hipDeviceSynchronize());
        gpuErrchk(hipMemcpyFromSymbol(&terminateHost, HIP_SYMBOL(Mterminate), sizeof(unsigned)));
        if (terminateHost) {
            break;                     // frontier empty: BFS converged
        }
        else {
            // Get prefix sums of F
            prescanArray(prefixSums, d_F, N + 1);
            //cudaMemcpy(&numActiveThreads, prefixSums + N, sizeof(unsigned), cudaMemcpyDeviceToDevice);
            // Total of F == number of active threads for the next iteration.
            hipMemcpy(&numActiveThreadsHost, prefixSums + N, sizeof(unsigned), hipMemcpyDeviceToHost);
            gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(numActiveThreads), &numActiveThreadsHost, sizeof(unsigned)));
            compactSIMD <<<gridSizeK2, BLOCK_SIZE>>> (N, prefixSums, activeMask, BLOCK_SIZE);
            //gpuErrchk(cudaPeekAtLastError());
            //getActiveMaskTemp<<<1,1>>>(N,d_F,activeMask);
            gpuErrchk(hipDeviceSynchronize());
            //gpuErrchk(cudaPeekAtLastError());
        }
    }
    // Download result
    //gpuErrchk(cudaMemcpy(distances.data(), d_C, memSize - sizeof(unsigned), cudaMemcpyDeviceToHost));
    gpuErrchk(hipMemcpy(visited.data(), d_X, memSize - sizeof(unsigned), hipMemcpyDeviceToHost));
    // Free memory
    gpuErrchk(hipFree(d_F));
    gpuErrchk(hipFree(d_X));
    gpuErrchk(hipFree(d_Fu));
    gpuErrchk(hipFree(d_V));
    gpuErrchk(hipFree(d_E));
    gpuErrchk(hipFree(d_p));
    gpuErrchk(hipFree(activeMask));
    deallocBlockSums();
    gpuErrchk(hipFree(prefixSums));
}
__host__
// HIP port of the iterated trimming pass: uploads both edge directions and
// repeatedly runs TRIMKernel until a full pass eliminates nothing
// (Mterminate stays TRUE), removing trivial (singleton) SCCs.
void TRIMMING(vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB)
{
    unsigned *d_VF, *d_EF, *d_VB, *d_EB;   // forward/backward CSR on device
    size_t memSize = (N + 1) * sizeof(unsigned);
    size_t memSizeE = M * sizeof(unsigned);
    gpuErrchk(hipMalloc(&d_VF, memSize));
    gpuErrchk(hipMemcpy(d_VF, VF.data(), memSize, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_EF, memSizeE));
    gpuErrchk(hipMemcpy(d_EF, EF.data(), memSizeE, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_VB, memSize));
    gpuErrchk(hipMemcpy(d_VB, VB.data(), memSize, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_EB, memSizeE));
    gpuErrchk(hipMemcpy(d_EB, EB.data(), memSizeE, hipMemcpyHostToDevice));
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    while (true) {
        // Terminate <- TRUE
        unsigned terminateHost = TRUE;
        gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(Mterminate), &terminateHost, sizeof(unsigned)));
        // launch kernel 2
        TRIMKernel <<<gridSizeK2, BLOCK_SIZE >>> (N, d_VF, d_EF, d_VB, d_EB);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(hipDeviceSynchronize());
        gpuErrchk(hipMemcpyFromSymbol(&terminateHost, HIP_SYMBOL(Mterminate), sizeof(unsigned)));
        if (terminateHost) {
            break;                     // fixed point reached: nothing trimmed
        }
    }
    gpuErrchk(hipFree(d_VF));
    gpuErrchk(hipFree(d_EF));
    gpuErrchk(hipFree(d_VB));
    gpuErrchk(hipFree(d_EB));
}
__host__
// Host wrapper: launch one PIVOTS_SEL_Kernel pass over all N vertices to
// (re)select a pivot for every live range partition.
void PIVOTS_SEL()
{
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    PIVOTS_SEL_Kernel<<<gridSizeK2, BLOCK_SIZE>>>(N);
    //gpuErrchk(cudaGetLastError());
    gpuErrchk(hipDeviceSynchronize());
}
__host__
// One UpdateKernel pass (HIP): uploads the forward/backward visitation flags,
// splits each range into the 3 sub-ranges (3r / 3r+1 / 3r+2) on the device,
// and reports via `termin` whether anything changed (FALSE = changed).
void Update(vector<unsigned> &visF,vector<unsigned> &visB,unsigned &termin)
{
    unsigned *d_vf,*d_vb;
    size_t memSize = N * sizeof(unsigned);
    gpuErrchk(hipMalloc(&d_vf, memSize));
    gpuErrchk(hipMemcpy(d_vf, visF.data(), memSize, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_vb, memSize));
    gpuErrchk(hipMemcpy(d_vb, visB.data(), memSize, hipMemcpyHostToDevice));
    // Terminate <- TRUE
    termin = TRUE;
    gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(Mterminate), &termin, sizeof(unsigned)));
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    UpdateKernel<<<gridSizeK2, BLOCK_SIZE>>>(N,d_vf,d_vb);
    gpuErrchk(hipDeviceSynchronize());
    gpuErrchk(hipMemcpyFromSymbol(&termin, HIP_SYMBOL(Mterminate), sizeof(unsigned)));
    gpuErrchk(hipFree(d_vf));
    gpuErrchk(hipFree(d_vb));
}
__host__
// Like Update(), but additionally marks which new range ids are occupied
// (d_ac) and, when anything changed, returns their exclusive prefix sums in
// `prefixsum` so the caller can renumber ranges densely (RenumberKernel).
void Update1(vector<unsigned> &visF,vector<unsigned> &visB,vector<unsigned> &prefixsum, unsigned &termin)
{
    prefixsum.clear();
    prefixsum.resize(N+1);
    unsigned *d_vf,*d_vb,*d_ac;        // d_ac: "range id in use" flags
    unsigned *prefixSums;
    size_t memSize = (N + 1) * sizeof(unsigned);
    gpuErrchk(hipMalloc(&d_ac, memSize));
    gpuErrchk(hipMemset(d_ac, FALSE, memSize));
    gpuErrchk(hipMalloc(&d_vf, memSize));
    // Only N entries are meaningful; the extra slot pads for the (N+1)-scan.
    gpuErrchk(hipMemcpy(d_vf, visF.data(), memSize - sizeof(unsigned), hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_vb, memSize));
    gpuErrchk(hipMemcpy(d_vb, visB.data(), memSize - sizeof(unsigned), hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&prefixSums, memSize));
    preallocBlockSums(N + 1);
    // Terminate <- TRUE
    termin = TRUE;
    gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(Mterminate), &termin, sizeof(unsigned)));
    const size_t gridSizeK2 =
        (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    UpdateKernel1<<<gridSizeK2, BLOCK_SIZE>>>(N,d_vf,d_vb,d_ac);
    gpuErrchk(hipDeviceSynchronize());
    gpuErrchk(hipMemcpyFromSymbol(&termin, HIP_SYMBOL(Mterminate), sizeof(unsigned)));
    if(termin == FALSE){
        prescanArray(prefixSums, d_ac, N + 1);
        //gpuErrchk(cudaMemcpyToSymbol(prefixsum, &prefixSums, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
        gpuErrchk(hipMemcpy(prefixsum.data(), prefixSums, memSize, hipMemcpyDeviceToHost));
    }
    gpuErrchk(hipFree(d_vf));
    gpuErrchk(hipFree(d_vb));
    gpuErrchk(hipFree(d_ac));
    deallocBlockSums();
    gpuErrchk(hipFree(prefixSums));
}
__host__
void preallocBlockSums(unsigned maxNumElements) {
    // Pre-allocate the per-level auxiliary arrays used by
    // prescanArrayRecursive(). Each recursion level that needs more than
    // one block gets a device array holding one total per block.
    numEltsAllocated = maxNumElements;
    const unsigned blockSize = BLOCK_SIZE;

    // Pass 1: count how many levels require an auxiliary array.
    int level = 0;
    unsigned remaining = maxNumElements;
    do {
        unsigned blocks =
            max(1, (int)ceil((float)remaining / (2.f * blockSize)));
        if (blocks > 1) {
            level++;
        }
        remaining = blocks;
    } while (remaining > 1);

    scanBlockSums = (unsigned**)malloc(level * sizeof(unsigned*));
    numLevelsAllocated = level;

    // Pass 2: allocate one device array per multi-block level.
    remaining = maxNumElements;
    level = 0;
    do {
        unsigned blocks =
            max(1, (int)ceil((float)remaining / (2.f * blockSize)));
        if (blocks > 1) {
            gpuErrchk(hipMalloc(&scanBlockSums[level++], blocks * sizeof(unsigned)));
        }
        remaining = blocks;
    } while (remaining > 1);
}
__host__
void deallocBlockSums() {
    // Release every per-level device array, then the host-side table,
    // and reset the bookkeeping globals so a later prealloc starts clean.
    for (unsigned lvl = 0; lvl < numLevelsAllocated; ++lvl) {
        hipFree(scanBlockSums[lvl]);
    }
    free(scanBlockSums);
    scanBlockSums = 0;
    numEltsAllocated = 0;
    numLevelsAllocated = 0;
}
__host__
// Recursive work-efficient (Blelloch) exclusive scan of numElements
// unsigned ints from inArray into outArray (both device pointers).
// Each level scans chunks of 2*BLOCK_SIZE elements per block; per-block
// totals are written to scanBlockSums[level], scanned recursively, and
// added back with uniformAdd. preallocBlockSums() must have been called
// with a size >= numElements beforehand.
void prescanArrayRecursive(unsigned *outArray,
                           const unsigned *inArray,
                           int numElements,
                           int level) {
    unsigned blockSize = BLOCK_SIZE;
    // Each block processes 2*blockSize elements (two per thread).
    unsigned numBlocks =
        max(1, (int)ceil((float)numElements / (2.f * blockSize)));
    unsigned numThreads;
    if (numBlocks > 1)
        numThreads = blockSize;
    else if (isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = floorPow2(numElements);
    unsigned numEltsPerBlock = numThreads * 2;
    // The final block may be partial and/or not a power of two in size.
    unsigned numEltsLastBlock =
        numElements - (numBlocks - 1) * numEltsPerBlock;
    unsigned numThreadsLastBlock = max(1u, numEltsLastBlock / 2);
    unsigned np2LastBlock = 0;
    unsigned sharedMemLastBlock = 0;
    if (numEltsLastBlock != numEltsPerBlock) {
        // Partial last block: launched separately with the isNP2 kernel.
        np2LastBlock = 1;
        if (!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);
        // Extra shared memory for the bank-conflict padding.
        unsigned extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock =
            sizeof(unsigned)* (2 * numThreadsLastBlock + extraSpace);
    }
    // Avoid shared memory bank conflicts
    unsigned extraSpace = numEltsPerBlock / NUM_BANKS;
    unsigned sharedMemSize =
        sizeof(unsigned)* (numEltsPerBlock + extraSpace);
    // Grid covers only the full blocks; the partial block runs on its own.
    dim3 grid(max(1u, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);
    // Main action
    if (numBlocks > 1) {
        // Multi-block: scan full blocks, storing each block's total.
        prescan<true, false> <<< grid, threads, sharedMemSize >>> (
            outArray, inArray, scanBlockSums[level], numThreads * 2, 0, 0);
        if (np2LastBlock) {
            prescan<true, true> <<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (
                outArray, inArray, scanBlockSums[level], numEltsLastBlock,
                numBlocks - 1, numElements - numEltsLastBlock);
        }
        // Scan the per-block totals in place, then add each scanned total
        // to every element of its corresponding block.
        prescanArrayRecursive(scanBlockSums[level], scanBlockSums[level], numBlocks, level + 1);
        uniformAdd <<< grid, threads >>> (
            outArray, scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
        if (np2LastBlock) {
            uniformAdd <<<1, numThreadsLastBlock >>> (
                outArray, scanBlockSums[level], numEltsLastBlock,
                numBlocks - 1, numElements - numEltsLastBlock);
        }
    }
    else if (isPowerOfTwo(numElements)) {
        // Single full power-of-two block: no block-sums array needed.
        prescan<false, false> <<<grid, threads, sharedMemSize >>> (
            outArray, inArray, 0, numThreads * 2, 0, 0);
    }
    else {
        // Single non-power-of-two block.
        prescan<false, true> <<<grid, threads, sharedMemSize >>> (
            outArray, inArray, 0, numElements, 0, 0);
    }
}
__host__
// Exclusive prefix scan of numElements unsigned ints from inArray into
// outArray (device pointers); entry point for prescanArrayRecursive.
void prescanArray(unsigned *outArray, unsigned *inArray, int numElements) {
    prescanArrayRecursive(outArray, inArray, numElements, 0);
}
template <bool isNP2>
// Load two elements per thread from global memory into the shared scan
// buffer, padding indices to avoid shared-memory bank conflicts.
// Outputs (by reference): ai/bi are the logical shared indices,
// mem_ai/mem_bi the global indices, bankOffsetA/B the padding offsets --
// the caller passes them unchanged to storeSharedChunkToMem.
// When isNP2 (chunk smaller than 2*blockDim.x), second elements past n
// are read as 0. The first load is unguarded; it assumes threadIdx.x < n,
// which the launch configuration is expected to guarantee -- confirm.
__device__ void loadSharedChunkFromMem(unsigned *s_data,
                                       const unsigned *idata,
                                       int n, int baseIndex,
                                       int& ai, int& bi,
                                       int& mem_ai, int& mem_bi,
                                       int& bankOffsetA, int& bankOffsetB) {
    int thid = threadIdx.x;
    mem_ai = baseIndex + threadIdx.x;
    mem_bi = mem_ai + blockDim.x;
    ai = thid;
    bi = thid + blockDim.x;
    // Pad so consecutive threads touch distinct shared-memory banks.
    bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    s_data[ai + bankOffsetA] = idata[mem_ai];
    if (isNP2) {
        // The second element may fall past the chunk's n valid entries.
        s_data[bi + bankOffsetB] = (bi < n) ? idata[mem_bi] : 0;
    }
    else {
        s_data[bi + bankOffsetB] = idata[mem_bi];
    }
}
template <bool isNP2>
__device__
// Mirror of loadSharedChunkFromMem: write each thread's two scanned
// elements back to global memory. The barrier makes every thread's scan
// results visible in shared memory before any store. For isNP2 chunks the
// second store is skipped when it would exceed the n valid elements.
void storeSharedChunkToMem(unsigned* odata,
                           const unsigned* s_data,
                           int n,
                           int ai, int bi,
                           int mem_ai, int mem_bi,
                           int bankOffsetA, int bankOffsetB) {
    __syncthreads();
    odata[mem_ai] = s_data[ai + bankOffsetA];
    if (isNP2) {
        if (bi < n)
            odata[mem_bi] = s_data[bi + bankOffsetB];
    }
    else {
        odata[mem_bi] = s_data[bi + bankOffsetB];
    }
}
template <bool storeSum>
__device__
// Thread 0 zeroes the last element of the shared scan buffer (turning the
// up-sweep total into the seed of an exclusive scan), after optionally
// saving that total to blockSums[blockIndex] for the multi-block pass.
void clearLastElement(unsigned* s_data,
                      unsigned *blockSums,
                      int blockIndex) {
    if (threadIdx.x == 0) {
        // Last logical element of the 2*blockDim.x chunk, plus padding.
        int index = (blockDim.x << 1) - 1;
        index += CONFLICT_FREE_OFFSET(index);
        if (storeSum) {
            blockSums[blockIndex] = s_data[index];
        }
        s_data[index] = 0;
    }
}
__device__
// Up-sweep (reduce) phase of the Blelloch scan: builds partial sums in
// place in shared memory. Returns the stride after the final pass
// (2*blockDim.x when blockDim.x is a power of two), which the caller
// feeds to scanRootToLeaves.
unsigned int buildSum(unsigned *s_data) {
    unsigned int thid = threadIdx.x;
    unsigned int stride = 1;
    // Each pass halves the active-thread count and doubles the distance
    // between the pair of elements being combined.
    for (int d = blockDim.x; d > 0; d >>= 1) {
        __syncthreads();
        if (thid < d) {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            // Apply the bank-conflict padding to both indices.
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            s_data[bi] += s_data[ai];
        }
        stride *= 2;
    }
    return stride;
}
__device__
// Down-sweep phase of the Blelloch scan: walks the implicit sum tree from
// root to leaves, converting the reduced values into exclusive prefix
// sums. `stride` is the value returned by buildSum().
void scanRootToLeaves(unsigned *s_data, unsigned int stride) {
    unsigned int thid = threadIdx.x;
    for (int d = 1; d <= blockDim.x; d *= 2) {
        stride >>= 1;
        __syncthreads();
        if (thid < d)
        {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            // Classic down-sweep: left child gets the parent's value,
            // right child gets parent + old left.
            unsigned t = s_data[ai];
            s_data[ai] = s_data[bi];
            s_data[bi] += t;
        }
    }
}
template <bool storeSum>
__device__
// Exclusive scan of one block's chunk held in shared memory: up-sweep,
// clear/record the chunk total, then down-sweep. When blockIndex is 0 the
// block's own blockIdx.x selects the blockSums slot (used by the
// full-block multi-block launch); otherwise the explicit index is used
// (the separately-launched partial last block).
void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums) {
    int stride = buildSum(data);
    clearLastElement<storeSum>(data, blockSums,
                               (blockIndex == 0) ? blockIdx.x : blockIndex);
    scanRootToLeaves(data, stride);
}
template <bool storeSum, bool isNP2>
__global__
// Per-block exclusive scan kernel: load a 2*blockDim.x chunk into dynamic
// shared memory (sized by the launcher to include bank padding), scan it,
// and store the results. storeSum: record the chunk total in blockSums;
// isNP2: the chunk has fewer than 2*blockDim.x valid elements (n).
void prescan(unsigned *odata,
             const unsigned *idata,
             unsigned *blockSums,
             int n,
             int blockIndex,
             int baseIndex) {
    int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
    extern __shared__ unsigned s_data[];
    // When no explicit baseIndex is given, each block starts at its own
    // 2*blockDim.x-sized chunk of the input.
    loadSharedChunkFromMem<isNP2>(s_data, idata, n,
                                  (baseIndex == 0) ?
                                  __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex,
                                  ai, bi, mem_ai, mem_bi,
                                  bankOffsetA, bankOffsetB);
    prescanBlock<storeSum>(s_data, blockIndex, blockSums);
    storeSharedChunkToMem<isNP2>(odata, s_data, n,
                                 ai, bi, mem_ai, mem_bi,
                                 bankOffsetA, bankOffsetB);
}
__global__
// Adds this block's scanned block-sum (uniforms[blockIdx.x + blockOffset])
// to each of the two elements every thread owns, completing the
// multi-block exclusive scan. n is the number of valid elements for this
// launch; baseIndex offsets into `data` for the partial last block.
void uniformAdd(unsigned *data,
                unsigned *uniforms,
                int n,
                int blockOffset,
                int baseIndex) {
    __shared__ unsigned uni;
    // One thread fetches the block's uniform; the barrier below publishes
    // it to the rest of the block.
    if (threadIdx.x == 0)
        uni = uniforms[blockIdx.x + blockOffset];
    unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
    __syncthreads();
    data[address] += uni;
    // Fix: the original wrote
    //   data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
    // which still performed an out-of-bounds read-modify-write (adding 0)
    // whenever a partial last block put address + blockDim.x past the end
    // of `data` (the buffers in this file are sized to exactly numElements).
    // Guarding with the same predicate preserves every in-bounds result
    // and removes the out-of-range access.
    if (threadIdx.x + blockDim.x < n)
        data[address + blockDim.x] += uni;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void kernel(double *a, int n, double k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
for(i = idx; i < n; i += offsetx)
for(j = idy; j < n; j += offsety)
a[j * n + i] *= k;
}
int main() {
int i, n = 1024;
double *a = (double*)malloc(sizeof(double) * n * n);
for(i = 0; i < n * n; i++)
a[i] = i;
double *dev_a;
cudaMalloc(&dev_a, sizeof(double) * n * n);
cudaMemcpy(dev_a, a, sizeof(double) * n * n, cudaMemcpyHostToDevice);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
cudaMemcpy(a, dev_a, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
// a...
// for(i = 0; i < n * n; i++)
// printf("%f ", a[i]);
// printf("\n");
cudaFree(dev_a);
free(a);
return 0;
} | code for sm_80
Function : _Z6kernelPdid
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fe400078e0203 */
/*0040*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e260000002600 */
/*0050*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff007624 */
/* 0x000fe200078e00ff */
/*0080*/ S2R R8, SR_TID.Y ; /* 0x0000000000087919 */
/* 0x000e620000002200 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ IMAD R0, R0, c[0x0][0x10], RZ ; /* 0x0000040000007a24 */
/* 0x000fc800078e02ff */
/*00b0*/ I2F.U32.RP R2, R0 ; /* 0x0000000000027306 */
/* 0x000ea20000209000 */
/*00c0*/ IADD3 R11, RZ, -R0, RZ ; /* 0x80000000ff0b7210 */
/* 0x000fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fca0003f45070 */
/*00e0*/ MUFU.RCP R2, R2 ; /* 0x0000000200027308 */
/* 0x004ea20000001000 */
/*00f0*/ IMAD R3, R3, c[0x0][0x4], R8 ; /* 0x0000010003037a24 */
/* 0x003fc800078e0208 */
/*0100*/ IMAD.IADD R5, R3, 0x1, R0 ; /* 0x0000000103057824 */
/* 0x000fca00078e0200 */
/*0110*/ LOP3.LUT R9, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff097212 */
/* 0x000fc800078e33ff */
/*0120*/ IADD3 R9, R9, c[0x0][0x168], R0 ; /* 0x00005a0009097a10 */
/* 0x000fe40007ffe000 */
/*0130*/ IADD3 R6, R2, 0xffffffe, RZ ; /* 0x0ffffffe02067810 */
/* 0x004fc80007ffe0ff */
/*0140*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x000064000021f000 */
/*0150*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x001fe200000001ff */
/*0160*/ IMAD R11, R11, R7, RZ ; /* 0x000000070b0b7224 */
/* 0x002fd200078e02ff */
/*0170*/ IMAD.HI.U32 R2, R7, R11, R6 ; /* 0x0000000b07027227 */
/* 0x000fc800078e0006 */
/*0180*/ IMAD.IADD R7, R0, 0x1, R5 ; /* 0x0000000100077824 */
/* 0x000fe400078e0205 */
/*0190*/ IMAD.HI.U32 R2, R2, R9, RZ ; /* 0x0000000902027227 */
/* 0x000fc600078e00ff */
/*01a0*/ IADD3 R8, R0, R7, RZ ; /* 0x0000000700087210 */
/* 0x000fe20007ffe0ff */
/*01b0*/ IMAD.MOV R6, RZ, RZ, -R2 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0a02 */
/*01c0*/ IMAD R9, R0.reuse, R6, R9 ; /* 0x0000000600097224 */
/* 0x040fe400078e0209 */
/*01d0*/ IMAD R6, R0, c[0x0][0x168], RZ ; /* 0x00005a0000067a24 */
/* 0x000fc600078e02ff */
/*01e0*/ ISETP.GE.U32.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fda0003f06070 */
/*01f0*/ @P0 IADD3 R9, -R0, R9, RZ ; /* 0x0000000900090210 */
/* 0x000fe40007ffe1ff */
/*0200*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*0210*/ ISETP.GE.U32.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fda0003f26070 */
/*0220*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*0230*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*0240*/ IADD3 R9, R2, 0x1, RZ ; /* 0x0000000102097810 */
/* 0x000fc80007ffe0ff */
/*0250*/ LOP3.LUT R9, R9, 0x3, RZ, 0xc0, !PT ; /* 0x0000000309097812 */
/* 0x000fe400078ec0ff */
/*0260*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x168], PT ; /* 0x00005a0003007a0c */
/* 0x000fe20003f06270 */
/*0270*/ BSSY B0, 0x5e0 ; /* 0x0000036000007945 */
/* 0x000fd80003800000 */
/*0280*/ @P0 BRA 0x5d0 ; /* 0x0000034000000947 */
/* 0x000fea0003800000 */
/*0290*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*02a0*/ BSSY B1, 0x450 ; /* 0x000001a000017945 */
/* 0x000fe20003800000 */
/*02b0*/ IMAD.MOV.U32 R15, RZ, RZ, R3 ; /* 0x000000ffff0f7224 */
/* 0x000fd600078e0003 */
/*02c0*/ @!P0 BRA 0x440 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*02d0*/ HFMA2.MMA R17, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff117435 */
/* 0x000fe200000001ff */
/*02e0*/ IMAD R10, R3, c[0x0][0x168], R4 ; /* 0x00005a00030a7a24 */
/* 0x000fd200078e0204 */
/*02f0*/ IMAD.WIDE R10, R10, R17, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fca00078e0211 */
/*0300*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0310*/ ISETP.NE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fe20003f05270 */
/*0320*/ IMAD.MOV.U32 R15, RZ, RZ, R5 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0005 */
/*0330*/ DMUL R12, R12, c[0x0][0x170] ; /* 0x00005c000c0c7a28 */
/* 0x004e0e0000000000 */
/*0340*/ STG.E.64 [R10.64], R12 ; /* 0x0000000c0a007986 */
/* 0x0011e8000c101b04 */
/*0350*/ @!P0 BRA 0x440 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0360*/ IMAD R10, R5, c[0x0][0x168], R4 ; /* 0x00005a00050a7a24 */
/* 0x001fc800078e0204 */
/*0370*/ IMAD.WIDE R10, R10, R17, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fca00078e0211 */
/*0380*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0390*/ ISETP.NE.AND P0, PT, R9, 0x2, PT ; /* 0x000000020900780c */
/* 0x000fe40003f05270 */
/*03a0*/ MOV R15, R7 ; /* 0x00000007000f7202 */
/* 0x000fe20000000f00 */
/*03b0*/ DMUL R12, R12, c[0x0][0x170] ; /* 0x00005c000c0c7a28 */
/* 0x004e0e0000000000 */
/*03c0*/ STG.E.64 [R10.64], R12 ; /* 0x0000000c0a007986 */
/* 0x0011e6000c101b04 */
/*03d0*/ @!P0 BRA 0x440 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*03e0*/ IMAD R10, R7, c[0x0][0x168], R4 ; /* 0x00005a00070a7a24 */
/* 0x001fc800078e0204 */
/*03f0*/ IMAD.WIDE R10, R10, R17, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fca00078e0211 */
/*0400*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0410*/ IMAD.MOV.U32 R15, RZ, RZ, R8 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0008 */
/*0420*/ DMUL R12, R12, c[0x0][0x170] ; /* 0x00005c000c0c7a28 */
/* 0x004e0e0000000000 */
/*0430*/ STG.E.64 [R10.64], R12 ; /* 0x0000000c0a007986 */
/* 0x0011e8000c101b04 */
/*0440*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0450*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fda0003f06070 */
/*0460*/ @!P0 BRA 0x5d0 ; /* 0x0000016000008947 */
/* 0x000fea0003800000 */
/*0470*/ MOV R19, 0x8 ; /* 0x0000000800137802 */
/* 0x001fe20000000f00 */
/*0480*/ IMAD R18, R15, c[0x0][0x168], R4 ; /* 0x00005a000f127a24 */
/* 0x000fc800078e0204 */
/*0490*/ IMAD.WIDE R18, R18, R19, c[0x0][0x160] ; /* 0x0000580012127625 */
/* 0x000fca00078e0213 */
/*04a0*/ LDG.E.64 R10, [R18.64] ; /* 0x00000004120a7981 */
/* 0x001ea4000c1e1b00 */
/*04b0*/ DMUL R20, R10, c[0x0][0x170] ; /* 0x00005c000a147a28 */
/* 0x0040640000000000 */
/*04c0*/ IMAD.WIDE R10, R6, 0x8, R18 ; /* 0x00000008060a7825 */
/* 0x001fca00078e0212 */
/*04d0*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */
/* 0x0021e8000c101b04 */
/*04e0*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea4000c1e1b00 */
/*04f0*/ DMUL R22, R12, c[0x0][0x170] ; /* 0x00005c000c167a28 */
/* 0x0042a40000000000 */
/*0500*/ IMAD.WIDE R12, R6, 0x8, R10 ; /* 0x00000008060c7825 */
/* 0x002fca00078e020a */
/*0510*/ STG.E.64 [R10.64], R22 ; /* 0x000000160a007986 */
/* 0x0041e8000c101b04 */
/*0520*/ LDG.E.64 R16, [R12.64] ; /* 0x000000040c107981 */
/* 0x000ea4000c1e1b00 */
/*0530*/ DMUL R24, R16, c[0x0][0x170] ; /* 0x00005c0010187a28 */
/* 0x0042a40000000000 */
/*0540*/ IMAD.WIDE R16, R6, 0x8, R12 ; /* 0x0000000806107825 */
/* 0x002fca00078e020c */
/*0550*/ STG.E.64 [R12.64], R24 ; /* 0x000000180c007986 */
/* 0x0041e8000c101b04 */
/*0560*/ LDG.E.64 R26, [R16.64] ; /* 0x00000004101a7981 */
/* 0x000ea2000c1e1b00 */
/*0570*/ IADD3 R15, R0, R15, R0 ; /* 0x0000000f000f7210 */
/* 0x000fc80007ffe000 */
/*0580*/ IADD3 R15, R0, R15, R0 ; /* 0x0000000f000f7210 */
/* 0x000fc80007ffe000 */
/*0590*/ ISETP.GE.AND P0, PT, R15, c[0x0][0x168], PT ; /* 0x00005a000f007a0c */
/* 0x000fe20003f06270 */
/*05a0*/ DMUL R26, R26, c[0x0][0x170] ; /* 0x00005c001a1a7a28 */
/* 0x004e4e0000000000 */
/*05b0*/ STG.E.64 [R16.64], R26 ; /* 0x0000001a10007986 */
/* 0x0021ea000c101b04 */
/*05c0*/ @!P0 BRA 0x470 ; /* 0xfffffea000008947 */
/* 0x000fea000383ffff */
/*05d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*05e0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x001fca0000000f00 */
/*05f0*/ IMAD R4, R11, c[0x0][0xc], R4 ; /* 0x000003000b047a24 */
/* 0x000fca00078e0204 */
/*0600*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f06270 */
/*0610*/ @!P0 BRA 0x260 ; /* 0xfffffc4000008947 */
/* 0x000fea000383ffff */
/*0620*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0630*/ BRA 0x630; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void kernel(double *a, int n, double k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
for(i = idx; i < n; i += offsetx)
for(j = idy; j < n; j += offsety)
a[j * n + i] *= k;
}
int main() {
int i, n = 1024;
double *a = (double*)malloc(sizeof(double) * n * n);
for(i = 0; i < n * n; i++)
a[i] = i;
double *dev_a;
cudaMalloc(&dev_a, sizeof(double) * n * n);
cudaMemcpy(dev_a, a, sizeof(double) * n * n, cudaMemcpyHostToDevice);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
cudaMemcpy(a, dev_a, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
// a...
// for(i = 0; i < n * n; i++)
// printf("%f ", a[i]);
// printf("\n");
cudaFree(dev_a);
free(a);
return 0;
} | .file "tmpxft_000bded3_00000000-6_prof.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z6kernelPdidPdid
.type _Z27__device_stub__Z6kernelPdidPdid, @function
_Z27__device_stub__Z6kernelPdidPdid:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movsd %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPdid(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z6kernelPdidPdid, .-_Z27__device_stub__Z6kernelPdidPdid
.globl _Z6kernelPdid
.type _Z6kernelPdid, @function
_Z6kernelPdid:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z6kernelPdidPdid
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPdid, .-_Z6kernelPdid
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbx
movl $0, %eax
.L12:
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movsd %xmm0, (%rbx,%rax,8)
addq $1, %rax
cmpq $1048576, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $8388608, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $32, 28(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $16, 16(%rsp)
movl $16, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movl $32, 28(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $16, 16(%rsp)
movl $16, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L14:
movl $2, %ecx
movl $8388608, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movsd .LC0(%rip), %xmm0
movl $1024, %esi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z6kernelPdidPdid
jmp .L13
.L19:
movsd .LC0(%rip), %xmm0
movl $1024, %esi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z6kernelPdidPdid
jmp .L14
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z6kernelPdid"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPdid(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 1717986918
.long 1073899110
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void kernel(double *a, int n, double k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
for(i = idx; i < n; i += offsetx)
for(j = idy; j < n; j += offsety)
a[j * n + i] *= k;
}
int main() {
int i, n = 1024;
double *a = (double*)malloc(sizeof(double) * n * n);
for(i = 0; i < n * n; i++)
a[i] = i;
double *dev_a;
cudaMalloc(&dev_a, sizeof(double) * n * n);
cudaMemcpy(dev_a, a, sizeof(double) * n * n, cudaMemcpyHostToDevice);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
cudaMemcpy(a, dev_a, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
// a...
// for(i = 0; i < n * n; i++)
// printf("%f ", a[i]);
// printf("\n");
cudaFree(dev_a);
free(a);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void kernel(double *a, int n, double k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
for(i = idx; i < n; i += offsetx)
for(j = idy; j < n; j += offsety)
a[j * n + i] *= k;
}
int main() {
int i, n = 1024;
double *a = (double*)malloc(sizeof(double) * n * n);
for(i = 0; i < n * n; i++)
a[i] = i;
double *dev_a;
hipMalloc(&dev_a, sizeof(double) * n * n);
hipMemcpy(dev_a, a, sizeof(double) * n * n, hipMemcpyHostToDevice);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
hipMemcpy(a, dev_a, sizeof(double) * n * n, hipMemcpyDeviceToHost);
// a...
// for(i = 0; i < n * n; i++)
// printf("%f ", a[i]);
// printf("\n");
hipFree(dev_a);
free(a);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void kernel(double *a, int n, double k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
for(i = idx; i < n; i += offsetx)
for(j = idy; j < n; j += offsety)
a[j * n + i] *= k;
}
int main() {
int i, n = 1024;
double *a = (double*)malloc(sizeof(double) * n * n);
for(i = 0; i < n * n; i++)
a[i] = i;
double *dev_a;
hipMalloc(&dev_a, sizeof(double) * n * n);
hipMemcpy(dev_a, a, sizeof(double) * n * n, hipMemcpyHostToDevice);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
hipMemcpy(a, dev_a, sizeof(double) * n * n, hipMemcpyDeviceToHost);
// a...
// for(i = 0; i < n * n; i++)
// printf("%f ", a[i]);
// printf("\n");
hipFree(dev_a);
free(a);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPdid
.globl _Z6kernelPdid
.p2align 8
.type _Z6kernelPdid,@function
_Z6kernelPdid:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s6, s[0:1], 0x8
v_and_b32_e32 v2, 0x3ff, v0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s7, s4, 0xffff
s_mov_b32 s4, exec_lo
s_mul_i32 s14, s14, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, s14, v2
v_cmpx_gt_i32_e64 s6, v3
s_cbranch_execz .LBB0_6
s_clause 0x1
s_load_b32 s4, s[2:3], 0xc
s_load_b64 s[8:9], s[2:3], 0x0
v_bfe_u32 v4, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s10, s4, 16
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b64 s[4:5], s[0:1], 0x10
v_mad_u64_u32 v[0:1], null, s15, s10, v[4:5]
s_mul_i32 s1, s9, s10
s_mul_i32 s7, s8, s7
s_mul_i32 s8, s1, s6
s_mov_b32 s9, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_lo_u32 v1, s6, v0
v_cmp_gt_i32_e32 vcc_lo, s6, v0
v_add3_u32 v4, v2, v1, s14
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s10
v_add_nc_u32_e32 v3, s7, v3
v_add_nc_u32_e32 v4, s7, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_i32_e64 s0, s6, v3
s_or_b32 s9, s0, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execz .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s10, vcc_lo
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v1, v4
v_mov_b32_e32 v5, v0
s_mov_b32 s11, 0
.p2align 6
.LBB0_5:
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_add_nc_u32_e32 v5, s1, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 3, v[1:2]
v_add_nc_u32_e32 v1, s8, v1
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, s0, s2, v6
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v7, s0, s3, v7, s0
v_cmp_le_i32_e64 s0, s6, v5
global_load_b64 v[8:9], v[6:7], off
s_or_b32 s11, s0, s11
s_waitcnt vmcnt(0)
v_mul_f64 v[8:9], v[8:9], s[4:5]
global_store_b64 v[6:7], v[8:9], off
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPdid
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPdid, .Lfunc_end0-_Z6kernelPdid
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPdid
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPdid.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void kernel(double *a, int n, double k) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
for(i = idx; i < n; i += offsetx)
for(j = idy; j < n; j += offsety)
a[j * n + i] *= k;
}
int main() {
int i, n = 1024;
double *a = (double*)malloc(sizeof(double) * n * n);
for(i = 0; i < n * n; i++)
a[i] = i;
double *dev_a;
hipMalloc(&dev_a, sizeof(double) * n * n);
hipMemcpy(dev_a, a, sizeof(double) * n * n, hipMemcpyHostToDevice);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
kernel<<<dim3(16, 16), dim3(32, 8)>>>(dev_a, n, 2.3);
hipMemcpy(a, dev_a, sizeof(double) * n * n, hipMemcpyDeviceToHost);
// a...
// for(i = 0; i < n * n; i++)
// printf("%f ", a[i]);
// printf("\n");
hipFree(dev_a);
free(a);
return 0;
} | .text
.file "prof.hip"
.globl _Z21__device_stub__kernelPdid # -- Begin function _Z21__device_stub__kernelPdid
.p2align 4, 0x90
.type _Z21__device_stub__kernelPdid,@function
_Z21__device_stub__kernelPdid: # @_Z21__device_stub__kernelPdid
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movsd %xmm0, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdid, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPdid, .Lfunc_end0-_Z21__device_stub__kernelPdid
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd %xmm0, (%rbx,%rax,8)
incq %rax
cmpq $1048576, %rax # imm = 0x100000
jne .LBB1_1
# %bb.2:
movabsq $68719476752, %r15 # imm = 0x1000000010
movabsq $34359738400, %r14 # imm = 0x800000020
leaq 8(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
movq 8(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $1024, 4(%rsp) # imm = 0x400
movabsq $4612361558371493478, %rax # imm = 0x4002666666666666
movq %rax, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdid, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $1024, 4(%rsp) # imm = 0x400
movabsq $4612361558371493478, %rax # imm = 0x4002666666666666
movq %rax, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdid, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movq 8(%rsp), %rsi
movl $8388608, %edx # imm = 0x800000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
xorl %eax, %eax
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPdid, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPdid,@object # @_Z6kernelPdid
.section .rodata,"a",@progbits
.globl _Z6kernelPdid
.p2align 3, 0x0
_Z6kernelPdid:
.quad _Z21__device_stub__kernelPdid
.size _Z6kernelPdid, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6kernelPdid"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPdid
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPdid
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelPdid
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fe400078e0203 */
/*0040*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e260000002600 */
/*0050*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff007624 */
/* 0x000fe200078e00ff */
/*0080*/ S2R R8, SR_TID.Y ; /* 0x0000000000087919 */
/* 0x000e620000002200 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ IMAD R0, R0, c[0x0][0x10], RZ ; /* 0x0000040000007a24 */
/* 0x000fc800078e02ff */
/*00b0*/ I2F.U32.RP R2, R0 ; /* 0x0000000000027306 */
/* 0x000ea20000209000 */
/*00c0*/ IADD3 R11, RZ, -R0, RZ ; /* 0x80000000ff0b7210 */
/* 0x000fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fca0003f45070 */
/*00e0*/ MUFU.RCP R2, R2 ; /* 0x0000000200027308 */
/* 0x004ea20000001000 */
/*00f0*/ IMAD R3, R3, c[0x0][0x4], R8 ; /* 0x0000010003037a24 */
/* 0x003fc800078e0208 */
/*0100*/ IMAD.IADD R5, R3, 0x1, R0 ; /* 0x0000000103057824 */
/* 0x000fca00078e0200 */
/*0110*/ LOP3.LUT R9, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff097212 */
/* 0x000fc800078e33ff */
/*0120*/ IADD3 R9, R9, c[0x0][0x168], R0 ; /* 0x00005a0009097a10 */
/* 0x000fe40007ffe000 */
/*0130*/ IADD3 R6, R2, 0xffffffe, RZ ; /* 0x0ffffffe02067810 */
/* 0x004fc80007ffe0ff */
/*0140*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x000064000021f000 */
/*0150*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x001fe200000001ff */
/*0160*/ IMAD R11, R11, R7, RZ ; /* 0x000000070b0b7224 */
/* 0x002fd200078e02ff */
/*0170*/ IMAD.HI.U32 R2, R7, R11, R6 ; /* 0x0000000b07027227 */
/* 0x000fc800078e0006 */
/*0180*/ IMAD.IADD R7, R0, 0x1, R5 ; /* 0x0000000100077824 */
/* 0x000fe400078e0205 */
/*0190*/ IMAD.HI.U32 R2, R2, R9, RZ ; /* 0x0000000902027227 */
/* 0x000fc600078e00ff */
/*01a0*/ IADD3 R8, R0, R7, RZ ; /* 0x0000000700087210 */
/* 0x000fe20007ffe0ff */
/*01b0*/ IMAD.MOV R6, RZ, RZ, -R2 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0a02 */
/*01c0*/ IMAD R9, R0.reuse, R6, R9 ; /* 0x0000000600097224 */
/* 0x040fe400078e0209 */
/*01d0*/ IMAD R6, R0, c[0x0][0x168], RZ ; /* 0x00005a0000067a24 */
/* 0x000fc600078e02ff */
/*01e0*/ ISETP.GE.U32.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fda0003f06070 */
/*01f0*/ @P0 IADD3 R9, -R0, R9, RZ ; /* 0x0000000900090210 */
/* 0x000fe40007ffe1ff */
/*0200*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*0210*/ ISETP.GE.U32.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fda0003f26070 */
/*0220*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*0230*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*0240*/ IADD3 R9, R2, 0x1, RZ ; /* 0x0000000102097810 */
/* 0x000fc80007ffe0ff */
/*0250*/ LOP3.LUT R9, R9, 0x3, RZ, 0xc0, !PT ; /* 0x0000000309097812 */
/* 0x000fe400078ec0ff */
/*0260*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x168], PT ; /* 0x00005a0003007a0c */
/* 0x000fe20003f06270 */
/*0270*/ BSSY B0, 0x5e0 ; /* 0x0000036000007945 */
/* 0x000fd80003800000 */
/*0280*/ @P0 BRA 0x5d0 ; /* 0x0000034000000947 */
/* 0x000fea0003800000 */
/*0290*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*02a0*/ BSSY B1, 0x450 ; /* 0x000001a000017945 */
/* 0x000fe20003800000 */
/*02b0*/ IMAD.MOV.U32 R15, RZ, RZ, R3 ; /* 0x000000ffff0f7224 */
/* 0x000fd600078e0003 */
/*02c0*/ @!P0 BRA 0x440 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*02d0*/ HFMA2.MMA R17, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff117435 */
/* 0x000fe200000001ff */
/*02e0*/ IMAD R10, R3, c[0x0][0x168], R4 ; /* 0x00005a00030a7a24 */
/* 0x000fd200078e0204 */
/*02f0*/ IMAD.WIDE R10, R10, R17, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fca00078e0211 */
/*0300*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0310*/ ISETP.NE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fe20003f05270 */
/*0320*/ IMAD.MOV.U32 R15, RZ, RZ, R5 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0005 */
/*0330*/ DMUL R12, R12, c[0x0][0x170] ; /* 0x00005c000c0c7a28 */
/* 0x004e0e0000000000 */
/*0340*/ STG.E.64 [R10.64], R12 ; /* 0x0000000c0a007986 */
/* 0x0011e8000c101b04 */
/*0350*/ @!P0 BRA 0x440 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0360*/ IMAD R10, R5, c[0x0][0x168], R4 ; /* 0x00005a00050a7a24 */
/* 0x001fc800078e0204 */
/*0370*/ IMAD.WIDE R10, R10, R17, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fca00078e0211 */
/*0380*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0390*/ ISETP.NE.AND P0, PT, R9, 0x2, PT ; /* 0x000000020900780c */
/* 0x000fe40003f05270 */
/*03a0*/ MOV R15, R7 ; /* 0x00000007000f7202 */
/* 0x000fe20000000f00 */
/*03b0*/ DMUL R12, R12, c[0x0][0x170] ; /* 0x00005c000c0c7a28 */
/* 0x004e0e0000000000 */
/*03c0*/ STG.E.64 [R10.64], R12 ; /* 0x0000000c0a007986 */
/* 0x0011e6000c101b04 */
/*03d0*/ @!P0 BRA 0x440 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*03e0*/ IMAD R10, R7, c[0x0][0x168], R4 ; /* 0x00005a00070a7a24 */
/* 0x001fc800078e0204 */
/*03f0*/ IMAD.WIDE R10, R10, R17, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fca00078e0211 */
/*0400*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0410*/ IMAD.MOV.U32 R15, RZ, RZ, R8 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0008 */
/*0420*/ DMUL R12, R12, c[0x0][0x170] ; /* 0x00005c000c0c7a28 */
/* 0x004e0e0000000000 */
/*0430*/ STG.E.64 [R10.64], R12 ; /* 0x0000000c0a007986 */
/* 0x0011e8000c101b04 */
/*0440*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0450*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fda0003f06070 */
/*0460*/ @!P0 BRA 0x5d0 ; /* 0x0000016000008947 */
/* 0x000fea0003800000 */
/*0470*/ MOV R19, 0x8 ; /* 0x0000000800137802 */
/* 0x001fe20000000f00 */
/*0480*/ IMAD R18, R15, c[0x0][0x168], R4 ; /* 0x00005a000f127a24 */
/* 0x000fc800078e0204 */
/*0490*/ IMAD.WIDE R18, R18, R19, c[0x0][0x160] ; /* 0x0000580012127625 */
/* 0x000fca00078e0213 */
/*04a0*/ LDG.E.64 R10, [R18.64] ; /* 0x00000004120a7981 */
/* 0x001ea4000c1e1b00 */
/*04b0*/ DMUL R20, R10, c[0x0][0x170] ; /* 0x00005c000a147a28 */
/* 0x0040640000000000 */
/*04c0*/ IMAD.WIDE R10, R6, 0x8, R18 ; /* 0x00000008060a7825 */
/* 0x001fca00078e0212 */
/*04d0*/ STG.E.64 [R18.64], R20 ; /* 0x0000001412007986 */
/* 0x0021e8000c101b04 */
/*04e0*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea4000c1e1b00 */
/*04f0*/ DMUL R22, R12, c[0x0][0x170] ; /* 0x00005c000c167a28 */
/* 0x0042a40000000000 */
/*0500*/ IMAD.WIDE R12, R6, 0x8, R10 ; /* 0x00000008060c7825 */
/* 0x002fca00078e020a */
/*0510*/ STG.E.64 [R10.64], R22 ; /* 0x000000160a007986 */
/* 0x0041e8000c101b04 */
/*0520*/ LDG.E.64 R16, [R12.64] ; /* 0x000000040c107981 */
/* 0x000ea4000c1e1b00 */
/*0530*/ DMUL R24, R16, c[0x0][0x170] ; /* 0x00005c0010187a28 */
/* 0x0042a40000000000 */
/*0540*/ IMAD.WIDE R16, R6, 0x8, R12 ; /* 0x0000000806107825 */
/* 0x002fca00078e020c */
/*0550*/ STG.E.64 [R12.64], R24 ; /* 0x000000180c007986 */
/* 0x0041e8000c101b04 */
/*0560*/ LDG.E.64 R26, [R16.64] ; /* 0x00000004101a7981 */
/* 0x000ea2000c1e1b00 */
/*0570*/ IADD3 R15, R0, R15, R0 ; /* 0x0000000f000f7210 */
/* 0x000fc80007ffe000 */
/*0580*/ IADD3 R15, R0, R15, R0 ; /* 0x0000000f000f7210 */
/* 0x000fc80007ffe000 */
/*0590*/ ISETP.GE.AND P0, PT, R15, c[0x0][0x168], PT ; /* 0x00005a000f007a0c */
/* 0x000fe20003f06270 */
/*05a0*/ DMUL R26, R26, c[0x0][0x170] ; /* 0x00005c001a1a7a28 */
/* 0x004e4e0000000000 */
/*05b0*/ STG.E.64 [R16.64], R26 ; /* 0x0000001a10007986 */
/* 0x0021ea000c101b04 */
/*05c0*/ @!P0 BRA 0x470 ; /* 0xfffffea000008947 */
/* 0x000fea000383ffff */
/*05d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*05e0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x001fca0000000f00 */
/*05f0*/ IMAD R4, R11, c[0x0][0xc], R4 ; /* 0x000003000b047a24 */
/* 0x000fca00078e0204 */
/*0600*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f06270 */
/*0610*/ @!P0 BRA 0x260 ; /* 0xfffffc4000008947 */
/* 0x000fea000383ffff */
/*0620*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0630*/ BRA 0x630; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPdid
.globl _Z6kernelPdid
.p2align 8
.type _Z6kernelPdid,@function
_Z6kernelPdid:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s6, s[0:1], 0x8
v_and_b32_e32 v2, 0x3ff, v0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s7, s4, 0xffff
s_mov_b32 s4, exec_lo
s_mul_i32 s14, s14, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, s14, v2
v_cmpx_gt_i32_e64 s6, v3
s_cbranch_execz .LBB0_6
s_clause 0x1
s_load_b32 s4, s[2:3], 0xc
s_load_b64 s[8:9], s[2:3], 0x0
v_bfe_u32 v4, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s10, s4, 16
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b64 s[4:5], s[0:1], 0x10
v_mad_u64_u32 v[0:1], null, s15, s10, v[4:5]
s_mul_i32 s1, s9, s10
s_mul_i32 s7, s8, s7
s_mul_i32 s8, s1, s6
s_mov_b32 s9, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_lo_u32 v1, s6, v0
v_cmp_gt_i32_e32 vcc_lo, s6, v0
v_add3_u32 v4, v2, v1, s14
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s10
v_add_nc_u32_e32 v3, s7, v3
v_add_nc_u32_e32 v4, s7, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_i32_e64 s0, s6, v3
s_or_b32 s9, s0, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execz .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s10, vcc_lo
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v1, v4
v_mov_b32_e32 v5, v0
s_mov_b32 s11, 0
.p2align 6
.LBB0_5:
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_add_nc_u32_e32 v5, s1, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 3, v[1:2]
v_add_nc_u32_e32 v1, s8, v1
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, s0, s2, v6
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v7, s0, s3, v7, s0
v_cmp_le_i32_e64 s0, s6, v5
global_load_b64 v[8:9], v[6:7], off
s_or_b32 s11, s0, s11
s_waitcnt vmcnt(0)
v_mul_f64 v[8:9], v[8:9], s[4:5]
global_store_b64 v[6:7], v[8:9], off
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPdid
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPdid, .Lfunc_end0-_Z6kernelPdid
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPdid
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPdid.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000bded3_00000000-6_prof.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z6kernelPdidPdid
.type _Z27__device_stub__Z6kernelPdidPdid, @function
_Z27__device_stub__Z6kernelPdidPdid:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movsd %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPdid(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z6kernelPdidPdid, .-_Z27__device_stub__Z6kernelPdidPdid
.globl _Z6kernelPdid
.type _Z6kernelPdid, @function
_Z6kernelPdid:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z6kernelPdidPdid
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPdid, .-_Z6kernelPdid
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbx
movl $0, %eax
.L12:
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movsd %xmm0, (%rbx,%rax,8)
addq $1, %rax
cmpq $1048576, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $8388608, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $32, 28(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $16, 16(%rsp)
movl $16, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movl $32, 28(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $16, 16(%rsp)
movl $16, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L14:
movl $2, %ecx
movl $8388608, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movsd .LC0(%rip), %xmm0
movl $1024, %esi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z6kernelPdidPdid
jmp .L13
.L19:
movsd .LC0(%rip), %xmm0
movl $1024, %esi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z6kernelPdidPdid
jmp .L14
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z6kernelPdid"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPdid(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 1717986918
.long 1073899110
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "prof.hip"
.globl _Z21__device_stub__kernelPdid # -- Begin function _Z21__device_stub__kernelPdid
.p2align 4, 0x90
.type _Z21__device_stub__kernelPdid,@function
_Z21__device_stub__kernelPdid: # @_Z21__device_stub__kernelPdid
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movsd %xmm0, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdid, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPdid, .Lfunc_end0-_Z21__device_stub__kernelPdid
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd %xmm0, (%rbx,%rax,8)
incq %rax
cmpq $1048576, %rax # imm = 0x100000
jne .LBB1_1
# %bb.2:
movabsq $68719476752, %r15 # imm = 0x1000000010
movabsq $34359738400, %r14 # imm = 0x800000020
leaq 8(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
movq 8(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $1024, 4(%rsp) # imm = 0x400
movabsq $4612361558371493478, %rax # imm = 0x4002666666666666
movq %rax, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdid, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $1024, 4(%rsp) # imm = 0x400
movabsq $4612361558371493478, %rax # imm = 0x4002666666666666
movq %rax, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPdid, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movq 8(%rsp), %rsi
movl $8388608, %edx # imm = 0x800000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
xorl %eax, %eax
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPdid, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPdid,@object # @_Z6kernelPdid
.section .rodata,"a",@progbits
.globl _Z6kernelPdid
.p2align 3, 0x0
_Z6kernelPdid:
.quad _Z21__device_stub__kernelPdid
.size _Z6kernelPdid, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6kernelPdid"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPdid
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPdid
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
Group info:
hkhetaw Harsh Khetawat
asiddiq Anas Siddiqui
rkrish11 Rahul Krishna
*/
#include <math.h>
/* floating point precision type definitions */
typedef double FP_PREC;
//returns the function y(x) = fn
FP_PREC fn(FP_PREC x)
{
return x*x;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
Group info:
hkhetaw Harsh Khetawat
asiddiq Anas Siddiqui
rkrish11 Rahul Krishna
*/
#include <math.h>
/* floating point precision type definitions */
typedef double FP_PREC;
//returns the function y(x) = fn
FP_PREC fn(FP_PREC x)
{
return x*x;
} | .file "tmpxft_0016381e_00000000-6_p2_func.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z2fnd
.type _Z2fnd, @function
_Z2fnd:
.LFB2027:
.cfi_startproc
endbr64
mulsd %xmm0, %xmm0
ret
.cfi_endproc
.LFE2027:
.size _Z2fnd, .-_Z2fnd
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
Group info:
hkhetaw Harsh Khetawat
asiddiq Anas Siddiqui
rkrish11 Rahul Krishna
*/
#include <math.h>
/* floating point precision type definitions */
typedef double FP_PREC;
//returns the function y(x) = fn
FP_PREC fn(FP_PREC x)
{
return x*x;
} | /*
Group info:
hkhetaw Harsh Khetawat
asiddiq Anas Siddiqui
rkrish11 Rahul Krishna
*/
#include <hip/hip_runtime.h>
#include <math.h>
/* floating point precision type definitions */
typedef double FP_PREC;
//returns the function y(x) = fn
FP_PREC fn(FP_PREC x)
{
return x*x;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
Group info:
hkhetaw Harsh Khetawat
asiddiq Anas Siddiqui
rkrish11 Rahul Krishna
*/
#include <hip/hip_runtime.h>
#include <math.h>
/* floating point precision type definitions */
typedef double FP_PREC;
//returns the function y(x) = fn
FP_PREC fn(FP_PREC x)
{
return x*x;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
Group info:
hkhetaw Harsh Khetawat
asiddiq Anas Siddiqui
rkrish11 Rahul Krishna
*/
#include <hip/hip_runtime.h>
#include <math.h>
/* floating point precision type definitions */
typedef double FP_PREC;
//returns the function y(x) = fn
FP_PREC fn(FP_PREC x)
{
return x*x;
} | .text
.file "p2_func.hip"
.globl _Z2fnd # -- Begin function _Z2fnd
.p2align 4, 0x90
.type _Z2fnd,@function
_Z2fnd: # @_Z2fnd
.cfi_startproc
# %bb.0:
mulsd %xmm0, %xmm0
retq
.Lfunc_end0:
.size _Z2fnd, .Lfunc_end0-_Z2fnd
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016381e_00000000-6_p2_func.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z2fnd
.type _Z2fnd, @function
_Z2fnd:
.LFB2027:
.cfi_startproc
endbr64
mulsd %xmm0, %xmm0
ret
.cfi_endproc
.LFE2027:
.size _Z2fnd, .-_Z2fnd
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "p2_func.hip"
.globl _Z2fnd # -- Begin function _Z2fnd
.p2align 4, 0x90
.type _Z2fnd,@function
_Z2fnd: # @_Z2fnd
.cfi_startproc
# %bb.0:
mulsd %xmm0, %xmm0
retq
.Lfunc_end0:
.size _Z2fnd, .Lfunc_end0-_Z2fnd
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__
void neg_kernel(int n, const float *x, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) z[i] = -x[i];
}
void neg(int n, const float *x, float *z) {
neg_kernel<<<(n+255)/256, 256>>>(n, x, z);
} | code for sm_80
Function : _Z10neg_kerneliPKfPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0205 */
/*00b0*/ FADD R7, -R2, -RZ ; /* 0x800000ff02077221 */
/* 0x004fca0000000100 */
/*00c0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__
void neg_kernel(int n, const float *x, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) z[i] = -x[i];
}
void neg(int n, const float *x, float *z) {
neg_kernel<<<(n+255)/256, 256>>>(n, x, z);
} | .file "tmpxft_0003222d_00000000-6_neg.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
.type _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf, @function
_Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf:
.LFB2052:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10neg_kerneliPKfPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf, .-_Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
.globl _Z10neg_kerneliPKfPf
.type _Z10neg_kerneliPKfPf, @function
_Z10neg_kerneliPKfPf:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z10neg_kerneliPKfPf, .-_Z10neg_kerneliPKfPf
.globl _Z3negiPKfPf
.type _Z3negiPKfPf, @function
_Z3negiPKfPf:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movl %edi, %ebx
movq %rsi, %rbp
movq %rdx, %r12
movl $256, 20(%rsp)
movl $1, 24(%rsp)
leal 510(%rdi), %eax
movl %edi, %edx
addl $255, %edx
cmovns %edx, %eax
sarl $8, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movq %r12, %rdx
movq %rbp, %rsi
movl %ebx, %edi
call _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z3negiPKfPf, .-_Z3negiPKfPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10neg_kerneliPKfPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10neg_kerneliPKfPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__
void neg_kernel(int n, const float *x, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) z[i] = -x[i];
}
void neg(int n, const float *x, float *z) {
neg_kernel<<<(n+255)/256, 256>>>(n, x, z);
} | #include <hip/hip_runtime.h>
__global__
void neg_kernel(int n, const float *x, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) z[i] = -x[i];
}
void neg(int n, const float *x, float *z) {
neg_kernel<<<(n+255)/256, 256>>>(n, x, z);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__
void neg_kernel(int n, const float *x, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) z[i] = -x[i];
}
void neg(int n, const float *x, float *z) {
neg_kernel<<<(n+255)/256, 256>>>(n, x, z);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10neg_kerneliPKfPf
.globl _Z10neg_kerneliPKfPf
.p2align 8
.type _Z10neg_kerneliPKfPf,@function
_Z10neg_kerneliPKfPf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_xor_b32_e32 v2, 0x80000000, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10neg_kerneliPKfPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10neg_kerneliPKfPf, .Lfunc_end0-_Z10neg_kerneliPKfPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10neg_kerneliPKfPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10neg_kerneliPKfPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__
void neg_kernel(int n, const float *x, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) z[i] = -x[i];
}
void neg(int n, const float *x, float *z) {
neg_kernel<<<(n+255)/256, 256>>>(n, x, z);
} | .text
.file "neg.hip"
.globl _Z25__device_stub__neg_kerneliPKfPf # -- Begin function _Z25__device_stub__neg_kerneliPKfPf
.p2align 4, 0x90
.type _Z25__device_stub__neg_kerneliPKfPf,@function
_Z25__device_stub__neg_kerneliPKfPf: # @_Z25__device_stub__neg_kerneliPKfPf
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10neg_kerneliPKfPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__neg_kerneliPKfPf, .Lfunc_end0-_Z25__device_stub__neg_kerneliPKfPf
.cfi_endproc
# -- End function
.globl _Z3negiPKfPf # -- Begin function _Z3negiPKfPf
.p2align 4, 0x90
.type _Z3negiPKfPf,@function
_Z3negiPKfPf: # @_Z3negiPKfPf
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdx, %rbx
movq %rsi, %r14
movl %edi, %r15d
leal 255(%r15), %eax
leal 510(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $8, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movl %r15d, 12(%rsp)
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10neg_kerneliPKfPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z3negiPKfPf, .Lfunc_end1-_Z3negiPKfPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10neg_kerneliPKfPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10neg_kerneliPKfPf,@object # @_Z10neg_kerneliPKfPf
.section .rodata,"a",@progbits
.globl _Z10neg_kerneliPKfPf
.p2align 3, 0x0
_Z10neg_kerneliPKfPf:
.quad _Z25__device_stub__neg_kerneliPKfPf
.size _Z10neg_kerneliPKfPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10neg_kerneliPKfPf"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__neg_kerneliPKfPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10neg_kerneliPKfPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10neg_kerneliPKfPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0205 */
/*00b0*/ FADD R7, -R2, -RZ ; /* 0x800000ff02077221 */
/* 0x004fca0000000100 */
/*00c0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10neg_kerneliPKfPf
.globl _Z10neg_kerneliPKfPf
.p2align 8
.type _Z10neg_kerneliPKfPf,@function
_Z10neg_kerneliPKfPf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_xor_b32_e32 v2, 0x80000000, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10neg_kerneliPKfPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10neg_kerneliPKfPf, .Lfunc_end0-_Z10neg_kerneliPKfPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10neg_kerneliPKfPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10neg_kerneliPKfPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0003222d_00000000-6_neg.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
.type _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf, @function
_Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf:
.LFB2052:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10neg_kerneliPKfPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf, .-_Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
.globl _Z10neg_kerneliPKfPf
.type _Z10neg_kerneliPKfPf, @function
_Z10neg_kerneliPKfPf:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z10neg_kerneliPKfPf, .-_Z10neg_kerneliPKfPf
.globl _Z3negiPKfPf
.type _Z3negiPKfPf, @function
_Z3negiPKfPf:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movl %edi, %ebx
movq %rsi, %rbp
movq %rdx, %r12
movl $256, 20(%rsp)
movl $1, 24(%rsp)
leal 510(%rdi), %eax
movl %edi, %edx
addl $255, %edx
cmovns %edx, %eax
sarl $8, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movq %r12, %rdx
movq %rbp, %rsi
movl %ebx, %edi
call _Z34__device_stub__Z10neg_kerneliPKfPfiPKfPf
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z3negiPKfPf, .-_Z3negiPKfPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10neg_kerneliPKfPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10neg_kerneliPKfPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "neg.hip"
.globl _Z25__device_stub__neg_kerneliPKfPf # -- Begin function _Z25__device_stub__neg_kerneliPKfPf
.p2align 4, 0x90
.type _Z25__device_stub__neg_kerneliPKfPf,@function
_Z25__device_stub__neg_kerneliPKfPf: # @_Z25__device_stub__neg_kerneliPKfPf
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10neg_kerneliPKfPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__neg_kerneliPKfPf, .Lfunc_end0-_Z25__device_stub__neg_kerneliPKfPf
.cfi_endproc
# -- End function
.globl _Z3negiPKfPf # -- Begin function _Z3negiPKfPf
.p2align 4, 0x90
.type _Z3negiPKfPf,@function
_Z3negiPKfPf: # @_Z3negiPKfPf
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdx, %rbx
movq %rsi, %r14
movl %edi, %r15d
leal 255(%r15), %eax
leal 510(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $8, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movl %r15d, 12(%rsp)
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10neg_kerneliPKfPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z3negiPKfPf, .Lfunc_end1-_Z3negiPKfPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10neg_kerneliPKfPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10neg_kerneliPKfPf,@object # @_Z10neg_kerneliPKfPf
.section .rodata,"a",@progbits
.globl _Z10neg_kerneliPKfPf
.p2align 3, 0x0
_Z10neg_kerneliPKfPf:
.quad _Z25__device_stub__neg_kerneliPKfPf
.size _Z10neg_kerneliPKfPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10neg_kerneliPKfPf"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__neg_kerneliPKfPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10neg_kerneliPKfPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
}
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i] = exp(a[i]);
}
} | code for sm_80
Function : _Z9expkerneliPKdPd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x652b82fe ; /* 0x652b82feff047424 */
/* 0x000fe200078e00ff */
/*00b0*/ MOV R11, 0x3e5ade15 ; /* 0x3e5ade15000b7802 */
/* 0x000fe20000000f00 */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x3ff71547 ; /* 0x3ff71547ff057424 */
/* 0x000fe200078e00ff */
/*00d0*/ BSSY B0, 0x320 ; /* 0x0000024000007945 */
/* 0x000fe20003800000 */
/*00e0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x69ce2bdf ; /* 0x69ce2bdfff0a7424 */
/* 0x000fc800078e00ff */
/*00f0*/ DFMA R4, R2, R4, 6.75539944105574400000e+15 ; /* 0x433800000204742b */
/* 0x004e220000000004 */
/*0100*/ FSETP.GEU.AND P0, PT, |R3|, 4.1917929649353027344, PT ; /* 0x4086232b0300780b */
/* 0x000fca0003f0e200 */
/*0110*/ DADD R6, R4, -6.75539944105574400000e+15 ; /* 0xc338000004067429 */
/* 0x001e0c0000000000 */
/*0120*/ DFMA R8, R6, c[0x2][0x0], R2 ; /* 0x0080000006087a2b */
/* 0x001e0c0000000002 */
/*0130*/ DFMA R6, R6, c[0x2][0x8], R8 ; /* 0x0080020006067a2b */
/* 0x001e0c0000000008 */
/*0140*/ DFMA R8, R6, R10, c[0x2][0x10] ; /* 0x008004000608762b */
/* 0x001064000000000a */
/*0150*/ LEA R10, P1, R0, c[0x0][0x170], 0x3 ; /* 0x00005c00000a7a11 */
/* 0x001fc800078218ff */
/*0160*/ DFMA R8, R6, R8, c[0x2][0x18] ; /* 0x008006000608762b */
/* 0x002e0c0000000008 */
/*0170*/ DFMA R8, R6, R8, c[0x2][0x20] ; /* 0x008008000608762b */
/* 0x001e0c0000000008 */
/*0180*/ DFMA R8, R6, R8, c[0x2][0x28] ; /* 0x00800a000608762b */
/* 0x001e0c0000000008 */
/*0190*/ DFMA R8, R6, R8, c[0x2][0x30] ; /* 0x00800c000608762b */
/* 0x001e0c0000000008 */
/*01a0*/ DFMA R8, R6, R8, c[0x2][0x38] ; /* 0x00800e000608762b */
/* 0x001e0c0000000008 */
/*01b0*/ DFMA R8, R6, R8, c[0x2][0x40] ; /* 0x008010000608762b */
/* 0x001e0c0000000008 */
/*01c0*/ DFMA R8, R6, R8, c[0x2][0x48] ; /* 0x008012000608762b */
/* 0x001e0c0000000008 */
/*01d0*/ DFMA R8, R6, R8, c[0x2][0x50] ; /* 0x008014000608762b */
/* 0x001e0c0000000008 */
/*01e0*/ DFMA R8, R6, R8, 1 ; /* 0x3ff000000608742b */
/* 0x001e0c0000000008 */
/*01f0*/ DFMA R8, R6, R8, 1 ; /* 0x3ff000000608742b */
/* 0x0010640000000008 */
/*0200*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x001fc80000011400 */
/*0210*/ LEA.HI.X R11, R0, c[0x0][0x174], R7, 0x3, P1 ; /* 0x00005d00000b7a11 */
/* 0x000fc800008f1c07 */
/*0220*/ IMAD R7, R4, 0x100000, R9 ; /* 0x0010000004077824 */
/* 0x002fe400078e0209 */
/*0230*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0008 */
/*0240*/ @!P0 BRA 0x310 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*0250*/ FSETP.GEU.AND P1, PT, |R3|, 4.2275390625, PT ; /* 0x408748000300780b */
/* 0x000fe20003f2e200 */
/*0260*/ DADD R6, R2, +INF ; /* 0x7ff0000002067429 */
/* 0x000fc80000000000 */
/*0270*/ DSETP.GEU.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200722a */
/* 0x000e0c0003f0e000 */
/*0280*/ FSEL R6, R6, RZ, P0 ; /* 0x000000ff06067208 */
/* 0x001fe40000000000 */
/*0290*/ @!P1 LEA.HI R0, R4, R4, RZ, 0x1 ; /* 0x0000000404009211 */
/* 0x000fe400078f08ff */
/*02a0*/ FSEL R7, R7, RZ, P0 ; /* 0x000000ff07077208 */
/* 0x000fe40000000000 */
/*02b0*/ @!P1 SHF.R.S32.HI R3, RZ, 0x1, R0 ; /* 0x00000001ff039819 */
/* 0x000fc80000011400 */
/*02c0*/ @!P1 LEA R9, R3, R9, 0x14 ; /* 0x0000000903099211 */
/* 0x000fe200078ea0ff */
/*02d0*/ @!P1 IMAD.IADD R2, R4, 0x1, -R3 ; /* 0x0000000104029824 */
/* 0x000fca00078e0a03 */
/*02e0*/ @!P1 LEA R3, R2, 0x3ff00000, 0x14 ; /* 0x3ff0000002039811 */
/* 0x000fe200078ea0ff */
/*02f0*/ @!P1 IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff029224 */
/* 0x000fcc00078e00ff */
/*0300*/ @!P1 DMUL R6, R8, R2 ; /* 0x0000000208069228 */
/* 0x00004c0000000000 */
/*0310*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0320*/ STG.E.64 [R10.64], R6 ; /* 0x000000060a007986 */
/* 0x002fe2000c101b04 */
/*0330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0340*/ BRA 0x340; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
}
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i] = exp(a[i]);
}
} | .file "tmpxft_000d34c3_00000000-6_expkernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9expkerneliPKdPdiPKdPd
.type _Z32__device_stub__Z9expkerneliPKdPdiPKdPd, @function
_Z32__device_stub__Z9expkerneliPKdPdiPKdPd:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9expkerneliPKdPd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9expkerneliPKdPdiPKdPd, .-_Z32__device_stub__Z9expkerneliPKdPdiPKdPd
.globl _Z9expkerneliPKdPd
.type _Z9expkerneliPKdPd, @function
_Z9expkerneliPKdPd:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9expkerneliPKdPdiPKdPd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9expkerneliPKdPd, .-_Z9expkerneliPKdPd
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9expkerneliPKdPd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9expkerneliPKdPd(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
}
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i] = exp(a[i]);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
}
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i] = exp(a[i]);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
}
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i] = exp(a[i]);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9expkerneliPKdPd
.globl _Z9expkerneliPKdPd
.p2align 8
.type _Z9expkerneliPKdPd,@function
_Z9expkerneliPKdPd:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s5, 0x3e5ade15
s_mov_b32 s4, 0x6a5dcb37
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
s_mov_b32 s1, 0x3ff71547
s_mov_b32 s0, 0x652b82fe
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_mul_f64 v[4:5], v[2:3], s[0:1]
s_mov_b32 s1, 0xbfe62e42
s_mov_b32 s0, 0xfefa39ef
v_cmp_nlt_f64_e32 vcc_lo, 0x40900000, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rndne_f64_e32 v[4:5], v[4:5]
v_fma_f64 v[6:7], v[4:5], s[0:1], v[2:3]
s_mov_b32 s1, 0xbc7abc9e
s_mov_b32 s0, 0x3b39803f
v_cvt_i32_f64_e32 v10, v[4:5]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[6:7], v[4:5], s[0:1], v[6:7]
s_mov_b32 s1, 0x3e928af3
s_mov_b32 s0, 0xfca7ab0c
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], s[4:5], s[0:1]
s_mov_b32 s1, 0x3ec71dee
s_mov_b32 s0, 0x623fde64
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3efa0199
s_mov_b32 s0, 0x7c89e6b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f2a01a0
s_mov_b32 s0, 0x14761f6e
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f56c16c
s_mov_b32 s0, 0x1852b7b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f811111
s_mov_b32 s0, 0x11122322
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fa55555
s_mov_b32 s0, 0x555502a1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fc55555
s_mov_b32 s0, 0x55555511
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fe00000
s_mov_b32 s0, 11
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
v_cmp_ngt_f64_e64 s0, 0xc090cc00, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], 1.0
v_fma_f64 v[4:5], v[6:7], v[8:9], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ldexp_f64 v[4:5], v[4:5], v10
v_cndmask_b32_e32 v5, 0x7ff00000, v5, vcc_lo
s_and_b32 vcc_lo, s0, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v2, 0, v4, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_cndmask_b32_e64 v3, 0, v5, s0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9expkerneliPKdPd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9expkerneliPKdPd, .Lfunc_end0-_Z9expkerneliPKdPd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9expkerneliPKdPd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9expkerneliPKdPd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
}
// Element-wise exponential kernel: b[i] = exp(a[i]) for every i < lengthA.
// One thread per element over a 1-D grid; extra threads fall through the guard.
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x; // global 1-D thread index
if (i<lengthA) // bounds guard: grid may overshoot lengthA
{
b[i] = exp(a[i]);
}
} | .text
.file "expkernel.hip"
.globl _Z24__device_stub__expkerneliPKdPd # -- Begin function _Z24__device_stub__expkerneliPKdPd
.p2align 4, 0x90
.type _Z24__device_stub__expkerneliPKdPd,@function
#-----------------------------------------------------------------------
# Host-side launch stub for the HIP kernel expkernel(int, const double*,
# double*). Spills the three arguments, builds the kernel-arg pointer
# array, pops the <<<...>>> launch configuration, and forwards everything
# to hipLaunchKernel.
# ABI: SysV AMD64. In: edi = lengthA, rsi = a, rdx = b.
#-----------------------------------------------------------------------
_Z24__device_stub__expkerneliPKdPd:     # @_Z24__device_stub__expkerneliPKdPd
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp              # frame: arg copies + arg array + config slots
	.cfi_def_cfa_offset 112
	movl	%edi, 12(%rsp)          # spill lengthA
	movq	%rsi, 72(%rsp)          # spill a
	movq	%rdx, 64(%rsp)          # spill b
	leaq	12(%rsp), %rax
	movq	%rax, 80(%rsp)          # args[0] = &lengthA
	leaq	72(%rsp), %rax
	movq	%rax, 88(%rsp)          # args[1] = &a
	leaq	64(%rsp), %rax
	movq	%rax, 96(%rsp)          # args[2] = &b
	leaq	48(%rsp), %rdi          # out: grid dims
	leaq	32(%rsp), %rsi          # out: block dims
	leaq	24(%rsp), %rdx          # out: shared-mem size
	leaq	16(%rsp), %rcx          # out: stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi          # grid x/y (packed)
	movl	56(%rsp), %edx          # grid z
	movq	32(%rsp), %rcx          # block x/y (packed)
	movl	40(%rsp), %r8d          # block z
	leaq	80(%rsp), %r9           # kernel-arg pointer array
	movl	$_Z9expkerneliPKdPd, %edi   # host-side kernel handle
	pushq	16(%rsp)                # stream (offsets shift 8 after each push)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp              # 104-byte frame + two 8-byte pushes
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end0:
	.size	_Z24__device_stub__expkerneliPKdPd, .Lfunc_end0-_Z24__device_stub__expkerneliPKdPd
	.cfi_endproc
                                        # -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (run from .init_array): registers the embedded fat
# binary exactly once (handle cached in __hip_gpubin_handle), registers
# expkernel with the HIP runtime, and queues __hip_module_dtor via atexit.
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache module handle
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four trailing out-params
	movups	%xmm0, (%rsp)
	movl	$_Z9expkerneliPKdPd, %esi       # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # kernel name string
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d                       # thread limit: unlimited
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered via atexit): unregisters the fat binary
# if it was ever registered, then clears the cached handle.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # never registered? -> nothing to do
	je	.LBB2_2
# %bb.1:
	pushq	%rax                            # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # drop stale handle
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z9expkerneliPKdPd,@object # @_Z9expkerneliPKdPd
.section .rodata,"a",@progbits
.globl _Z9expkerneliPKdPd
.p2align 3, 0x0
_Z9expkerneliPKdPd:
.quad _Z24__device_stub__expkerneliPKdPd
.size _Z9expkerneliPKdPd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9expkerneliPKdPd"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__expkerneliPKdPd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9expkerneliPKdPd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9expkerneliPKdPd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x652b82fe ; /* 0x652b82feff047424 */
/* 0x000fe200078e00ff */
/*00b0*/ MOV R11, 0x3e5ade15 ; /* 0x3e5ade15000b7802 */
/* 0x000fe20000000f00 */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x3ff71547 ; /* 0x3ff71547ff057424 */
/* 0x000fe200078e00ff */
/*00d0*/ BSSY B0, 0x320 ; /* 0x0000024000007945 */
/* 0x000fe20003800000 */
/*00e0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x69ce2bdf ; /* 0x69ce2bdfff0a7424 */
/* 0x000fc800078e00ff */
/*00f0*/ DFMA R4, R2, R4, 6.75539944105574400000e+15 ; /* 0x433800000204742b */
/* 0x004e220000000004 */
/*0100*/ FSETP.GEU.AND P0, PT, |R3|, 4.1917929649353027344, PT ; /* 0x4086232b0300780b */
/* 0x000fca0003f0e200 */
/*0110*/ DADD R6, R4, -6.75539944105574400000e+15 ; /* 0xc338000004067429 */
/* 0x001e0c0000000000 */
/*0120*/ DFMA R8, R6, c[0x2][0x0], R2 ; /* 0x0080000006087a2b */
/* 0x001e0c0000000002 */
/*0130*/ DFMA R6, R6, c[0x2][0x8], R8 ; /* 0x0080020006067a2b */
/* 0x001e0c0000000008 */
/*0140*/ DFMA R8, R6, R10, c[0x2][0x10] ; /* 0x008004000608762b */
/* 0x001064000000000a */
/*0150*/ LEA R10, P1, R0, c[0x0][0x170], 0x3 ; /* 0x00005c00000a7a11 */
/* 0x001fc800078218ff */
/*0160*/ DFMA R8, R6, R8, c[0x2][0x18] ; /* 0x008006000608762b */
/* 0x002e0c0000000008 */
/*0170*/ DFMA R8, R6, R8, c[0x2][0x20] ; /* 0x008008000608762b */
/* 0x001e0c0000000008 */
/*0180*/ DFMA R8, R6, R8, c[0x2][0x28] ; /* 0x00800a000608762b */
/* 0x001e0c0000000008 */
/*0190*/ DFMA R8, R6, R8, c[0x2][0x30] ; /* 0x00800c000608762b */
/* 0x001e0c0000000008 */
/*01a0*/ DFMA R8, R6, R8, c[0x2][0x38] ; /* 0x00800e000608762b */
/* 0x001e0c0000000008 */
/*01b0*/ DFMA R8, R6, R8, c[0x2][0x40] ; /* 0x008010000608762b */
/* 0x001e0c0000000008 */
/*01c0*/ DFMA R8, R6, R8, c[0x2][0x48] ; /* 0x008012000608762b */
/* 0x001e0c0000000008 */
/*01d0*/ DFMA R8, R6, R8, c[0x2][0x50] ; /* 0x008014000608762b */
/* 0x001e0c0000000008 */
/*01e0*/ DFMA R8, R6, R8, 1 ; /* 0x3ff000000608742b */
/* 0x001e0c0000000008 */
/*01f0*/ DFMA R8, R6, R8, 1 ; /* 0x3ff000000608742b */
/* 0x0010640000000008 */
/*0200*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x001fc80000011400 */
/*0210*/ LEA.HI.X R11, R0, c[0x0][0x174], R7, 0x3, P1 ; /* 0x00005d00000b7a11 */
/* 0x000fc800008f1c07 */
/*0220*/ IMAD R7, R4, 0x100000, R9 ; /* 0x0010000004077824 */
/* 0x002fe400078e0209 */
/*0230*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0008 */
/*0240*/ @!P0 BRA 0x310 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*0250*/ FSETP.GEU.AND P1, PT, |R3|, 4.2275390625, PT ; /* 0x408748000300780b */
/* 0x000fe20003f2e200 */
/*0260*/ DADD R6, R2, +INF ; /* 0x7ff0000002067429 */
/* 0x000fc80000000000 */
/*0270*/ DSETP.GEU.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200722a */
/* 0x000e0c0003f0e000 */
/*0280*/ FSEL R6, R6, RZ, P0 ; /* 0x000000ff06067208 */
/* 0x001fe40000000000 */
/*0290*/ @!P1 LEA.HI R0, R4, R4, RZ, 0x1 ; /* 0x0000000404009211 */
/* 0x000fe400078f08ff */
/*02a0*/ FSEL R7, R7, RZ, P0 ; /* 0x000000ff07077208 */
/* 0x000fe40000000000 */
/*02b0*/ @!P1 SHF.R.S32.HI R3, RZ, 0x1, R0 ; /* 0x00000001ff039819 */
/* 0x000fc80000011400 */
/*02c0*/ @!P1 LEA R9, R3, R9, 0x14 ; /* 0x0000000903099211 */
/* 0x000fe200078ea0ff */
/*02d0*/ @!P1 IMAD.IADD R2, R4, 0x1, -R3 ; /* 0x0000000104029824 */
/* 0x000fca00078e0a03 */
/*02e0*/ @!P1 LEA R3, R2, 0x3ff00000, 0x14 ; /* 0x3ff0000002039811 */
/* 0x000fe200078ea0ff */
/*02f0*/ @!P1 IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff029224 */
/* 0x000fcc00078e00ff */
/*0300*/ @!P1 DMUL R6, R8, R2 ; /* 0x0000000208069228 */
/* 0x00004c0000000000 */
/*0310*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0320*/ STG.E.64 [R10.64], R6 ; /* 0x000000060a007986 */
/* 0x002fe2000c101b04 */
/*0330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0340*/ BRA 0x340; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9expkerneliPKdPd
.globl _Z9expkerneliPKdPd
.p2align 8
.type _Z9expkerneliPKdPd,@function
; Device code for expkernel(int lengthA, const double* a, double* b) on
; gfx1100 (wave32). Computes b[i] = exp(a[i]) as 2^n * poly(r) after
; range reduction r = x - n*ln2 (n = round(x * log2(e))), with explicit
; clamping: result forced to +inf on overflow and 0 on underflow.
; Kernarg layout: +0x0 lengthA, +0x8 a, +0x10 b (see amdhsa metadata).
_Z9expkerneliPKdPd:
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x24            ; hidden_group_size_x
	s_load_b32 s3, s[0:1], 0x0             ; lengthA
	s_waitcnt lgkmcnt(0)
	s_and_b32 s2, s2, 0xffff               ; group size is a 16-bit field
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]   ; i = wg_id*group_size + tid
	s_mov_b32 s2, exec_lo
	v_cmpx_gt_i32_e64 s3, v1               ; disable lanes with i >= lengthA
	s_cbranch_execz .LBB0_2
	s_load_b128 s[0:3], s[0:1], 0x8        ; s[0:1]=a, s[2:3]=b
	v_ashrrev_i32_e32 v2, 31, v1           ; sign-extend i to 64 bits
	s_mov_b32 s5, 0x3e5ade15
	s_mov_b32 s4, 0x6a5dcb37
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[0:1], 3, v[1:2]        ; byte offset = i * sizeof(double)
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v2, vcc_lo, s0, v0
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
	s_mov_b32 s1, 0x3ff71547
	s_mov_b32 s0, 0x652b82fe               ; s[0:1] = log2(e)
	global_load_b64 v[2:3], v[2:3], off    ; x = a[i]
	s_waitcnt vmcnt(0)
	v_mul_f64 v[4:5], v[2:3], s[0:1]       ; x * log2(e)
	s_mov_b32 s1, 0xbfe62e42
	s_mov_b32 s0, 0xfefa39ef               ; s[0:1] = -ln2 (hi part)
	v_cmp_nlt_f64_e32 vcc_lo, 0x40900000, v[2:3]   ; overflow test vs 1024.0
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_rndne_f64_e32 v[4:5], v[4:5]         ; n = round-to-nearest-even
	v_fma_f64 v[6:7], v[4:5], s[0:1], v[2:3]       ; r = x - n*ln2_hi
	s_mov_b32 s1, 0xbc7abc9e
	s_mov_b32 s0, 0x3b39803f               ; -ln2 (lo part) for extra precision
	v_cvt_i32_f64_e32 v10, v[4:5]          ; integer exponent n for ldexp
	s_delay_alu instid0(VALU_DEP_2)
	v_fma_f64 v[6:7], v[4:5], s[0:1], v[6:7]       ; r -= n*ln2_lo
	s_mov_b32 s1, 0x3e928af3
	s_mov_b32 s0, 0xfca7ab0c
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], s[4:5], s[0:1]       ; Horner polynomial for e^r
	s_mov_b32 s1, 0x3ec71dee
	s_mov_b32 s0, 0x623fde64
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
	s_mov_b32 s1, 0x3efa0199
	s_mov_b32 s0, 0x7c89e6b0
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
	s_mov_b32 s1, 0x3f2a01a0
	s_mov_b32 s0, 0x14761f6e
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
	s_mov_b32 s1, 0x3f56c16c
	s_mov_b32 s0, 0x1852b7b0
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
	s_mov_b32 s1, 0x3f811111
	s_mov_b32 s0, 0x11122322
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
	s_mov_b32 s1, 0x3fa55555
	s_mov_b32 s0, 0x555502a1
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
	s_mov_b32 s1, 0x3fc55555
	s_mov_b32 s0, 0x55555511
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
	s_mov_b32 s1, 0x3fe00000
	s_mov_b32 s0, 11
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]       ; ... + 1/2 term
	v_cmp_ngt_f64_e64 s0, 0xc090cc00, v[2:3]       ; underflow test (~ -1075)
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_fma_f64 v[8:9], v[6:7], v[8:9], 1.0
	v_fma_f64 v[4:5], v[6:7], v[8:9], 1.0          ; poly(r) ~= e^r
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_ldexp_f64 v[4:5], v[4:5], v10                ; scale by 2^n
	v_cndmask_b32_e32 v5, 0x7ff00000, v5, vcc_lo   ; overflow -> +inf hi word
	s_and_b32 vcc_lo, s0, vcc_lo
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
	v_cndmask_b32_e32 v2, 0, v4, vcc_lo            ; underflow -> 0
	v_add_co_u32 v0, vcc_lo, s2, v0                ; &b[i]
	v_cndmask_b32_e64 v3, 0, v5, s0
	v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
	global_store_b64 v[0:1], v[2:3], off           ; b[i] = result
.LBB0_2:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9expkerneliPKdPd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9expkerneliPKdPd, .Lfunc_end0-_Z9expkerneliPKdPd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9expkerneliPKdPd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9expkerneliPKdPd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d34c3_00000000-6_expkernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type	_ZL26__cudaUnregisterBinaryUtilv, @function
# atexit hook: unregisters this translation unit's fat binary from the
# CUDA runtime using the handle cached by __sti____cudaRegisterAllv.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2029:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9expkerneliPKdPdiPKdPd
.type _Z32__device_stub__Z9expkerneliPKdPdiPKdPd, @function
#-----------------------------------------------------------------------
# CUDA launch stub for expkernel(int, const double*, double*).
# Spills the arguments, builds the kernel-arg pointer array, pops the
# <<<...>>> configuration, and calls cudaLaunchKernel. Stack-protector
# canary guards the frame. ABI: SysV AMD64; edi=lengthA, rsi=a, rdx=b.
#-----------------------------------------------------------------------
_Z32__device_stub__Z9expkerneliPKdPdiPKdPd:
.LFB2051:
	.cfi_startproc
	endbr64
	subq	$136, %rsp              # frame: spills + dim3s + arg array + canary
	.cfi_def_cfa_offset 144
	movl	%edi, 28(%rsp)          # spill lengthA
	movq	%rsi, 16(%rsp)          # spill a
	movq	%rdx, 8(%rsp)           # spill b
	movq	%fs:40, %rax            # install stack canary
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	28(%rsp), %rax
	movq	%rax, 96(%rsp)          # args[0] = &lengthA
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)         # args[1] = &a
	leaq	8(%rsp), %rax
	movq	%rax, 112(%rsp)         # args[2] = &b
	movl	$1, 48(%rsp)            # default gridDim/blockDim = (1,1,1)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx          # &stream
	leaq	32(%rsp), %rdx          # &sharedMem
	leaq	60(%rsp), %rsi          # &blockDim
	leaq	48(%rsp), %rdi          # &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax              # 0 => configuration available, do launch
	je	.L7
.L3:
	movq	120(%rsp), %rax         # verify canary before returning
	subq	%fs:40, %rax
	jne	.L8
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	pushq	40(%rsp)                # stream
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)                # sharedMem (offsets shift after push)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9          # kernel-arg array
	movq	76(%rsp), %rcx          # blockDim x/y
	movl	84(%rsp), %r8d          # blockDim z
	movq	64(%rsp), %rsi          # gridDim x/y
	movl	72(%rsp), %edx          # gridDim z
	leaq	_Z9expkerneliPKdPd(%rip), %rdi  # host-side kernel handle
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT    # canary mismatch: abort
	.cfi_endproc
.LFE2051:
	.size	_Z32__device_stub__Z9expkerneliPKdPdiPKdPd, .-_Z32__device_stub__Z9expkerneliPKdPdiPKdPd
.globl _Z9expkerneliPKdPd
.type _Z9expkerneliPKdPd, @function
# Host-side entry for expkernel: thin trampoline to the device stub
# (same SysV argument registers pass straight through).
_Z9expkerneliPKdPd:
.LFB2052:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	call	_Z32__device_stub__Z9expkerneliPKdPdiPKdPd
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2052:
	.size	_Z9expkerneliPKdPd, .-_Z9expkerneliPKdPd
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9expkerneliPKdPd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
# Static initializer (run from .init_array): registers the embedded fat
# binary, registers expkernel by its mangled name, finalizes registration,
# and queues the unregister hook via atexit.
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)   # cache module handle
	pushq	$0                      # four NULL trailing args (dims/sizes)
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d               # thread limit: unlimited
	leaq	.LC0(%rip), %rdx        # device function name string
	movq	%rdx, %rcx
	leaq	_Z9expkerneliPKdPd(%rip), %rsi  # host-side handle
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp               # drop the four pushes
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2054:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "expkernel.hip"
.globl _Z24__device_stub__expkerneliPKdPd # -- Begin function _Z24__device_stub__expkerneliPKdPd
.p2align 4, 0x90
.type _Z24__device_stub__expkerneliPKdPd,@function
#-----------------------------------------------------------------------
# Host-side launch stub for the HIP kernel expkernel(int, const double*,
# double*). Spills the three arguments, builds the kernel-arg pointer
# array, pops the <<<...>>> launch configuration, and forwards everything
# to hipLaunchKernel.
# ABI: SysV AMD64. In: edi = lengthA, rsi = a, rdx = b.
#-----------------------------------------------------------------------
_Z24__device_stub__expkerneliPKdPd:     # @_Z24__device_stub__expkerneliPKdPd
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp              # frame: arg copies + arg array + config slots
	.cfi_def_cfa_offset 112
	movl	%edi, 12(%rsp)          # spill lengthA
	movq	%rsi, 72(%rsp)          # spill a
	movq	%rdx, 64(%rsp)          # spill b
	leaq	12(%rsp), %rax
	movq	%rax, 80(%rsp)          # args[0] = &lengthA
	leaq	72(%rsp), %rax
	movq	%rax, 88(%rsp)          # args[1] = &a
	leaq	64(%rsp), %rax
	movq	%rax, 96(%rsp)          # args[2] = &b
	leaq	48(%rsp), %rdi          # out: grid dims
	leaq	32(%rsp), %rsi          # out: block dims
	leaq	24(%rsp), %rdx          # out: shared-mem size
	leaq	16(%rsp), %rcx          # out: stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi          # grid x/y (packed)
	movl	56(%rsp), %edx          # grid z
	movq	32(%rsp), %rcx          # block x/y (packed)
	movl	40(%rsp), %r8d          # block z
	leaq	80(%rsp), %r9           # kernel-arg pointer array
	movl	$_Z9expkerneliPKdPd, %edi   # host-side kernel handle
	pushq	16(%rsp)                # stream (offsets shift 8 after each push)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp              # 104-byte frame + two 8-byte pushes
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end0:
	.size	_Z24__device_stub__expkerneliPKdPd, .Lfunc_end0-_Z24__device_stub__expkerneliPKdPd
	.cfi_endproc
                                        # -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (run from .init_array): registers the embedded fat
# binary exactly once (handle cached in __hip_gpubin_handle), registers
# expkernel with the HIP runtime, and queues __hip_module_dtor via atexit.
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache module handle
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four trailing out-params
	movups	%xmm0, (%rsp)
	movl	$_Z9expkerneliPKdPd, %esi       # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # kernel name string
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d                       # thread limit: unlimited
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered via atexit): unregisters the fat binary
# if it was ever registered, then clears the cached handle.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # never registered? -> nothing to do
	je	.LBB2_2
# %bb.1:
	pushq	%rax                            # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # drop stale handle
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z9expkerneliPKdPd,@object # @_Z9expkerneliPKdPd
.section .rodata,"a",@progbits
.globl _Z9expkerneliPKdPd
.p2align 3, 0x0
_Z9expkerneliPKdPd:
.quad _Z24__device_stub__expkerneliPKdPd
.size _Z9expkerneliPKdPd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9expkerneliPKdPd"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__expkerneliPKdPd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9expkerneliPKdPd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cstdio>
#define BLOCK_SIZE 16
#define SIZE_RATE 10
// Dense row-major matrix. `elements` may point to host or device memory
// depending on context; callers track which (host helpers below assume
// host-accessible, the *_cuda kernels assume device memory).
struct matrix{
    int height;        // number of rows
    int width;         // number of columns
    double *elements;  // row-major storage, height*width doubles
};
// Print "<name>: <height>, <width>" for quick shape debugging.
void printShape(matrix &m, std::string name){
    std::cout << name << ": " << m.height << ", " << m.width << std::endl;
}
// Print the full matrix, one row per line, space-separated.
// NOTE(review): reads M.elements directly — M must be host-accessible.
void printMatrix(matrix M){
    for(int i = 0; i < M.height; i++){
        for(int j = 0; j < M.width; j++){
            if(j != 0) std::cout << " ";
            std::cout << M.elements[i*M.width+j];
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Kernel: set every element of M to zero (one thread per element).
__global__ void matrixZero_cuda(matrix M){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] = 0;
    }
}
// Host wrapper: zero d_m_in in place (elements must be device memory).
static void matrixZero(matrix& d_m_in){
    // Size block/grid to cover the whole input matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixZero_cuda<<<gld, blk>>>(d_m_in);
}
// Kernel: element-wise copy org -> M (shapes assumed compatible).
__global__ void matrixCpy_cuda(matrix M, matrix org){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] = org.elements[row*org.width+col];
    }
}
// Host wrapper: reallocate d_m_in to the shape of d_m_ac, then copy
// d_m_ac's contents into it on the device.
static void matrixCpy(matrix& d_m_in, matrix& d_m_ac){
    // Resize the destination to match the copy source.
    cudaFree(d_m_in.elements);
    d_m_in.height = d_m_ac.height; d_m_in.width = d_m_ac.width;
    cudaMalloc((void**)&d_m_in.elements, d_m_in.height*d_m_in.width*sizeof(double));
    // Size block/grid to cover the (new) input shape.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixCpy_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
// Kernel: element-wise in-place addition M += add.
__global__ void matrixAdd_cuda(matrix M, matrix add){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] += add.elements[row*add.width+col];
    }
}
// Host wrapper: d_m_in += d_m_ac; exits the process on shape mismatch.
static void matrixAdd(matrix& d_m_in, matrix& d_m_ac){
    if(d_m_in.height != d_m_ac.height || d_m_in.width != d_m_ac.width){
        std::cout << "add err." << '\n';
        exit(1);
    }
    // Size block/grid to cover the input matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixAdd_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
// Kernel: element-wise in-place subtraction M -= minus.
__global__ void matrixMinus_cuda(matrix M, matrix minus){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] -= minus.elements[row*minus.width+col];
    }
}
// Host wrapper: d_m_in -= d_m_ac; exits the process on shape mismatch.
static void matrixMinus(matrix& d_m_in, matrix& d_m_ac){
    if(d_m_in.height != d_m_ac.height || d_m_in.width != d_m_ac.width){
        std::cout << "minus err." << '\n';
        exit(1);
    }
    // Size block/grid to cover the input matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixMinus_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
// Kernel: in-place scalar multiply M *= rate.
__global__ void matrixConstMul_cuda(matrix M, double rate){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] *= rate;
    }
}
// Host wrapper: scale every element of d_m_in by rate.
static void matrixConstMul(matrix& d_m_in, double rate){
    // Size block/grid to cover the input matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixConstMul_cuda<<<gld, blk>>>(d_m_in, rate);
}
// Kernel: naive dense matrix multiply C = A * B
// (one thread per output element, inner loop over A's columns).
__global__ void matrixMul_cuda(matrix A, matrix B, matrix C){
    // Determine which element of C this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < C.height && col < C.width){
        double x = 0.0f;  // accumulator (0.0f converts exactly to 0.0)
        for (int i = 0; i < A.width; i++) {
            x += A.elements[row*A.width+i]*B.elements[i*B.width+col];
        }
        C.elements[row*C.width+col] = x;
    }
}
// Host wrapper: d_m_in = d_m_in * d_m_ac. Allocates a fresh result
// buffer, frees the old input buffer, and hands the result back through
// d_m_in. Exits the process if inner dimensions disagree.
static void matrixMul(matrix& d_m_in, matrix& d_m_ac){
    if(d_m_in.width != d_m_ac.height){
        std::cout << "mul err." << '\n';
        exit(1);
    }
    // Allocate device storage for the product.
    matrix d_ans;
    d_ans.width = d_m_ac.width; d_ans.height = d_m_in.height;
    int size;
    // Allocate device memory (NOTE(review): int may overflow for huge matrices).
    size = d_ans.width*d_ans.height*sizeof(double);
    cudaMalloc((void**)&d_ans.elements, size);
    // Size block/grid to cover the output matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_ans.width-1+blk.x)/blk.x, (d_ans.height-1+blk.y)/blk.y);
    matrixMul_cuda<<<gld, blk>>>(d_m_in, d_m_ac, d_ans);
    // Free the input buffer that is no longer needed.
    cudaFree(d_m_in.elements);
    // Hand the result back through the in/out parameter.
    d_m_in = d_ans;
}
// Kernel: add a row vector `bias` to every row of M (M[r][c] += bias[c]).
__global__ void matrixAddBias_cuda(matrix M, matrix bias){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] += bias.elements[col];
    }
}
// Host wrapper: broadcast-add bias row d_m_ac onto d_m_in; exits the
// process if the widths disagree.
static void matrixAddBias(matrix& d_m_in, matrix& d_m_ac){
    if(d_m_in.width != d_m_ac.width){
        std::cout << "bias err." << '\n';
        exit(1);
    }
    // Size block/grid to cover the input matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixAddBias_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
// Kernel: in-place ReLU — clamp negative elements of M to zero.
__global__ void matrixRelu_cuda(matrix M){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] = (M.elements[row*M.width+col] < 0)? 0: M.elements[row*M.width+col];
    }
}
// Host wrapper: apply ReLU to d_m_in in place.
static void matrixRelu(matrix& d_m_in){
    // Size block/grid to cover the input matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixRelu_cuda<<<gld, blk>>>(d_m_in);
}
// Kernel: row-wise softmax, one thread per row. Values are shifted by the
// row max for stability; the extra +350 bias multiplies numerator and
// denominator by the same factor e^350, so it cancels in the ratio
// (presumably to keep exp() of very negative inputs out of the denormal
// range — TODO confirm intent). Diagnostic printf messages (Japanese)
// report inf/nan in the numerator or denominator.
__global__ void matrixSoftmax_cuda(matrix M){
    // Determine which row this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    // Only threads mapping to a valid row do work.
    if(row < M.height){
        double sum = 0;
        double max = M.elements[row*M.width];
        // Pass 1: row maximum (for numerically stable exponentials).
        for (int j = 0; j < M.width; j++) {
            max = (M.elements[row*M.width+j] > max)? M.elements[row*M.width+j]: max;
        }
        // Pass 2: denominator = sum of shifted exponentials.
        for (int j = 0; j < M.width; j++) {
            sum += exp(M.elements[row*M.width+j] - max + 350);
            if(isinf(sum)){
                printf("softmax の分母がinf。\n");
            }else if(isnan(sum)){
                printf("softmax の分母がnan。\n");
            }
        }
        // Pass 3: write normalized probabilities back in place.
        for (int j = 0; j < M.width; j++) {
            double bunshi = exp(M.elements[row*M.width+j] - max + 350);  // numerator
            if(isinf(bunshi)){
                printf("softmax の分子がinf, value = %e\n", M.elements[row*M.width+j]);
            }else if(isnan(bunshi)){
                printf("softmax の分子がnan, value = %e\n", M.elements[row*M.width+j]);
            }
            M.elements[row*M.width+j] = bunshi/sum;
        }
    }
}
// Host wrapper: softmax each row of d_m_in (1 thread per row, so the
// grid is 1 block wide and BLOCK_SIZE rows tall per block).
static void matrixSoftmax(matrix& d_m_in){
    // Size block/grid by row count only.
    dim3 blk(1, BLOCK_SIZE);
    dim3 gld(1, (d_m_in.height-1+blk.y)/blk.y);
    matrixSoftmax_cuda<<<gld, blk>>>(d_m_in);
}
// Kernel: masked ReLU for backprop — zero M where the corresponding
// element of `relufrom` (the forward-pass pre-activation) was negative.
__global__ void matrixReluWithOther_cuda(matrix M, matrix relufrom){
    // Determine which element of the matrix this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid element do work.
    if(row < M.height && col < M.width){
        M.elements[row*M.width+col] = (relufrom.elements[row*relufrom.width+col] < 0)? 0: M.elements[row*M.width+col];
    }
}
// Host wrapper: mask d_m_in by the sign of d_m_ac; exits the process on
// shape mismatch.
static void matrixReluWithOther(matrix& d_m_in, matrix& d_m_ac){
    if(d_m_in.height != d_m_ac.height || d_m_in.width != d_m_ac.width){
        std::cout << "relu with other err." << '\n';
        exit(1);
    }
    // Size block/grid to cover the input matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
    matrixReluWithOther_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
// Kernel: out-of-place transpose, trans[r][c] = M[c][r]
// (threads are indexed over the OUTPUT matrix `trans`).
__global__ void matrixTranspose_cuda(matrix M, matrix trans){
    // Determine which element of the output this thread computes.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid output element do work.
    if(row < trans.height && col < trans.width){
        trans.elements[row*trans.width+col] = M.elements[col*M.width+row];
    }
}
// Host wrapper: replace d_m_in with its transpose (allocates the result,
// frees the old buffer, hands the result back through d_m_in).
static void matrixTranspose(matrix& d_m_in){
    // Allocate device storage for the result (swapped dimensions).
    matrix d_ans;
    d_ans.height = d_m_in.width; d_ans.width = d_m_in.height;
    int size = d_ans.width*d_ans.height*sizeof(double);
    cudaMalloc((void**)&d_ans.elements, size);
    // Size block/grid to cover the output matrix.
    dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gld((d_ans.width-1+blk.x)/blk.x, (d_ans.height-1+blk.y)/blk.y);
    matrixTranspose_cuda<<<gld, blk>>>(d_m_in, d_ans);
    // Free the input buffer that is no longer needed.
    cudaFree(d_m_in.elements);
    // Hand the result back through the in/out parameter.
    d_m_in = d_ans;
}
// Kernel: column sums — ans[0][c] = sum over rows of M[r][c]
// (one thread per column).
__global__ void matrixSumColumn_cuda(matrix M, matrix ans){
    // Determine which column this thread computes.
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Only threads mapping to a valid column do work.
    if(col < ans.width){
        double x = 0;
        for(int i = 0; i < M.height; i++){
            x += M.elements[i*M.width+col];
        }
        ans.elements[col] = x;
    }
}
// Host wrapper: collapse d_m_in to a 1 x width row of column sums
// (allocates the result, frees the old buffer, hands the result back).
static void matrixSumColumn(matrix& d_m_in){
    // Allocate device storage for the 1-row result.
    matrix d_ans;
    d_ans.height = 1; d_ans.width = d_m_in.width;
    int size = d_ans.width*d_ans.height*sizeof(double);
    cudaMalloc((void**)&d_ans.elements, size);
    // Size block/grid by column count only.
    dim3 blk(BLOCK_SIZE, 1);
    dim3 gld((d_ans.width-1+blk.x)/blk.x, 1);
    matrixSumColumn_cuda<<<gld, blk>>>(d_m_in, d_ans);
    // Free the input buffer that is no longer needed.
    cudaFree(d_m_in.elements);
    // Hand the result back through the in/out parameter.
    d_m_in = d_ans;
}
__global__ void matrixSumRow_cuda(matrix M, matrix ans){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
//計算が必要なスレッドか確認
if(row < ans.height){
double x = 0;
for(int i = 0; i < M.width; i++){
x += M.elements[row*M.width+i];
}
ans.elements[row] = x;
}
}
static void matrixSumRow(matrix& d_m_in){
//デバイスに演算結果の領域を確保
matrix d_ans;
d_ans.height = d_m_in.height; d_ans.width = 1;
int size = d_ans.width*d_ans.height*sizeof(double);
cudaMalloc((void**)&d_ans.elements, size);
//Cのサイズに合わせてブロックとグリッドの設定
dim3 blk(1, BLOCK_SIZE);
dim3 gld(1, (d_ans.height-1+blk.y)/blk.y);
matrixSumRow_cuda<<<gld, blk>>>(d_m_in, d_ans);
//不要になった入力のメモリの開放
cudaFree(d_m_in.elements);
//演算結果を引き継ぐ
d_m_in = d_ans;
}
__global__ void matrixCrossE_cuda(matrix err, matrix result, matrix teacher){
//行列Cにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
int idx = row*err.width+col;
//計算が必要なスレッドか確認
if(row < err.height && col < err.width){
err.elements[idx] = (teacher.elements[idx] < 0.00000001)? 0: -teacher.elements[idx]*log(result.elements[idx]);
}
}
static void matrixCrossE(matrix& err, matrix& result, matrix& teacher){
if(result.height != teacher.height || result.width != teacher.width){
std::cout << "cross ent err." << '\n';
exit(1);
}
//デバイスに演算結果の領域を確保
err.width = result.width; err.height = result.height;
int size = err.width*err.height*sizeof(double);
cudaMalloc((void**)&err.elements, size);
//Cのサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((err.width-1+blk.x)/blk.x, (err.height-1+blk.y)/blk.y);
matrixCrossE_cuda<<<gld, blk>>>(err, result, teacher);
}
__global__ void matrixAdam_cuda(double leaning_rate, int sequence, matrix ada_grad, matrix velocity_matrix, matrix prime_w_list, matrix w_list){
//cudaの処理
//どこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
int idx = row*w_list.width + col;
//計算が必要なスレッドか確認
if(row < w_list.height && col < w_list.width){
velocity_matrix.elements[idx] = 0.9*velocity_matrix.elements[idx] + 0.1*prime_w_list.elements[idx];
ada_grad.elements[idx] = 0.999*ada_grad.elements[idx] + 0.001*prime_w_list.elements[idx]*prime_w_list.elements[idx];
double v_hat = velocity_matrix.elements[idx]/(1 - pow(0.9, sequence));
double a_hat = ada_grad.elements[idx]/(1 - pow(0.999, sequence));
w_list.elements[idx] -= (leaning_rate*v_hat)/(sqrt(a_hat)+0.00000001);
}
}
static void matrixAdam(double leaning_rate, int sequence, matrix& ada_grad, matrix& velocity_matrix, matrix& prime_w_list, matrix& w_list){
//デバイスでの処理
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((w_list.width-1+blk.x)/blk.x, (w_list.height-1+blk.y)/blk.y);
matrixAdam_cuda<<<gld, blk>>>(leaning_rate, sequence, ada_grad, velocity_matrix, prime_w_list, w_list);
}
void randomInit(matrix m, int maxVal){
for(int i = 0; i < m.height*m.width; i++) m.elements[i] = int(maxVal*(double(rand())/RAND_MAX)) - maxVal/2.0;
}
void checkFunction(void (*func)(matrix&, matrix&), int ah, int aw, int bh, int bw){
matrix A, B;
A.height = ah; A.width = aw;
B.height = bh; B.width = bw;
A.elements = new double[A.width*A.height];
B.elements = new double[B.width*B.height];
randomInit(A, 10);
randomInit(B, 10);
//演算前確認
std::cout << "matrix:in(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
std::cout << "matrix:act(" << B.height << ", " << B.width << ") =" << std::endl;
printMatrix(B);
matrix dA, dB;
dA.width = A.width; dA.height = A.height;
dB.width = B.width; dB.height = B.height;
int size = dA.width*dA.height*sizeof(double);
cudaMalloc((void**)&dA.elements, size);
cudaMemcpy(dA.elements, A.elements, size, cudaMemcpyHostToDevice);
size = dB.width*dB.height*sizeof(double);
cudaMalloc((void**)&dB.elements, size);
cudaMemcpy(dB.elements, B.elements, size, cudaMemcpyHostToDevice);
func(dA, dB);
//Aのサイズを変更されたdAのサイズに合わせる。
delete [] A.elements;
A.height = dA.height;
A.width = dA.width;
A.elements = new double[A.height*A.width];
size = dA.width*dA.height*sizeof(double);
cudaMemcpy(A.elements, dA.elements, size, cudaMemcpyDeviceToHost);
std::cout << "matrix:ans(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
// ホストメモリ解放
delete [] A.elements;
delete [] B.elements;
for(int i = 0; i < 25; i++) std::cout << "-";
std::cout << std::endl;
}
void checkFunction2(void (*func)(matrix&), int ah, int aw){
//行列作成
matrix A;
A.height = ah; A.width = aw;
A.elements = new double[A.width*A.height];
randomInit(A, 10);
//演算前確認
std::cout << "matrix:in(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
matrix dA;
dA.width = A.width; dA.height = A.height;
int size = dA.width*dA.height*sizeof(double);
cudaMalloc((void**)&dA.elements, size);
cudaMemcpy(dA.elements, A.elements, size, cudaMemcpyHostToDevice);
func(dA);
//Aのサイズを変更されたdAのサイズに合わせる。
delete [] A.elements;
A.height = dA.height;
A.width = dA.width;
A.elements = new double[A.height*A.width];
size = dA.width*dA.height*sizeof(double);
cudaMemcpy(A.elements, dA.elements, size, cudaMemcpyDeviceToHost);
std::cout << "matrix:ans(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
// ホストメモリ解放
delete [] A.elements;
for(int i = 0; i < 25; i++) std::cout << "-";
std::cout << std::endl;
}
void checkFunction3(void (*func)(matrix&, double), int ah, int aw, double rate){
//行列作成
matrix A;
A.height = ah; A.width = aw;
A.elements = new double[A.width*A.height];
randomInit(A, 10);
//演算前確認
std::cout << "matrix:in(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
matrix dA;
dA.width = A.width; dA.height = A.height;
int size = dA.width*dA.height*sizeof(double);
cudaMalloc((void**)&dA.elements, size);
cudaMemcpy(dA.elements, A.elements, size, cudaMemcpyHostToDevice);
func(dA, rate);
//Aのサイズを変更されたdAのサイズに合わせる。
delete [] A.elements;
A.height = dA.height;
A.width = dA.width;
A.elements = new double[A.height*A.width];
size = dA.width*dA.height*sizeof(double);
cudaMemcpy(A.elements, dA.elements, size, cudaMemcpyDeviceToHost);
std::cout << "matrix:ans(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
// ホストメモリ解放
delete [] A.elements;
for(int i = 0; i < 25; i++) std::cout << "-";
std::cout << std::endl;
}
void checkAll(){
srand((unsigned int)time(0));
std::cout << "cpy" << std::endl;
checkFunction(matrixCpy, 3,3,2,2);
std::cout << "add" << std::endl;
checkFunction(matrixAdd, 2,2,2,2);
std::cout << "minus" << std::endl;
checkFunction(matrixMinus, 2,2,2,2);
std::cout << "mul" << std::endl;
checkFunction(matrixMul, 2,3,3,2);
std::cout << "bias" << std::endl;
checkFunction(matrixAddBias, 2,3,1,3);
std::cout << "reluwithother" << std::endl;
checkFunction(matrixReluWithOther, 2,3,2,3);
std::cout << "relu" << std::endl;
checkFunction2(matrixRelu, 2,3);
std::cout << "softmax" << std::endl;
checkFunction2(matrixSoftmax, 2,3);
std::cout << "trans" << std::endl;
checkFunction2(matrixTranspose, 2,3);
std::cout << "sumcol" << std::endl;
checkFunction2(matrixSumColumn, 3,4);
std::cout << "sumrow" << std::endl;
checkFunction2(matrixSumRow, 4,3);
std::cout << "const Mul " << 2 << std::endl;
checkFunction3(matrixConstMul, 2,2,2);//最後の引数は倍率ß
}
//int main(){
// checkAll();
//} | #include <hip/hip_runtime.h>
#include <iostream>
#include <cstdio>
#define BLOCK_SIZE 16
#define SIZE_RATE 10
struct matrix{
int height;
int width;
double *elements;
};
void printShape(matrix &m, std::string name){
std::cout << name << ": " << m.height << ", " << m.width << std::endl;
}
void printMatrix(matrix M){
for(int i = 0; i < M.height; i++){
for(int j = 0; j < M.width; j++){
if(j != 0) std::cout << " ";
std::cout << M.elements[i*M.width+j];
}
std::cout << std::endl;
}
std::cout << std::endl;
}
__global__ void matrixZero_cuda(matrix M){
//行列Cにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] = 0;
}
}
static void matrixZero(matrix& d_m_in){
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixZero_cuda<<<gld, blk>>>(d_m_in);
}
__global__ void matrixCpy_cuda(matrix M, matrix org){
//行列Cにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] = org.elements[row*org.width+col];
}
}
static void matrixCpy(matrix& d_m_in, matrix& d_m_ac){
//入力のサイズをコピー元と合わせる。
hipFree(d_m_in.elements);
d_m_in.height = d_m_ac.height; d_m_in.width = d_m_ac.width;
hipMalloc((void**)&d_m_in.elements, d_m_in.height*d_m_in.width*sizeof(double));
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixCpy_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
__global__ void matrixAdd_cuda(matrix M, matrix add){
//行列Cにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] += add.elements[row*add.width+col];
}
}
static void matrixAdd(matrix& d_m_in, matrix& d_m_ac){
if(d_m_in.height != d_m_ac.height || d_m_in.width != d_m_ac.width){
std::cout << "add err." << '\n';
exit(1);
}
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixAdd_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
__global__ void matrixMinus_cuda(matrix M, matrix minus){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] -= minus.elements[row*minus.width+col];
}
}
static void matrixMinus(matrix& d_m_in, matrix& d_m_ac){
if(d_m_in.height != d_m_ac.height || d_m_in.width != d_m_ac.width){
std::cout << "minus err." << '\n';
exit(1);
}
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixMinus_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
__global__ void matrixConstMul_cuda(matrix M, double rate){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] *= rate;
}
}
static void matrixConstMul(matrix& d_m_in, double rate){
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixConstMul_cuda<<<gld, blk>>>(d_m_in, rate);
}
__global__ void matrixMul_cuda(matrix A, matrix B, matrix C){
//行列Cにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < C.height && col < C.width){
double x = 0.0f;
for (int i = 0; i < A.width; i++) {
x += A.elements[row*A.width+i]*B.elements[i*B.width+col];
}
C.elements[row*C.width+col] = x;
}
}
static void matrixMul(matrix& d_m_in, matrix& d_m_ac){
if(d_m_in.width != d_m_ac.height){
std::cout << "mul err." << '\n';
exit(1);
}
//デバイスに演算結果の領域を確保
matrix d_ans;
d_ans.width = d_m_ac.width; d_ans.height = d_m_in.height;
int size;
//デバイスにメモリ確保
size = d_ans.width*d_ans.height*sizeof(double);
hipMalloc((void**)&d_ans.elements, size);
//Cのサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_ans.width-1+blk.x)/blk.x, (d_ans.height-1+blk.y)/blk.y);
matrixMul_cuda<<<gld, blk>>>(d_m_in, d_m_ac, d_ans);
//不要になった入力のメモリの開放
hipFree(d_m_in.elements);
//演算結果を引き継ぐ
d_m_in = d_ans;
}
__global__ void matrixAddBias_cuda(matrix M, matrix bias){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] += bias.elements[col];
}
}
static void matrixAddBias(matrix& d_m_in, matrix& d_m_ac){
if(d_m_in.width != d_m_ac.width){
std::cout << "bias err." << '\n';
exit(1);
}
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixAddBias_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
__global__ void matrixRelu_cuda(matrix M){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] = (M.elements[row*M.width+col] < 0)? 0: M.elements[row*M.width+col];
}
}
static void matrixRelu(matrix& d_m_in){
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixRelu_cuda<<<gld, blk>>>(d_m_in);
}
__global__ void matrixSoftmax_cuda(matrix M){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
//計算が必要なスレッドか確認
if(row < M.height){
double sum = 0;
double max = M.elements[row*M.width];
for (int j = 0; j < M.width; j++) {
max = (M.elements[row*M.width+j] > max)? M.elements[row*M.width+j]: max;
}
for (int j = 0; j < M.width; j++) {
sum += exp(M.elements[row*M.width+j] - max + 350);
if(isinf(sum)){
printf("softmax の分母がinf。\n");
}else if(isnan(sum)){
printf("softmax の分母がnan。\n");
}
}
for (int j = 0; j < M.width; j++) {
double bunshi = exp(M.elements[row*M.width+j] - max + 350);
if(isinf(bunshi)){
printf("softmax の分子がinf, value = %e\n", M.elements[row*M.width+j]);
}else if(isnan(bunshi)){
printf("softmax の分子がnan, value = %e\n", M.elements[row*M.width+j]);
}
M.elements[row*M.width+j] = bunshi/sum;
}
}
}
static void matrixSoftmax(matrix& d_m_in){
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(1, BLOCK_SIZE);
dim3 gld(1, (d_m_in.height-1+blk.y)/blk.y);
matrixSoftmax_cuda<<<gld, blk>>>(d_m_in);
}
__global__ void matrixReluWithOther_cuda(matrix M, matrix relufrom){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < M.height && col < M.width){
M.elements[row*M.width+col] = (relufrom.elements[row*relufrom.width+col] < 0)? 0: M.elements[row*M.width+col];
}
}
static void matrixReluWithOther(matrix& d_m_in, matrix& d_m_ac){
if(d_m_in.height != d_m_ac.height || d_m_in.width != d_m_ac.width){
std::cout << "relu with other err." << '\n';
exit(1);
}
//入力のサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_m_in.width-1+blk.x)/blk.x, (d_m_in.height-1+blk.y)/blk.y);
matrixReluWithOther_cuda<<<gld, blk>>>(d_m_in, d_m_ac);
}
__global__ void matrixTranspose_cuda(matrix M, matrix trans){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(row < trans.height && col < trans.width){
trans.elements[row*trans.width+col] = M.elements[col*M.width+row];
}
}
static void matrixTranspose(matrix& d_m_in){
//デバイスに演算結果の領域を確保
matrix d_ans;
d_ans.height = d_m_in.width; d_ans.width = d_m_in.height;
int size = d_ans.width*d_ans.height*sizeof(double);
hipMalloc((void**)&d_ans.elements, size);
//Cのサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((d_ans.width-1+blk.x)/blk.x, (d_ans.height-1+blk.y)/blk.y);
matrixTranspose_cuda<<<gld, blk>>>(d_m_in, d_ans);
//不要になった入力のメモリの開放
hipFree(d_m_in.elements);
//演算結果を引き継ぐ
d_m_in = d_ans;
}
__global__ void matrixSumColumn_cuda(matrix M, matrix ans){
//行列Mにおけるどこを計算するスレッドか確定する。
int col = blockIdx.x*blockDim.x + threadIdx.x;
//計算が必要なスレッドか確認
if(col < ans.width){
double x = 0;
for(int i = 0; i < M.height; i++){
x += M.elements[i*M.width+col];
}
ans.elements[col] = x;
}
}
static void matrixSumColumn(matrix& d_m_in){
//デバイスに演算結果の領域を確保
matrix d_ans;
d_ans.height = 1; d_ans.width = d_m_in.width;
int size = d_ans.width*d_ans.height*sizeof(double);
hipMalloc((void**)&d_ans.elements, size);
//Cのサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, 1);
dim3 gld((d_ans.width-1+blk.x)/blk.x, 1);
matrixSumColumn_cuda<<<gld, blk>>>(d_m_in, d_ans);
//不要になった入力のメモリの開放
hipFree(d_m_in.elements);
//演算結果を引き継ぐ
d_m_in = d_ans;
}
__global__ void matrixSumRow_cuda(matrix M, matrix ans){
//行列Mにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
//計算が必要なスレッドか確認
if(row < ans.height){
double x = 0;
for(int i = 0; i < M.width; i++){
x += M.elements[row*M.width+i];
}
ans.elements[row] = x;
}
}
static void matrixSumRow(matrix& d_m_in){
//デバイスに演算結果の領域を確保
matrix d_ans;
d_ans.height = d_m_in.height; d_ans.width = 1;
int size = d_ans.width*d_ans.height*sizeof(double);
hipMalloc((void**)&d_ans.elements, size);
//Cのサイズに合わせてブロックとグリッドの設定
dim3 blk(1, BLOCK_SIZE);
dim3 gld(1, (d_ans.height-1+blk.y)/blk.y);
matrixSumRow_cuda<<<gld, blk>>>(d_m_in, d_ans);
//不要になった入力のメモリの開放
hipFree(d_m_in.elements);
//演算結果を引き継ぐ
d_m_in = d_ans;
}
__global__ void matrixCrossE_cuda(matrix err, matrix result, matrix teacher){
//行列Cにおけるどこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
int idx = row*err.width+col;
//計算が必要なスレッドか確認
if(row < err.height && col < err.width){
err.elements[idx] = (teacher.elements[idx] < 0.00000001)? 0: -teacher.elements[idx]*log(result.elements[idx]);
}
}
static void matrixCrossE(matrix& err, matrix& result, matrix& teacher){
if(result.height != teacher.height || result.width != teacher.width){
std::cout << "cross ent err." << '\n';
exit(1);
}
//デバイスに演算結果の領域を確保
err.width = result.width; err.height = result.height;
int size = err.width*err.height*sizeof(double);
hipMalloc((void**)&err.elements, size);
//Cのサイズに合わせてブロックとグリッドの設定
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((err.width-1+blk.x)/blk.x, (err.height-1+blk.y)/blk.y);
matrixCrossE_cuda<<<gld, blk>>>(err, result, teacher);
}
__global__ void matrixAdam_cuda(double leaning_rate, int sequence, matrix ada_grad, matrix velocity_matrix, matrix prime_w_list, matrix w_list){
//cudaの処理
//どこを計算するスレッドか確定する。
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
int idx = row*w_list.width + col;
//計算が必要なスレッドか確認
if(row < w_list.height && col < w_list.width){
velocity_matrix.elements[idx] = 0.9*velocity_matrix.elements[idx] + 0.1*prime_w_list.elements[idx];
ada_grad.elements[idx] = 0.999*ada_grad.elements[idx] + 0.001*prime_w_list.elements[idx]*prime_w_list.elements[idx];
double v_hat = velocity_matrix.elements[idx]/(1 - pow(0.9, sequence));
double a_hat = ada_grad.elements[idx]/(1 - pow(0.999, sequence));
w_list.elements[idx] -= (leaning_rate*v_hat)/(sqrt(a_hat)+0.00000001);
}
}
static void matrixAdam(double leaning_rate, int sequence, matrix& ada_grad, matrix& velocity_matrix, matrix& prime_w_list, matrix& w_list){
//デバイスでの処理
dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
dim3 gld((w_list.width-1+blk.x)/blk.x, (w_list.height-1+blk.y)/blk.y);
matrixAdam_cuda<<<gld, blk>>>(leaning_rate, sequence, ada_grad, velocity_matrix, prime_w_list, w_list);
}
void randomInit(matrix m, int maxVal){
for(int i = 0; i < m.height*m.width; i++) m.elements[i] = int(maxVal*(double(rand())/RAND_MAX)) - maxVal/2.0;
}
void checkFunction(void (*func)(matrix&, matrix&), int ah, int aw, int bh, int bw){
matrix A, B;
A.height = ah; A.width = aw;
B.height = bh; B.width = bw;
A.elements = new double[A.width*A.height];
B.elements = new double[B.width*B.height];
randomInit(A, 10);
randomInit(B, 10);
//演算前確認
std::cout << "matrix:in(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
std::cout << "matrix:act(" << B.height << ", " << B.width << ") =" << std::endl;
printMatrix(B);
matrix dA, dB;
dA.width = A.width; dA.height = A.height;
dB.width = B.width; dB.height = B.height;
int size = dA.width*dA.height*sizeof(double);
hipMalloc((void**)&dA.elements, size);
hipMemcpy(dA.elements, A.elements, size, hipMemcpyHostToDevice);
size = dB.width*dB.height*sizeof(double);
hipMalloc((void**)&dB.elements, size);
hipMemcpy(dB.elements, B.elements, size, hipMemcpyHostToDevice);
func(dA, dB);
//Aのサイズを変更されたdAのサイズに合わせる。
delete [] A.elements;
A.height = dA.height;
A.width = dA.width;
A.elements = new double[A.height*A.width];
size = dA.width*dA.height*sizeof(double);
hipMemcpy(A.elements, dA.elements, size, hipMemcpyDeviceToHost);
std::cout << "matrix:ans(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
// ホストメモリ解放
delete [] A.elements;
delete [] B.elements;
for(int i = 0; i < 25; i++) std::cout << "-";
std::cout << std::endl;
}
void checkFunction2(void (*func)(matrix&), int ah, int aw){
//行列作成
matrix A;
A.height = ah; A.width = aw;
A.elements = new double[A.width*A.height];
randomInit(A, 10);
//演算前確認
std::cout << "matrix:in(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
matrix dA;
dA.width = A.width; dA.height = A.height;
int size = dA.width*dA.height*sizeof(double);
hipMalloc((void**)&dA.elements, size);
hipMemcpy(dA.elements, A.elements, size, hipMemcpyHostToDevice);
func(dA);
//Aのサイズを変更されたdAのサイズに合わせる。
delete [] A.elements;
A.height = dA.height;
A.width = dA.width;
A.elements = new double[A.height*A.width];
size = dA.width*dA.height*sizeof(double);
hipMemcpy(A.elements, dA.elements, size, hipMemcpyDeviceToHost);
std::cout << "matrix:ans(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
// ホストメモリ解放
delete [] A.elements;
for(int i = 0; i < 25; i++) std::cout << "-";
std::cout << std::endl;
}
void checkFunction3(void (*func)(matrix&, double), int ah, int aw, double rate){
//行列作成
matrix A;
A.height = ah; A.width = aw;
A.elements = new double[A.width*A.height];
randomInit(A, 10);
//演算前確認
std::cout << "matrix:in(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
matrix dA;
dA.width = A.width; dA.height = A.height;
int size = dA.width*dA.height*sizeof(double);
hipMalloc((void**)&dA.elements, size);
hipMemcpy(dA.elements, A.elements, size, hipMemcpyHostToDevice);
func(dA, rate);
//Aのサイズを変更されたdAのサイズに合わせる。
delete [] A.elements;
A.height = dA.height;
A.width = dA.width;
A.elements = new double[A.height*A.width];
size = dA.width*dA.height*sizeof(double);
hipMemcpy(A.elements, dA.elements, size, hipMemcpyDeviceToHost);
std::cout << "matrix:ans(" << A.height << ", " << A.width << ") =" << std::endl;
printMatrix(A);
// ホストメモリ解放
delete [] A.elements;
for(int i = 0; i < 25; i++) std::cout << "-";
std::cout << std::endl;
}
void checkAll(){
srand((unsigned int)time(0));
std::cout << "cpy" << std::endl;
checkFunction(matrixCpy, 3,3,2,2);
std::cout << "add" << std::endl;
checkFunction(matrixAdd, 2,2,2,2);
std::cout << "minus" << std::endl;
checkFunction(matrixMinus, 2,2,2,2);
std::cout << "mul" << std::endl;
checkFunction(matrixMul, 2,3,3,2);
std::cout << "bias" << std::endl;
checkFunction(matrixAddBias, 2,3,1,3);
std::cout << "reluwithother" << std::endl;
checkFunction(matrixReluWithOther, 2,3,2,3);
std::cout << "relu" << std::endl;
checkFunction2(matrixRelu, 2,3);
std::cout << "softmax" << std::endl;
checkFunction2(matrixSoftmax, 2,3);
std::cout << "trans" << std::endl;
checkFunction2(matrixTranspose, 2,3);
std::cout << "sumcol" << std::endl;
checkFunction2(matrixSumColumn, 3,4);
std::cout << "sumrow" << std::endl;
checkFunction2(matrixSumRow, 4,3);
std::cout << "const Mul " << 2 << std::endl;
checkFunction3(matrixConstMul, 2,2,2);//最後の引数は倍率ß
}
//int main(){
// checkAll();
//} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | extern "C" __global__ void build_hashtable(int *R, int R_size, int *hash_table) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int key = R[offset];
int hash = key & (R_size-1);
if (offset < R_size) {
hash_table[hash] = key;
}
} | code for sm_80
Function : build_hashtable
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*00b0*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fcc000fffe03f */
/*00c0*/ LOP3.LUT R4, R3, UR4, RZ, 0xc0, !PT ; /* 0x0000000403047c12 */
/* 0x004fca000f8ec0ff */
/*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fca00078e0205 */
/*00e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x000fe2000c101906 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | extern "C" __global__ void build_hashtable(int *R, int R_size, int *hash_table) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int key = R[offset];
int hash = key & (R_size-1);
if (offset < R_size) {
hash_table[hash] = key;
}
} | .file "tmpxft_0012306c_00000000-6_build_hashtable.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z15build_hashtablePiiS_PiiS_
.type _Z38__device_stub__Z15build_hashtablePiiS_PiiS_, @function
_Z38__device_stub__Z15build_hashtablePiiS_PiiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq build_hashtable(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z15build_hashtablePiiS_PiiS_, .-_Z38__device_stub__Z15build_hashtablePiiS_PiiS_
.globl build_hashtable
.type build_hashtable, @function
build_hashtable:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z15build_hashtablePiiS_PiiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size build_hashtable, .-build_hashtable
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "build_hashtable"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq build_hashtable(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | extern "C" __global__ void build_hashtable(int *R, int R_size, int *hash_table) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int key = R[offset];
int hash = key & (R_size-1);
if (offset < R_size) {
hash_table[hash] = key;
}
} | #include <hip/hip_runtime.h>
extern "C" __global__ void build_hashtable(int *R, int R_size, int *hash_table) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int key = R[offset];
int hash = key & (R_size-1);
if (offset < R_size) {
hash_table[hash] = key;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
extern "C" __global__ void build_hashtable(int *R, int R_size, int *hash_table) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int key = R[offset];
int hash = key & (R_size-1);
if (offset < R_size) {
hash_table[hash] = key;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected build_hashtable
.globl build_hashtable
.p2align 8
.type build_hashtable,@function
build_hashtable:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s2, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, exec_lo
v_cmpx_gt_i32_e64 s2, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[4:5], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_add_i32 s2, s2, -1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_and_b32_e32 v0, s2, v2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel build_hashtable
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size build_hashtable, .Lfunc_end0-build_hashtable
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: build_hashtable
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: build_hashtable.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
extern "C" __global__ void build_hashtable(int *R, int R_size, int *hash_table) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int key = R[offset];
int hash = key & (R_size-1);
if (offset < R_size) {
hash_table[hash] = key;
}
} | .text
.file "build_hashtable.hip"
.globl __device_stub__build_hashtable # -- Begin function __device_stub__build_hashtable
.p2align 4, 0x90
.type __device_stub__build_hashtable,@function
__device_stub__build_hashtable: # @__device_stub__build_hashtable
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $build_hashtable, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__build_hashtable, .Lfunc_end0-__device_stub__build_hashtable
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $build_hashtable, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type build_hashtable,@object # @build_hashtable
.section .rodata,"a",@progbits
.globl build_hashtable
.p2align 3, 0x0
build_hashtable:
.quad __device_stub__build_hashtable
.size build_hashtable, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "build_hashtable"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__build_hashtable
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym build_hashtable
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : build_hashtable
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*00b0*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fcc000fffe03f */
/*00c0*/ LOP3.LUT R4, R3, UR4, RZ, 0xc0, !PT ; /* 0x0000000403047c12 */
/* 0x004fca000f8ec0ff */
/*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fca00078e0205 */
/*00e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x000fe2000c101906 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected build_hashtable
.globl build_hashtable
.p2align 8
.type build_hashtable,@function
build_hashtable:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s2, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, exec_lo
v_cmpx_gt_i32_e64 s2, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[4:5], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_add_i32 s2, s2, -1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_and_b32_e32 v0, s2, v2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel build_hashtable
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size build_hashtable, .Lfunc_end0-build_hashtable
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: build_hashtable
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: build_hashtable.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012306c_00000000-6_build_hashtable.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z15build_hashtablePiiS_PiiS_
.type _Z38__device_stub__Z15build_hashtablePiiS_PiiS_, @function
_Z38__device_stub__Z15build_hashtablePiiS_PiiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq build_hashtable(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z15build_hashtablePiiS_PiiS_, .-_Z38__device_stub__Z15build_hashtablePiiS_PiiS_
.globl build_hashtable
.type build_hashtable, @function
build_hashtable:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z15build_hashtablePiiS_PiiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size build_hashtable, .-build_hashtable
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "build_hashtable"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq build_hashtable(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "build_hashtable.hip"
.globl __device_stub__build_hashtable # -- Begin function __device_stub__build_hashtable
.p2align 4, 0x90
.type __device_stub__build_hashtable,@function
__device_stub__build_hashtable: # @__device_stub__build_hashtable
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $build_hashtable, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__build_hashtable, .Lfunc_end0-__device_stub__build_hashtable
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $build_hashtable, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type build_hashtable,@object # @build_hashtable
.section .rodata,"a",@progbits
.globl build_hashtable
.p2align 3, 0x0
build_hashtable:
.quad __device_stub__build_hashtable
.size build_hashtable, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "build_hashtable"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__build_hashtable
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym build_hashtable
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__ void set_chunk_data_vertices(
int x,
int y,
int halo_depth,
double dx,
double dy,
double x_min,
double y_min,
double* vertex_x,
double* vertex_y,
double* vertex_dx,
double* vertex_dy)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x+1)
{
vertex_x[gid] = x_min + dx*(gid-halo_depth);
vertex_dx[gid] = dx;
}
if(gid < y+1)
{
vertex_y[gid] = y_min + dy*(gid-halo_depth);
vertex_dy[gid] = dy;
}
}
// Extended kernel for the chunk initialisation
__global__ void set_chunk_data(
int x,
int y,
double dx,
double dy,
double* cell_x,
double* cell_y,
double* cell_dx,
double* cell_dy,
double* vertex_x,
double* vertex_y,
double* volume,
double* x_area,
double* y_area)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x)
{
cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]);
cell_dx[gid] = dx;
}
if(gid < y)
{
cell_y[gid] = 0.5*(vertex_y[gid]+vertex_y[gid+1]);
cell_dy[gid] = dy;
}
if(gid < x*y)
{
volume[gid] = dx*dy;
}
if(gid < (x+1)*y)
{
x_area[gid] = dy;
}
if(gid < x*(y+1))
{
y_area[gid] = dx;
}
} | code for sm_80
Function : _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0030*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f26270 */
/*0070*/ @!P1 MOV R11, 0x8 ; /* 0x00000008000b9802 */
/* 0x000fca0000000f00 */
/*0080*/ @!P1 IMAD.WIDE R4, R0, R11, c[0x0][0x198] ; /* 0x0000660000049625 */
/* 0x000fca00078e020b */
/*0090*/ @!P1 LDG.E.64 R2, [R4.64+0x8] ; /* 0x0000080804029981 */
/* 0x000ea8000c1e1b00 */
/*00a0*/ @!P1 LDG.E.64 R6, [R4.64] ; /* 0x0000000804069981 */
/* 0x000ea2000c1e1b00 */
/*00b0*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x040fe20003f06270 */
/*00c0*/ @!P1 IMAD.WIDE R8, R0, R11, c[0x0][0x178] ; /* 0x00005e0000089625 */
/* 0x000fe200078e020b */
/*00d0*/ @!P1 MOV R16, c[0x0][0x168] ; /* 0x00005a0000109a02 */
/* 0x000fc60000000f00 */
/*00e0*/ @!P1 IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff119624 */
/* 0x000fe400078e00ff */
/*00f0*/ @!P1 IMAD.WIDE R10, R0, R11, c[0x0][0x188] ; /* 0x00006200000a9625 */
/* 0x000fe200078e020b */
/*0100*/ @!P1 DADD R6, R2, R6 ; /* 0x0000000002069229 */
/* 0x00404a0000000006 */
/*0110*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff038424 */
/* 0x001fe200078e00ff */
/*0120*/ @!P1 DMUL R6, R6, 0.5 ; /* 0x3fe0000006069828 */
/* 0x002e060000000000 */
/*0130*/ @!P0 IMAD.WIDE R12, R0, R3, c[0x0][0x1a0] ; /* 0x00006800000c8625 */
/* 0x000fc800078e0203 */
/*0140*/ @!P1 STG.E.64 [R8.64], R6 ; /* 0x0000000608009986 */
/* 0x0011e8000c101b08 */
/*0150*/ @!P1 STG.E.64 [R10.64], R16 ; /* 0x000000100a009986 */
/* 0x0003e8000c101b08 */
/*0160*/ @!P0 LDG.E.64 R4, [R12.64+0x8] ; /* 0x000008080c048981 */
/* 0x0004e8000c1e1b00 */
/*0170*/ @!P0 LDG.E.64 R14, [R12.64] ; /* 0x000000080c0e8981 */
/* 0x0004e2000c1e1b00 */
/*0180*/ UIMAD UR4, UR7, UR6, URZ ; /* 0x00000006070472a4 */
/* 0x000fe2000f8e023f */
/*0190*/ @!P0 IMAD.WIDE R6, R0, R3, c[0x0][0x180] ; /* 0x0000600000068625 */
/* 0x001fc600078e0203 */
/*01a0*/ UIADD3 UR5, UR4, UR7, URZ ; /* 0x0000000704057290 */
/* 0x000fe2000fffe03f */
/*01b0*/ @!P0 IMAD.WIDE R2, R0.reuse, R3, c[0x0][0x190] ; /* 0x0000640000028625 */
/* 0x040fe200078e0203 */
/*01c0*/ ISETP.GE.AND P1, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf26270 */
/*01d0*/ UIADD3 UR4, UR4, UR6, URZ ; /* 0x0000000604047290 */
/* 0x000fc6000fffe03f */
/*01e0*/ ISETP.GE.AND P2, PT, R0, UR5, PT ; /* 0x0000000500007c0c */
/* 0x000fc6000bf46270 */
/*01f0*/ ISETP.GE.AND P3, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fcc000bf66270 */
/*0200*/ @!P1 MOV R18, c[0x0][0x168] ; /* 0x00005a0000129a02 */
/* 0x000fe20000000f00 */
/*0210*/ @!P1 IMAD.MOV.U32 R11, RZ, RZ, 0x8 ; /* 0x00000008ff0b9424 */
/* 0x002fe200078e00ff */
/*0220*/ @!P1 MOV R19, c[0x0][0x16c] ; /* 0x00005b0000139a02 */
/* 0x000fe40000000f00 */
/*0230*/ @!P2 IMAD.MOV.U32 R13, RZ, RZ, 0x8 ; /* 0x00000008ff0da424 */
/* 0x004fe200078e00ff */
/*0240*/ @!P2 MOV R16, c[0x0][0x170] ; /* 0x00005c000010aa02 */
/* 0x000fe20000000f00 */
/*0250*/ @!P1 IMAD.WIDE R10, R0.reuse, R11, c[0x0][0x1a8] ; /* 0x00006a00000a9625 */
/* 0x040fe200078e020b */
/*0260*/ @!P2 MOV R17, c[0x0][0x174] ; /* 0x00005d000011aa02 */
/* 0x000fe20000000f00 */
/*0270*/ @!P1 DMUL R8, R18, c[0x0][0x170] ; /* 0x00005c0012089a28 */
/* 0x000fe40000000000 */
/*0280*/ @!P2 IMAD.WIDE R12, R0, R13, c[0x0][0x1b0] ; /* 0x00006c00000ca625 */
/* 0x000fc400078e020d */
/*0290*/ @!P0 DADD R4, R4, R14 ; /* 0x0000000004048229 */
/* 0x008064000000000e */
/*02a0*/ @!P0 MOV R14, c[0x0][0x170] ; /* 0x00005c00000e8a02 */
/* 0x001fe40000000f00 */
/*02b0*/ @!P0 MOV R15, c[0x0][0x174] ; /* 0x00005d00000f8a02 */
/* 0x000fe40000000f00 */
/*02c0*/ @!P0 DMUL R4, R4, 0.5 ; /* 0x3fe0000004048828 */
/* 0x002e0e0000000000 */
/*02d0*/ @!P0 STG.E.64 [R6.64], R4 ; /* 0x0000000406008986 */
/* 0x0011e8000c101b08 */
/*02e0*/ @!P0 STG.E.64 [R2.64], R14 ; /* 0x0000000e02008986 */
/* 0x0001e8000c101b08 */
/*02f0*/ @!P1 STG.E.64 [R10.64], R8 ; /* 0x000000080a009986 */
/* 0x0001e8000c101b08 */
/*0300*/ @!P2 STG.E.64 [R12.64], R16 ; /* 0x000000100c00a986 */
/* 0x0001e2000c101b08 */
/*0310*/ @P3 EXIT ; /* 0x000000000000394d */
/* 0x000fea0003800000 */
/*0320*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x001fe200078e00ff */
/*0330*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */
/* 0x000fc40000000f00 */
/*0340*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0350*/ IMAD.WIDE R2, R0, R3, c[0x0][0x1b8] ; /* 0x00006e0000027625 */
/* 0x000fca00078e0203 */
/*0360*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b08 */
/*0370*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0380*/ BRA 0x380; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0x140 ; /* 0x0000010000007945 */
/* 0x000fe40003800000 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GT.AND P0, PT, R0.reuse, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x040fe40003f04270 */
/*0070*/ ISETP.GT.AND P1, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x000fd60003f24270 */
/*0080*/ @P0 BRA 0x130 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*0090*/ IADD3 R10, R0.reuse, -c[0x0][0x168], RZ ; /* 0x80005a00000a7a10 */
/* 0x040fe20007ffe0ff */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ MOV R8, c[0x0][0x170] ; /* 0x00005c0000087a02 */
/* 0x000fe20000000f00 */
/*00c0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff097624 */
/* 0x000fe200078e00ff */
/*00d0*/ I2F.F64 R2, R10 ; /* 0x0000000a00027312 */
/* 0x000e2e0000201c00 */
/*00e0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x190] ; /* 0x0000640000047625 */
/* 0x000fc800078e0207 */
/*00f0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x1a0] ; /* 0x0000680000067625 */
/* 0x000fe200078e0207 */
/*0100*/ DFMA R2, R2, R8, c[0x0][0x180] ; /* 0x000060000202762b */
/* 0x001e0e0000000008 */
/*0110*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x0011e8000c101b04 */
/*0120*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x0001e4000c101b04 */
/*0130*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0140*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0150*/ IADD3 R10, R0.reuse, -c[0x0][0x168], RZ ; /* 0x80005a00000a7a10 */
/* 0x040fe20007ffe0ff */
/*0160*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff087624 */
/* 0x001fe200078e00ff */
/*0170*/ MOV R9, c[0x0][0x17c] ; /* 0x00005f0000097a02 */
/* 0x000fe20000000f00 */
/*0180*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */
/* 0x000fe200078e00ff */
/*0190*/ I2F.F64 R2, R10 ; /* 0x0000000a00027312 */
/* 0x000e260000201c00 */
/*01a0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x198] ; /* 0x0000660000047625 */
/* 0x000fc800078e0207 */
/*01b0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x1a8] ; /* 0x00006a0000067625 */
/* 0x000fe200078e0207 */
/*01c0*/ DFMA R2, R2, R8, c[0x0][0x188] ; /* 0x000062000202762b */
/* 0x001e0e0000000008 */
/*01d0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x001fe8000c101b04 */
/*01e0*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x000fe2000c101b04 */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void set_chunk_data_vertices(
int x,
int y,
int halo_depth,
double dx,
double dy,
double x_min,
double y_min,
double* vertex_x,
double* vertex_y,
double* vertex_dx,
double* vertex_dy)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x+1)
{
vertex_x[gid] = x_min + dx*(gid-halo_depth);
vertex_dx[gid] = dx;
}
if(gid < y+1)
{
vertex_y[gid] = y_min + dy*(gid-halo_depth);
vertex_dy[gid] = dy;
}
}
// Extended kernel for the chunk initialisation
__global__ void set_chunk_data(
int x,
int y,
double dx,
double dy,
double* cell_x,
double* cell_y,
double* cell_dx,
double* cell_dy,
double* vertex_x,
double* vertex_y,
double* volume,
double* x_area,
double* y_area)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x)
{
cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]);
cell_dx[gid] = dx;
}
if(gid < y)
{
cell_y[gid] = 0.5*(vertex_y[gid]+vertex_y[gid+1]);
cell_dy[gid] = dy;
}
if(gid < x*y)
{
volume[gid] = dx*dy;
}
if(gid < (x+1)*y)
{
x_area[gid] = dy;
}
if(gid < x*(y+1))
{
y_area[gid] = dx;
}
} | .file "tmpxft_001067a3_00000000-6_set_chunk_data.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_
.type _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_, @function
_Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movsd %xmm0, 56(%rsp)
movsd %xmm1, 48(%rsp)
movsd %xmm2, 40(%rsp)
movsd %xmm3, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq 256(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 40(%rsp), %rax
movq %rax, 184(%rsp)
leaq 32(%rsp), %rax
movq %rax, 192(%rsp)
leaq 24(%rsp), %rax
movq %rax, 200(%rsp)
leaq 16(%rsp), %rax
movq %rax, 208(%rsp)
leaq 8(%rsp), %rax
movq %rax, 216(%rsp)
movq %rsp, %rax
movq %rax, 224(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 264
pushq 88(%rsp)
.cfi_def_cfa_offset 272
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z23set_chunk_data_verticesiiiddddPdS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_, .-_Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_
.globl _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.type _Z23set_chunk_data_verticesiiiddddPdS_S_S_, @function
_Z23set_chunk_data_verticesiiiddddPdS_S_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23set_chunk_data_verticesiiiddddPdS_S_S_, .-_Z23set_chunk_data_verticesiiiddddPdS_S_S_
.globl _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_
.type _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_, @function
_Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_:
.LFB2053:
.cfi_startproc
endbr64
subq $280, %rsp
.cfi_def_cfa_offset 288
movl %edi, 92(%rsp)
movl %esi, 88(%rsp)
movsd %xmm0, 80(%rsp)
movsd %xmm1, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 48(%rsp)
movq %r9, 40(%rsp)
movq 288(%rsp), %rax
movq %rax, 32(%rsp)
movq 296(%rsp), %rax
movq %rax, 24(%rsp)
movq 304(%rsp), %rax
movq %rax, 16(%rsp)
movq 312(%rsp), %rax
movq %rax, 8(%rsp)
movq 320(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 264(%rsp)
xorl %eax, %eax
leaq 92(%rsp), %rax
movq %rax, 160(%rsp)
leaq 88(%rsp), %rax
movq %rax, 168(%rsp)
leaq 80(%rsp), %rax
movq %rax, 176(%rsp)
leaq 72(%rsp), %rax
movq %rax, 184(%rsp)
leaq 64(%rsp), %rax
movq %rax, 192(%rsp)
leaq 56(%rsp), %rax
movq %rax, 200(%rsp)
leaq 48(%rsp), %rax
movq %rax, 208(%rsp)
leaq 40(%rsp), %rax
movq %rax, 216(%rsp)
leaq 32(%rsp), %rax
movq %rax, 224(%rsp)
leaq 24(%rsp), %rax
movq %rax, 232(%rsp)
leaq 16(%rsp), %rax
movq %rax, 240(%rsp)
leaq 8(%rsp), %rax
movq %rax, 248(%rsp)
movq %rsp, %rax
movq %rax, 256(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
leaq 104(%rsp), %rcx
leaq 96(%rsp), %rdx
leaq 124(%rsp), %rsi
leaq 112(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 264(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $280, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 104(%rsp)
.cfi_def_cfa_offset 296
pushq 104(%rsp)
.cfi_def_cfa_offset 304
leaq 176(%rsp), %r9
movq 140(%rsp), %rcx
movl 148(%rsp), %r8d
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
leaq _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 288
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_, .-_Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_
.globl _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.type _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, @function
_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_:
.LFB2054:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 56(%rsp)
.cfi_def_cfa_offset 32
pushq 56(%rsp)
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, .-_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_"
.align 8
.LC1:
.string "_Z23set_chunk_data_verticesiiiddddPdS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z23set_chunk_data_verticesiiiddddPdS_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void set_chunk_data_vertices(
int x,
int y,
int halo_depth,
double dx,
double dy,
double x_min,
double y_min,
double* vertex_x,
double* vertex_y,
double* vertex_dx,
double* vertex_dy)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x+1)
{
vertex_x[gid] = x_min + dx*(gid-halo_depth);
vertex_dx[gid] = dx;
}
if(gid < y+1)
{
vertex_y[gid] = y_min + dy*(gid-halo_depth);
vertex_dy[gid] = dy;
}
}
// Extended kernel for the chunk initialisation
__global__ void set_chunk_data(
int x,
int y,
double dx,
double dy,
double* cell_x,
double* cell_y,
double* cell_dx,
double* cell_dy,
double* vertex_x,
double* vertex_y,
double* volume,
double* x_area,
double* y_area)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x)
{
cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]);
cell_dx[gid] = dx;
}
if(gid < y)
{
cell_y[gid] = 0.5*(vertex_y[gid]+vertex_y[gid+1]);
cell_dy[gid] = dy;
}
if(gid < x*y)
{
volume[gid] = dx*dy;
}
if(gid < (x+1)*y)
{
x_area[gid] = dy;
}
if(gid < x*(y+1))
{
y_area[gid] = dx;
}
} | #include <hip/hip_runtime.h>
__global__ void set_chunk_data_vertices(
int x,
int y,
int halo_depth,
double dx,
double dy,
double x_min,
double y_min,
double* vertex_x,
double* vertex_y,
double* vertex_dx,
double* vertex_dy)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x+1)
{
vertex_x[gid] = x_min + dx*(gid-halo_depth);
vertex_dx[gid] = dx;
}
if(gid < y+1)
{
vertex_y[gid] = y_min + dy*(gid-halo_depth);
vertex_dy[gid] = dy;
}
}
// Extended kernel for the chunk initialisation
__global__ void set_chunk_data(
int x,
int y,
double dx,
double dy,
double* cell_x,
double* cell_y,
double* cell_dx,
double* cell_dy,
double* vertex_x,
double* vertex_y,
double* volume,
double* x_area,
double* y_area)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x)
{
cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]);
cell_dx[gid] = dx;
}
if(gid < y)
{
cell_y[gid] = 0.5*(vertex_y[gid]+vertex_y[gid+1]);
cell_dy[gid] = dy;
}
if(gid < x*y)
{
volume[gid] = dx*dy;
}
if(gid < (x+1)*y)
{
x_area[gid] = dy;
}
if(gid < x*(y+1))
{
y_area[gid] = dx;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void set_chunk_data_vertices(
int x,
int y,
int halo_depth,
double dx,
double dy,
double x_min,
double y_min,
double* vertex_x,
double* vertex_y,
double* vertex_dx,
double* vertex_dy)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x+1)
{
vertex_x[gid] = x_min + dx*(gid-halo_depth);
vertex_dx[gid] = dx;
}
if(gid < y+1)
{
vertex_y[gid] = y_min + dy*(gid-halo_depth);
vertex_dy[gid] = dy;
}
}
// Extended kernel for the chunk initialisation
__global__ void set_chunk_data(
int x,
int y,
double dx,
double dy,
double* cell_x,
double* cell_y,
double* cell_dx,
double* cell_dy,
double* vertex_x,
double* vertex_y,
double* volume,
double* x_area,
double* y_area)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x)
{
cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]);
cell_dx[gid] = dx;
}
if(gid < y)
{
cell_y[gid] = 0.5*(vertex_y[gid]+vertex_y[gid+1]);
cell_dy[gid] = dy;
}
if(gid < x*y)
{
volume[gid] = dx*dy;
}
if(gid < (x+1)*y)
{
x_area[gid] = dy;
}
if(gid < x*(y+1))
{
y_area[gid] = dx;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.globl _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.p2align 8
.type _Z23set_chunk_data_verticesiiiddddPdS_S_S_,@function
_Z23set_chunk_data_verticesiiiddddPdS_S_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x5c
s_load_b32 s3, s[0:1], 0x0
s_load_b32 s4, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_subrev_nc_u32_e32 v0, s4, v1
v_ashrrev_i32_e32 v2, 31, v1
v_cmpx_ge_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_delay_alu instid0(VALU_DEP_3)
v_cvt_f64_i32_e32 v[3:4], v0
s_clause 0x3
s_load_b64 s[4:5], s[0:1], 0x10
s_load_b64 s[6:7], s[0:1], 0x20
s_load_b64 s[8:9], s[0:1], 0x30
s_load_b64 s[10:11], s[0:1], 0x40
v_lshlrev_b64 v[5:6], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v10, s5 :: v_dual_mov_b32 v9, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s8, v5
v_add_co_ci_u32_e32 v8, vcc_lo, s9, v6, vcc_lo
v_add_co_u32 v5, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s11, v6, vcc_lo
v_fma_f64 v[3:4], v[3:4], s[4:5], s[6:7]
global_store_b64 v[7:8], v[3:4], off
global_store_b64 v[5:6], v[9:10], off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
s_load_b32 s2, s[0:1], 0x4
s_waitcnt lgkmcnt(0)
v_cmp_ge_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
v_cvt_f64_i32_e32 v[3:4], v0
s_clause 0x3
s_load_b64 s[2:3], s[0:1], 0x18
s_load_b64 s[4:5], s[0:1], 0x28
s_load_b64 s[6:7], s[0:1], 0x38
s_load_b64 s[0:1], s[0:1], 0x48
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v8, s3 :: v_dual_mov_b32 v7, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v5, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
v_fma_f64 v[3:4], v[3:4], s[2:3], s[4:5]
global_store_b64 v[5:6], v[3:4], off
global_store_b64 v[0:1], v[7:8], off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 336
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23set_chunk_data_verticesiiiddddPdS_S_S_, .Lfunc_end0-_Z23set_chunk_data_verticesiiiddddPdS_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.globl _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.p2align 8
.type _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_,@function
_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x6c
s_load_b32 s6, s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_mov_b32 s4, exec_lo
v_ashrrev_i32_e32 v2, 31, v1
v_cmpx_gt_i32_e64 s6, v1
s_cbranch_execz .LBB1_2
s_load_b64 s[8:9], s[0:1], 0x38
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[7:8], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s8, v7
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v8, vcc_lo
global_load_b128 v[3:6], v[3:4], off
s_clause 0x1
s_load_b64 s[8:9], s[0:1], 0x18
s_load_b64 s[10:11], s[0:1], 0x28
s_waitcnt lgkmcnt(0)
v_add_co_u32 v9, vcc_lo, s8, v7
v_add_co_ci_u32_e32 v10, vcc_lo, s9, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s10, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s11, v8, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_dual_mov_b32 v6, s3 :: v_dual_mov_b32 v5, s2
s_delay_alu instid0(VALU_DEP_2)
v_mul_f64 v[3:4], v[3:4], 0.5
global_store_b64 v[9:10], v[3:4], off
global_store_b64 v[7:8], v[5:6], off
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s4
s_clause 0x1
s_load_b32 s7, s[0:1], 0x4
s_load_b64 s[4:5], s[0:1], 0x10
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s7, v1
s_cbranch_execz .LBB1_4
s_load_b64 s[10:11], s[0:1], 0x40
v_lshlrev_b64 v[7:8], 3, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s10, v7
v_add_co_ci_u32_e32 v4, vcc_lo, s11, v8, vcc_lo
global_load_b128 v[3:6], v[3:4], off
s_clause 0x1
s_load_b64 s[10:11], s[0:1], 0x20
s_load_b64 s[12:13], s[0:1], 0x30
s_waitcnt lgkmcnt(0)
v_add_co_u32 v9, vcc_lo, s10, v7
v_add_co_ci_u32_e32 v10, vcc_lo, s11, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s12, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s13, v8, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_dual_mov_b32 v6, s5 :: v_dual_mov_b32 v5, s4
s_delay_alu instid0(VALU_DEP_2)
v_mul_f64 v[3:4], v[3:4], 0.5
global_store_b64 v[9:10], v[3:4], off
global_store_b64 v[7:8], v[5:6], off
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s8
s_mul_i32 s8, s7, s6
s_delay_alu instid0(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_and_saveexec_b32 s8, vcc_lo
s_cbranch_execz .LBB1_6
v_mul_f64 v[3:4], s[2:3], s[4:5]
s_load_b64 s[10:11], s[0:1], 0x48
v_lshlrev_b64 v[5:6], 3, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s11, v6, vcc_lo
global_store_b64 v[5:6], v[3:4], off
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s8
s_add_i32 s8, s6, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s8, s8, s7
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_and_saveexec_b32 s8, vcc_lo
s_cbranch_execz .LBB1_8
s_load_b64 s[10:11], s[0:1], 0x50
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_dual_mov_b32 v6, s5 :: v_dual_mov_b32 v5, s4
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s10, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s11, v4, vcc_lo
global_store_b64 v[3:4], v[5:6], off
.LBB1_8:
s_or_b32 exec_lo, exec_lo, s8
s_add_i32 s4, s7, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s4, s4, s6
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB1_10
s_load_b64 s[0:1], s[0:1], 0x58
v_lshlrev_b64 v[0:1], 3, v[1:2]
v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[2:3], off
.LBB1_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 352
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, .Lfunc_end1-_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .offset: 80
.size: 4
.value_kind: hidden_block_count_x
- .offset: 84
.size: 4
.value_kind: hidden_block_count_y
- .offset: 88
.size: 4
.value_kind: hidden_block_count_z
- .offset: 92
.size: 2
.value_kind: hidden_group_size_x
- .offset: 94
.size: 2
.value_kind: hidden_group_size_y
- .offset: 96
.size: 2
.value_kind: hidden_group_size_z
- .offset: 98
.size: 2
.value_kind: hidden_remainder_x
- .offset: 100
.size: 2
.value_kind: hidden_remainder_y
- .offset: 102
.size: 2
.value_kind: hidden_remainder_z
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 144
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 336
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23set_chunk_data_verticesiiiddddPdS_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 80
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 88
.size: 8
.value_kind: global_buffer
- .offset: 96
.size: 4
.value_kind: hidden_block_count_x
- .offset: 100
.size: 4
.value_kind: hidden_block_count_y
- .offset: 104
.size: 4
.value_kind: hidden_block_count_z
- .offset: 108
.size: 2
.value_kind: hidden_group_size_x
- .offset: 110
.size: 2
.value_kind: hidden_group_size_y
- .offset: 112
.size: 2
.value_kind: hidden_group_size_z
- .offset: 114
.size: 2
.value_kind: hidden_remainder_x
- .offset: 116
.size: 2
.value_kind: hidden_remainder_y
- .offset: 118
.size: 2
.value_kind: hidden_remainder_z
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 144
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 152
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 160
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 352
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void set_chunk_data_vertices(
int x,
int y,
int halo_depth,
double dx,
double dy,
double x_min,
double y_min,
double* vertex_x,
double* vertex_y,
double* vertex_dx,
double* vertex_dy)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x+1)
{
vertex_x[gid] = x_min + dx*(gid-halo_depth);
vertex_dx[gid] = dx;
}
if(gid < y+1)
{
vertex_y[gid] = y_min + dy*(gid-halo_depth);
vertex_dy[gid] = dy;
}
}
// Extended kernel for the chunk initialisation
__global__ void set_chunk_data(
    int x,
    int y,
    double dx,
    double dy,
    double* cell_x,
    double* cell_y,
    double* cell_dx,
    double* cell_dy,
    double* vertex_x,
    double* vertex_y,
    double* volume,
    double* x_area,
    double* y_area)
{
    // One thread per cell/face index.
    const int tid = threadIdx.x + blockDim.x*blockIdx.x;

    // Cell centres are the midpoints of their bounding vertices; the cell
    // widths are uniform.
    if(tid < x)
    {
        cell_x[tid]  = 0.5*(vertex_x[tid] + vertex_x[tid+1]);
        cell_dx[tid] = dx;
    }
    if(tid < y)
    {
        cell_y[tid]  = 0.5*(vertex_y[tid] + vertex_y[tid+1]);
        cell_dy[tid] = dy;
    }

    // Uniform mesh: every cell volume and face area is constant.
    if(tid < x*y)
    {
        volume[tid] = dx*dy;
    }
    if(tid < (x+1)*y)   // x-faces: one extra column of faces
    {
        x_area[tid] = dy;
    }
    if(tid < x*(y+1))   // y-faces: one extra row of faces
    {
        y_area[tid] = dx;
    }
}
.file "set_chunk_data.hip"
.globl _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_ # -- Begin function _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.p2align 4, 0x90
.type _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_,@function
# Host-side launch stub for set_chunk_data_vertices (SysV AMD64).
# Spills the register-passed arguments (3 ints in edi/esi/edx, 4 doubles in
# xmm0-xmm3, 3 pointers in rcx/r8/r9; the 11th argument arrives on the
# caller's stack at 224(%rsp) after the frame is set up), builds the array of
# pointers-to-arguments that hipLaunchKernel expects, pops the launch
# configuration with __hipPopCallConfiguration, then launches the kernel.
_Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_: # @_Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
# Spill register arguments into the local frame.
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movsd %xmm0, 120(%rsp)
movsd %xmm1, 112(%rsp)
movsd %xmm2, 104(%rsp)
movsd %xmm3, 96(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
# Kernel-argument pointer array at 128(%rsp): one slot per argument,
# each pointing at the spilled (or caller-stack) value.
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 120(%rsp), %rax
movq %rax, 152(%rsp)
leaq 112(%rsp), %rax
movq %rax, 160(%rsp)
leaq 104(%rsp), %rax
movq %rax, 168(%rsp)
leaq 96(%rsp), %rax
movq %rax, 176(%rsp)
leaq 88(%rsp), %rax
movq %rax, 184(%rsp)
leaq 80(%rsp), %rax
movq %rax, 192(%rsp)
leaq 72(%rsp), %rax
movq %rax, 200(%rsp)
# Final argument (vertex_dy) still lives in the caller's stack frame.
leaq 224(%rsp), %rax
movq %rax, 208(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream for this launch.
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z23set_chunk_data_verticesiiiddddPdS_S_S_, %edi
# Shared-mem size and stream are passed on the stack (args 7 and 8).
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end0:
.size _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_, .Lfunc_end0-_Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.cfi_endproc
# -- End function
.globl _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_ # -- Begin function _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.p2align 4, 0x90
.type _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_,@function
# Host-side launch stub for set_chunk_data (SysV AMD64).
# Spills the register-passed arguments (2 ints in edi/esi, 2 doubles in
# xmm0/xmm1, 4 pointers in rdx/rcx/r8/r9; the remaining 5 pointers arrive on
# the caller's stack at 224..256(%rsp) after the frame is set up), builds the
# kernel-argument pointer array, pops the launch configuration, and calls
# hipLaunchKernel.
_Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_: # @_Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
# Spill register arguments into the local frame.
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movsd %xmm0, 104(%rsp)
movsd %xmm1, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
# Kernel-argument pointer array at 112(%rsp).
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 88(%rsp), %rax
movq %rax, 144(%rsp)
leaq 80(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
# Arguments 9-13 (vertex_x .. y_area) still live in the caller's frame.
leaq 224(%rsp), %rax
movq %rax, 176(%rsp)
leaq 232(%rsp), %rax
movq %rax, 184(%rsp)
leaq 240(%rsp), %rax
movq %rax, 192(%rsp)
leaq 248(%rsp), %rax
movq %rax, 200(%rsp)
leaq 256(%rsp), %rax
movq %rax, 208(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream for this launch.
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, %edi
# Shared-mem size and stream are passed on the stack (args 7 and 8).
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end1:
.size _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, .Lfunc_end1-_Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (invoked via .init_array): registers the embedded fat
# binary with the HIP runtime exactly once (guarded by __hip_gpubin_handle),
# registers both kernels' host-side handles, and schedules __hip_module_dtor
# to run at program exit.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Only register the fat binary on the first invocation.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
# Zero the four optional dim/size out-parameters passed on the stack.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# Register set_chunk_data_vertices under its mangled name.
movl $_Z23set_chunk_data_verticesiiiddddPdS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# Register set_chunk_data under its mangled name.
movl $_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Tail-call atexit(__hip_module_dtor) so teardown unregisters the binary.
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered via atexit in __hip_module_ctor):
# unregisters the fat binary if it was ever registered and clears the
# cached handle so the teardown is idempotent.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23set_chunk_data_verticesiiiddddPdS_S_S_,@object # @_Z23set_chunk_data_verticesiiiddddPdS_S_S_
.section .rodata,"a",@progbits
.globl _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.p2align 3, 0x0
_Z23set_chunk_data_verticesiiiddddPdS_S_S_:
.quad _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.size _Z23set_chunk_data_verticesiiiddddPdS_S_S_, 8
.type _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_,@object # @_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.globl _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.p2align 3, 0x0
_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_:
.quad _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.size _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23set_chunk_data_verticesiiiddddPdS_S_S_"
.size .L__unnamed_1, 43
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_"
.size .L__unnamed_2, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.addrsig_sym _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.addrsig_sym _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0030*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f26270 */
/*0070*/ @!P1 MOV R11, 0x8 ; /* 0x00000008000b9802 */
/* 0x000fca0000000f00 */
/*0080*/ @!P1 IMAD.WIDE R4, R0, R11, c[0x0][0x198] ; /* 0x0000660000049625 */
/* 0x000fca00078e020b */
/*0090*/ @!P1 LDG.E.64 R2, [R4.64+0x8] ; /* 0x0000080804029981 */
/* 0x000ea8000c1e1b00 */
/*00a0*/ @!P1 LDG.E.64 R6, [R4.64] ; /* 0x0000000804069981 */
/* 0x000ea2000c1e1b00 */
/*00b0*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x040fe20003f06270 */
/*00c0*/ @!P1 IMAD.WIDE R8, R0, R11, c[0x0][0x178] ; /* 0x00005e0000089625 */
/* 0x000fe200078e020b */
/*00d0*/ @!P1 MOV R16, c[0x0][0x168] ; /* 0x00005a0000109a02 */
/* 0x000fc60000000f00 */
/*00e0*/ @!P1 IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff119624 */
/* 0x000fe400078e00ff */
/*00f0*/ @!P1 IMAD.WIDE R10, R0, R11, c[0x0][0x188] ; /* 0x00006200000a9625 */
/* 0x000fe200078e020b */
/*0100*/ @!P1 DADD R6, R2, R6 ; /* 0x0000000002069229 */
/* 0x00404a0000000006 */
/*0110*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff038424 */
/* 0x001fe200078e00ff */
/*0120*/ @!P1 DMUL R6, R6, 0.5 ; /* 0x3fe0000006069828 */
/* 0x002e060000000000 */
/*0130*/ @!P0 IMAD.WIDE R12, R0, R3, c[0x0][0x1a0] ; /* 0x00006800000c8625 */
/* 0x000fc800078e0203 */
/*0140*/ @!P1 STG.E.64 [R8.64], R6 ; /* 0x0000000608009986 */
/* 0x0011e8000c101b08 */
/*0150*/ @!P1 STG.E.64 [R10.64], R16 ; /* 0x000000100a009986 */
/* 0x0003e8000c101b08 */
/*0160*/ @!P0 LDG.E.64 R4, [R12.64+0x8] ; /* 0x000008080c048981 */
/* 0x0004e8000c1e1b00 */
/*0170*/ @!P0 LDG.E.64 R14, [R12.64] ; /* 0x000000080c0e8981 */
/* 0x0004e2000c1e1b00 */
/*0180*/ UIMAD UR4, UR7, UR6, URZ ; /* 0x00000006070472a4 */
/* 0x000fe2000f8e023f */
/*0190*/ @!P0 IMAD.WIDE R6, R0, R3, c[0x0][0x180] ; /* 0x0000600000068625 */
/* 0x001fc600078e0203 */
/*01a0*/ UIADD3 UR5, UR4, UR7, URZ ; /* 0x0000000704057290 */
/* 0x000fe2000fffe03f */
/*01b0*/ @!P0 IMAD.WIDE R2, R0.reuse, R3, c[0x0][0x190] ; /* 0x0000640000028625 */
/* 0x040fe200078e0203 */
/*01c0*/ ISETP.GE.AND P1, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf26270 */
/*01d0*/ UIADD3 UR4, UR4, UR6, URZ ; /* 0x0000000604047290 */
/* 0x000fc6000fffe03f */
/*01e0*/ ISETP.GE.AND P2, PT, R0, UR5, PT ; /* 0x0000000500007c0c */
/* 0x000fc6000bf46270 */
/*01f0*/ ISETP.GE.AND P3, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fcc000bf66270 */
/*0200*/ @!P1 MOV R18, c[0x0][0x168] ; /* 0x00005a0000129a02 */
/* 0x000fe20000000f00 */
/*0210*/ @!P1 IMAD.MOV.U32 R11, RZ, RZ, 0x8 ; /* 0x00000008ff0b9424 */
/* 0x002fe200078e00ff */
/*0220*/ @!P1 MOV R19, c[0x0][0x16c] ; /* 0x00005b0000139a02 */
/* 0x000fe40000000f00 */
/*0230*/ @!P2 IMAD.MOV.U32 R13, RZ, RZ, 0x8 ; /* 0x00000008ff0da424 */
/* 0x004fe200078e00ff */
/*0240*/ @!P2 MOV R16, c[0x0][0x170] ; /* 0x00005c000010aa02 */
/* 0x000fe20000000f00 */
/*0250*/ @!P1 IMAD.WIDE R10, R0.reuse, R11, c[0x0][0x1a8] ; /* 0x00006a00000a9625 */
/* 0x040fe200078e020b */
/*0260*/ @!P2 MOV R17, c[0x0][0x174] ; /* 0x00005d000011aa02 */
/* 0x000fe20000000f00 */
/*0270*/ @!P1 DMUL R8, R18, c[0x0][0x170] ; /* 0x00005c0012089a28 */
/* 0x000fe40000000000 */
/*0280*/ @!P2 IMAD.WIDE R12, R0, R13, c[0x0][0x1b0] ; /* 0x00006c00000ca625 */
/* 0x000fc400078e020d */
/*0290*/ @!P0 DADD R4, R4, R14 ; /* 0x0000000004048229 */
/* 0x008064000000000e */
/*02a0*/ @!P0 MOV R14, c[0x0][0x170] ; /* 0x00005c00000e8a02 */
/* 0x001fe40000000f00 */
/*02b0*/ @!P0 MOV R15, c[0x0][0x174] ; /* 0x00005d00000f8a02 */
/* 0x000fe40000000f00 */
/*02c0*/ @!P0 DMUL R4, R4, 0.5 ; /* 0x3fe0000004048828 */
/* 0x002e0e0000000000 */
/*02d0*/ @!P0 STG.E.64 [R6.64], R4 ; /* 0x0000000406008986 */
/* 0x0011e8000c101b08 */
/*02e0*/ @!P0 STG.E.64 [R2.64], R14 ; /* 0x0000000e02008986 */
/* 0x0001e8000c101b08 */
/*02f0*/ @!P1 STG.E.64 [R10.64], R8 ; /* 0x000000080a009986 */
/* 0x0001e8000c101b08 */
/*0300*/ @!P2 STG.E.64 [R12.64], R16 ; /* 0x000000100c00a986 */
/* 0x0001e2000c101b08 */
/*0310*/ @P3 EXIT ; /* 0x000000000000394d */
/* 0x000fea0003800000 */
/*0320*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x001fe200078e00ff */
/*0330*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */
/* 0x000fc40000000f00 */
/*0340*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0350*/ IMAD.WIDE R2, R0, R3, c[0x0][0x1b8] ; /* 0x00006e0000027625 */
/* 0x000fca00078e0203 */
/*0360*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b08 */
/*0370*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0380*/ BRA 0x380; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0x140 ; /* 0x0000010000007945 */
/* 0x000fe40003800000 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GT.AND P0, PT, R0.reuse, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x040fe40003f04270 */
/*0070*/ ISETP.GT.AND P1, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x000fd60003f24270 */
/*0080*/ @P0 BRA 0x130 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*0090*/ IADD3 R10, R0.reuse, -c[0x0][0x168], RZ ; /* 0x80005a00000a7a10 */
/* 0x040fe20007ffe0ff */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ MOV R8, c[0x0][0x170] ; /* 0x00005c0000087a02 */
/* 0x000fe20000000f00 */
/*00c0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff097624 */
/* 0x000fe200078e00ff */
/*00d0*/ I2F.F64 R2, R10 ; /* 0x0000000a00027312 */
/* 0x000e2e0000201c00 */
/*00e0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x190] ; /* 0x0000640000047625 */
/* 0x000fc800078e0207 */
/*00f0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x1a0] ; /* 0x0000680000067625 */
/* 0x000fe200078e0207 */
/*0100*/ DFMA R2, R2, R8, c[0x0][0x180] ; /* 0x000060000202762b */
/* 0x001e0e0000000008 */
/*0110*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x0011e8000c101b04 */
/*0120*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x0001e4000c101b04 */
/*0130*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0140*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0150*/ IADD3 R10, R0.reuse, -c[0x0][0x168], RZ ; /* 0x80005a00000a7a10 */
/* 0x040fe20007ffe0ff */
/*0160*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff087624 */
/* 0x001fe200078e00ff */
/*0170*/ MOV R9, c[0x0][0x17c] ; /* 0x00005f0000097a02 */
/* 0x000fe20000000f00 */
/*0180*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */
/* 0x000fe200078e00ff */
/*0190*/ I2F.F64 R2, R10 ; /* 0x0000000a00027312 */
/* 0x000e260000201c00 */
/*01a0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x198] ; /* 0x0000660000047625 */
/* 0x000fc800078e0207 */
/*01b0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x1a8] ; /* 0x00006a0000067625 */
/* 0x000fe200078e0207 */
/*01c0*/ DFMA R2, R2, R8, c[0x0][0x188] ; /* 0x000062000202762b */
/* 0x001e0e0000000008 */
/*01d0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x001fe8000c101b04 */
/*01e0*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x000fe2000c101b04 */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.globl _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.p2align 8
.type _Z23set_chunk_data_verticesiiiddddPdS_S_S_,@function
; gfx1100 code for set_chunk_data_vertices(x, y, halo_depth, dx, dy,
; x_min, y_min, vertex_x, vertex_y, vertex_dx, vertex_dy).
; On entry: s[0:1] = kernarg segment base, s15 = workgroup id X,
; v0 = workitem id X. Kernarg offsets (see amdhsa metadata):
; 0x0=x, 0x4=y, 0x8=halo_depth, 0x10=dx, 0x18=dy, 0x20=x_min, 0x28=y_min,
; 0x30=vertex_x, 0x38=vertex_y, 0x40=vertex_dx, 0x48=vertex_dy,
; 0x5c=hidden_group_size_x (low 16 bits).
_Z23set_chunk_data_verticesiiiddddPdS_S_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x5c
s_load_b32 s3, s[0:1], 0x0
s_load_b32 s4, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; gid = workgroup_id_x * group_size_x + workitem_id_x  -> v1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
; v0 = gid - halo_depth; v[1:2] = sign-extended 64-bit gid for addressing
v_subrev_nc_u32_e32 v0, s4, v1
v_ashrrev_i32_e32 v2, 31, v1
; keep only lanes with gid <= x (i.e. gid < x+1)
v_cmpx_ge_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
; vertex_x[gid] = x_min + dx*(gid - halo_depth); vertex_dx[gid] = dx
s_delay_alu instid0(VALU_DEP_3)
v_cvt_f64_i32_e32 v[3:4], v0
s_clause 0x3
s_load_b64 s[4:5], s[0:1], 0x10
s_load_b64 s[6:7], s[0:1], 0x20
s_load_b64 s[8:9], s[0:1], 0x30
s_load_b64 s[10:11], s[0:1], 0x40
v_lshlrev_b64 v[5:6], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v10, s5 :: v_dual_mov_b32 v9, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s8, v5
v_add_co_ci_u32_e32 v8, vcc_lo, s9, v6, vcc_lo
v_add_co_u32 v5, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s11, v6, vcc_lo
v_fma_f64 v[3:4], v[3:4], s[4:5], s[6:7]
global_store_b64 v[7:8], v[3:4], off
global_store_b64 v[5:6], v[9:10], off
.LBB0_2:
; restore exec, then predicate on gid <= y (i.e. gid < y+1)
s_or_b32 exec_lo, exec_lo, s2
s_load_b32 s2, s[0:1], 0x4
s_waitcnt lgkmcnt(0)
v_cmp_ge_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
; vertex_y[gid] = y_min + dy*(gid - halo_depth); vertex_dy[gid] = dy
v_cvt_f64_i32_e32 v[3:4], v0
s_clause 0x3
s_load_b64 s[2:3], s[0:1], 0x18
s_load_b64 s[4:5], s[0:1], 0x28
s_load_b64 s[6:7], s[0:1], 0x38
s_load_b64 s[0:1], s[0:1], 0x48
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v8, s3 :: v_dual_mov_b32 v7, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v5, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
v_fma_f64 v[3:4], v[3:4], s[2:3], s[4:5]
global_store_b64 v[5:6], v[3:4], off
global_store_b64 v[0:1], v[7:8], off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 336
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23set_chunk_data_verticesiiiddddPdS_S_S_, .Lfunc_end0-_Z23set_chunk_data_verticesiiiddddPdS_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.globl _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.p2align 8
.type _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_,@function
; gfx1100 code for set_chunk_data(x, y, dx, dy, cell_x, cell_y, cell_dx,
; cell_dy, vertex_x, vertex_y, volume, x_area, y_area).
; On entry: s[0:1] = kernarg segment base, s15 = workgroup id X,
; v0 = workitem id X. Kernarg offsets (see amdhsa metadata):
; 0x0=x, 0x4=y, 0x8=dx, 0x10=dy, then the nine pointers at 0x18..0x58,
; 0x6c=hidden_group_size_x (low 16 bits).
_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x6c
s_load_b32 s6, s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; gid = workgroup_id_x * group_size_x + workitem_id_x  -> v1
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_mov_b32 s4, exec_lo
v_ashrrev_i32_e32 v2, 31, v1
; lanes with gid < x: cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]),
; cell_dx[gid] = dx
v_cmpx_gt_i32_e64 s6, v1
s_cbranch_execz .LBB1_2
s_load_b64 s[8:9], s[0:1], 0x38
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[7:8], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s8, v7
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v8, vcc_lo
; single 128-bit load fetches vertex_x[gid] and vertex_x[gid+1] together
global_load_b128 v[3:6], v[3:4], off
s_clause 0x1
s_load_b64 s[8:9], s[0:1], 0x18
s_load_b64 s[10:11], s[0:1], 0x28
s_waitcnt lgkmcnt(0)
v_add_co_u32 v9, vcc_lo, s8, v7
v_add_co_ci_u32_e32 v10, vcc_lo, s9, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s10, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s11, v8, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_dual_mov_b32 v6, s3 :: v_dual_mov_b32 v5, s2
s_delay_alu instid0(VALU_DEP_2)
v_mul_f64 v[3:4], v[3:4], 0.5
global_store_b64 v[9:10], v[3:4], off
global_store_b64 v[7:8], v[5:6], off
.LBB1_2:
; restore exec; lanes with gid < y: same midpoint computed from vertex_y,
; cell_dy[gid] = dy
s_or_b32 exec_lo, exec_lo, s4
s_clause 0x1
s_load_b32 s7, s[0:1], 0x4
s_load_b64 s[4:5], s[0:1], 0x10
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s7, v1
s_cbranch_execz .LBB1_4
s_load_b64 s[10:11], s[0:1], 0x40
v_lshlrev_b64 v[7:8], 3, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s10, v7
v_add_co_ci_u32_e32 v4, vcc_lo, s11, v8, vcc_lo
global_load_b128 v[3:6], v[3:4], off
s_clause 0x1
s_load_b64 s[10:11], s[0:1], 0x20
s_load_b64 s[12:13], s[0:1], 0x30
s_waitcnt lgkmcnt(0)
v_add_co_u32 v9, vcc_lo, s10, v7
v_add_co_ci_u32_e32 v10, vcc_lo, s11, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s12, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s13, v8, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_dual_mov_b32 v6, s5 :: v_dual_mov_b32 v5, s4
s_delay_alu instid0(VALU_DEP_2)
v_mul_f64 v[3:4], v[3:4], 0.5
global_store_b64 v[9:10], v[3:4], off
global_store_b64 v[7:8], v[5:6], off
.LBB1_4:
; lanes with gid < x*y: volume[gid] = dx*dy
s_or_b32 exec_lo, exec_lo, s8
s_mul_i32 s8, s7, s6
s_delay_alu instid0(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_and_saveexec_b32 s8, vcc_lo
s_cbranch_execz .LBB1_6
v_mul_f64 v[3:4], s[2:3], s[4:5]
s_load_b64 s[10:11], s[0:1], 0x48
v_lshlrev_b64 v[5:6], 3, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s11, v6, vcc_lo
global_store_b64 v[5:6], v[3:4], off
.LBB1_6:
; lanes with gid < (x+1)*y: x_area[gid] = dy
s_or_b32 exec_lo, exec_lo, s8
s_add_i32 s8, s6, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s8, s8, s7
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_and_saveexec_b32 s8, vcc_lo
s_cbranch_execz .LBB1_8
s_load_b64 s[10:11], s[0:1], 0x50
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_dual_mov_b32 v6, s5 :: v_dual_mov_b32 v5, s4
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s10, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s11, v4, vcc_lo
global_store_b64 v[3:4], v[5:6], off
.LBB1_8:
; lanes with gid < x*(y+1): y_area[gid] = dx
s_or_b32 exec_lo, exec_lo, s8
s_add_i32 s4, s7, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s4, s4, s6
v_cmp_gt_i32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB1_10
s_load_b64 s[0:1], s[0:1], 0x58
v_lshlrev_b64 v[0:1], 3, v[1:2]
v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[2:3], off
.LBB1_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 352
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, .Lfunc_end1-_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .offset: 80
.size: 4
.value_kind: hidden_block_count_x
- .offset: 84
.size: 4
.value_kind: hidden_block_count_y
- .offset: 88
.size: 4
.value_kind: hidden_block_count_z
- .offset: 92
.size: 2
.value_kind: hidden_group_size_x
- .offset: 94
.size: 2
.value_kind: hidden_group_size_y
- .offset: 96
.size: 2
.value_kind: hidden_group_size_z
- .offset: 98
.size: 2
.value_kind: hidden_remainder_x
- .offset: 100
.size: 2
.value_kind: hidden_remainder_y
- .offset: 102
.size: 2
.value_kind: hidden_remainder_z
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 144
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 336
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23set_chunk_data_verticesiiiddddPdS_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 80
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 88
.size: 8
.value_kind: global_buffer
- .offset: 96
.size: 4
.value_kind: hidden_block_count_x
- .offset: 100
.size: 4
.value_kind: hidden_block_count_y
- .offset: 104
.size: 4
.value_kind: hidden_block_count_z
- .offset: 108
.size: 2
.value_kind: hidden_group_size_x
- .offset: 110
.size: 2
.value_kind: hidden_group_size_y
- .offset: 112
.size: 2
.value_kind: hidden_group_size_z
- .offset: 114
.size: 2
.value_kind: hidden_remainder_x
- .offset: 116
.size: 2
.value_kind: hidden_remainder_y
- .offset: 118
.size: 2
.value_kind: hidden_remainder_z
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 144
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 152
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 160
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 352
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001067a3_00000000-6_set_chunk_data.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_
.type _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_, @function
_Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movsd %xmm0, 56(%rsp)
movsd %xmm1, 48(%rsp)
movsd %xmm2, 40(%rsp)
movsd %xmm3, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq 256(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 40(%rsp), %rax
movq %rax, 184(%rsp)
leaq 32(%rsp), %rax
movq %rax, 192(%rsp)
leaq 24(%rsp), %rax
movq %rax, 200(%rsp)
leaq 16(%rsp), %rax
movq %rax, 208(%rsp)
leaq 8(%rsp), %rax
movq %rax, 216(%rsp)
movq %rsp, %rax
movq %rax, 224(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 264
pushq 88(%rsp)
.cfi_def_cfa_offset 272
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z23set_chunk_data_verticesiiiddddPdS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_, .-_Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_
.globl _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.type _Z23set_chunk_data_verticesiiiddddPdS_S_S_, @function
_Z23set_chunk_data_verticesiiiddddPdS_S_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z56__device_stub__Z23set_chunk_data_verticesiiiddddPdS_S_S_iiiddddPdS_S_S_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23set_chunk_data_verticesiiiddddPdS_S_S_, .-_Z23set_chunk_data_verticesiiiddddPdS_S_S_
.globl _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_
.type _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_, @function
_Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_:
.LFB2053:
.cfi_startproc
endbr64
subq $280, %rsp
.cfi_def_cfa_offset 288
movl %edi, 92(%rsp)
movl %esi, 88(%rsp)
movsd %xmm0, 80(%rsp)
movsd %xmm1, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 48(%rsp)
movq %r9, 40(%rsp)
movq 288(%rsp), %rax
movq %rax, 32(%rsp)
movq 296(%rsp), %rax
movq %rax, 24(%rsp)
movq 304(%rsp), %rax
movq %rax, 16(%rsp)
movq 312(%rsp), %rax
movq %rax, 8(%rsp)
movq 320(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 264(%rsp)
xorl %eax, %eax
leaq 92(%rsp), %rax
movq %rax, 160(%rsp)
leaq 88(%rsp), %rax
movq %rax, 168(%rsp)
leaq 80(%rsp), %rax
movq %rax, 176(%rsp)
leaq 72(%rsp), %rax
movq %rax, 184(%rsp)
leaq 64(%rsp), %rax
movq %rax, 192(%rsp)
leaq 56(%rsp), %rax
movq %rax, 200(%rsp)
leaq 48(%rsp), %rax
movq %rax, 208(%rsp)
leaq 40(%rsp), %rax
movq %rax, 216(%rsp)
leaq 32(%rsp), %rax
movq %rax, 224(%rsp)
leaq 24(%rsp), %rax
movq %rax, 232(%rsp)
leaq 16(%rsp), %rax
movq %rax, 240(%rsp)
leaq 8(%rsp), %rax
movq %rax, 248(%rsp)
movq %rsp, %rax
movq %rax, 256(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
leaq 104(%rsp), %rcx
leaq 96(%rsp), %rdx
leaq 124(%rsp), %rsi
leaq 112(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 264(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $280, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 104(%rsp)
.cfi_def_cfa_offset 296
pushq 104(%rsp)
.cfi_def_cfa_offset 304
leaq 176(%rsp), %r9
movq 140(%rsp), %rcx
movl 148(%rsp), %r8d
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
leaq _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 288
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_, .-_Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_
.globl _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.type _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, @function
_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_:
.LFB2054:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 56(%rsp)
.cfi_def_cfa_offset 32
pushq 56(%rsp)
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z54__device_stub__Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_iiddPdS_S_S_S_S_S_S_S_
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, .-_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_"
.align 8
.LC1:
.string "_Z23set_chunk_data_verticesiiiddddPdS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z23set_chunk_data_verticesiiiddddPdS_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "set_chunk_data.hip"
.globl _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_ # -- Begin function _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.p2align 4, 0x90
.type _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_,@function
_Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_: # @_Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movsd %xmm0, 120(%rsp)
movsd %xmm1, 112(%rsp)
movsd %xmm2, 104(%rsp)
movsd %xmm3, 96(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 120(%rsp), %rax
movq %rax, 152(%rsp)
leaq 112(%rsp), %rax
movq %rax, 160(%rsp)
leaq 104(%rsp), %rax
movq %rax, 168(%rsp)
leaq 96(%rsp), %rax
movq %rax, 176(%rsp)
leaq 88(%rsp), %rax
movq %rax, 184(%rsp)
leaq 80(%rsp), %rax
movq %rax, 192(%rsp)
leaq 72(%rsp), %rax
movq %rax, 200(%rsp)
leaq 224(%rsp), %rax
movq %rax, 208(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z23set_chunk_data_verticesiiiddddPdS_S_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end0:
.size _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_, .Lfunc_end0-_Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.cfi_endproc
# -- End function
.globl _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_ # -- Begin function _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.p2align 4, 0x90
.type _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_,@function
_Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_: # @_Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movsd %xmm0, 104(%rsp)
movsd %xmm1, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 88(%rsp), %rax
movq %rax, 144(%rsp)
leaq 80(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
leaq 224(%rsp), %rax
movq %rax, 176(%rsp)
leaq 232(%rsp), %rax
movq %rax, 184(%rsp)
leaq 240(%rsp), %rax
movq %rax, 192(%rsp)
leaq 248(%rsp), %rax
movq %rax, 200(%rsp)
leaq 256(%rsp), %rax
movq %rax, 208(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end1:
.size _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, .Lfunc_end1-_Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23set_chunk_data_verticesiiiddddPdS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23set_chunk_data_verticesiiiddddPdS_S_S_,@object # @_Z23set_chunk_data_verticesiiiddddPdS_S_S_
.section .rodata,"a",@progbits
.globl _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.p2align 3, 0x0
_Z23set_chunk_data_verticesiiiddddPdS_S_S_:
.quad _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.size _Z23set_chunk_data_verticesiiiddddPdS_S_S_, 8
.type _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_,@object # @_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.globl _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.p2align 3, 0x0
_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_:
.quad _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.size _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23set_chunk_data_verticesiiiddddPdS_S_S_"
.size .L__unnamed_1, 43
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_"
.size .L__unnamed_2, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__set_chunk_data_verticesiiiddddPdS_S_S_
.addrsig_sym _Z29__device_stub__set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23set_chunk_data_verticesiiiddddPdS_S_S_
.addrsig_sym _Z14set_chunk_dataiiddPdS_S_S_S_S_S_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
cudaMalloc( &d_a, SIZE*sizeof(float));
cudaMalloc( &d_b, SIZE*sizeof(float));
cudaMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
cudaMemcpy( d_a, a, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_c, c, SIZE*sizeof(float), cudaMemcpyHostToDevice );
start = clock();
VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
end = clock();
cudaMemcpy( c, d_c, SIZE*sizeof(float), cudaMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
} | code for sm_80
Function : _Z9VectorAddPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0060*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
cudaMalloc( &d_a, SIZE*sizeof(float));
cudaMalloc( &d_b, SIZE*sizeof(float));
cudaMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
cudaMemcpy( d_a, a, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_c, c, SIZE*sizeof(float), cudaMemcpyHostToDevice );
start = clock();
VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
end = clock();
cudaMemcpy( c, d_c, SIZE*sizeof(float), cudaMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
} | .file "tmpxft_0014ecd4_00000000-6_first_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
.type _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i, @function
_Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9VectorAddPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
.globl _Z9VectorAddPfS_S_i
.type _Z9VectorAddPfS_S_i, @function
_Z9VectorAddPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9VectorAddPfS_S_i, .-_Z9VectorAddPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "c[%d] = %f\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "Time = %f seconds to execute.\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $4096, %edi
call malloc@PLT
movq %rax, %r12
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $4096, %edi
call malloc@PLT
movq %rax, %rbx
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%r12,%rax,4)
movss %xmm0, 0(%rbp,%rax,4)
movl $0x00000000, (%rbx,%rax,4)
addq $1, %rax
cmpq $1024, %rax
jne .L12
movl $1, %ecx
movl $4096, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbx, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, %r15
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call clock@PLT
movq %rax, 8(%rsp)
movl $2, %ecx
movl $4096, %edx
movq 40(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl $0, %r13d
leaq .LC1(%rip), %r14
.L14:
pxor %xmm0, %xmm0
cvtss2sd (%rbx,%r13,4), %xmm0
movl %r13d, %edx
movq %r14, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %r13
cmpq $10, %r13
jne .L14
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rax
subq %r15, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC2(%rip), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movl $1024, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z9VectorAddPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z9VectorAddPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
cudaMalloc( &d_a, SIZE*sizeof(float));
cudaMalloc( &d_b, SIZE*sizeof(float));
cudaMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
cudaMemcpy( d_a, a, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_c, c, SIZE*sizeof(float), cudaMemcpyHostToDevice );
start = clock();
VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
end = clock();
cudaMemcpy( c, d_c, SIZE*sizeof(float), cudaMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
hipMalloc( &d_a, SIZE*sizeof(float));
hipMalloc( &d_b, SIZE*sizeof(float));
hipMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
hipMemcpy( d_a, a, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_b, b, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_c, c, SIZE*sizeof(float), hipMemcpyHostToDevice );
start = clock();
VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
end = clock();
hipMemcpy( c, d_c, SIZE*sizeof(float), hipMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
hipMalloc( &d_a, SIZE*sizeof(float));
hipMalloc( &d_b, SIZE*sizeof(float));
hipMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
hipMemcpy( d_a, a, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_b, b, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_c, c, SIZE*sizeof(float), hipMemcpyHostToDevice );
start = clock();
VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
end = clock();
hipMemcpy( c, d_c, SIZE*sizeof(float), hipMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9VectorAddPfS_S_i
.globl _Z9VectorAddPfS_S_i
.p2align 8
.type _Z9VectorAddPfS_S_i,@function
_Z9VectorAddPfS_S_i:
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9VectorAddPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9VectorAddPfS_S_i, .Lfunc_end0-_Z9VectorAddPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9VectorAddPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z9VectorAddPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
    // One thread per element: lane index within the (single) block.
    const int idx = threadIdx.x;
    // Guard: threads beyond the logical length do nothing.
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
int main()
{
// Host buffers (a, b, c) and their device mirrors (d_a, d_b, d_c),
// each holding SIZE floats.
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
// NOTE(review): malloc/hipMalloc/hipMemcpy return values are never
// checked; a failed allocation would crash in the init loop below.
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
hipMalloc( &d_a, SIZE*sizeof(float));
hipMalloc( &d_b, SIZE*sizeof(float));
hipMalloc( &d_c, SIZE*sizeof(float));
// Initialize inputs: a[i] = b[i] = i, so the expected result is c[i] = 2*i.
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
// Copy operands (and the zeroed result buffer) to the device.
hipMemcpy( d_a, a, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_b, b, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_c, c, SIZE*sizeof(float), hipMemcpyHostToDevice );
start = clock();
// Single block of SIZE (1024) threads; one element per thread.
VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
// NOTE(review): the kernel launch is asynchronous; without a
// hipDeviceSynchronize() before this clock() the interval measures
// launch overhead, not kernel execution — confirm intent. The
// hipMemcpy below does synchronize, so the printed results are valid.
end = clock();
hipMemcpy( c, d_c, SIZE*sizeof(float), hipMemcpyDeviceToHost );
// Spot-check the first 10 results.
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Convert the clock() delta (launch overhead; see note above) to seconds.
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
}
.file "first_cuda.hip"
.globl _Z24__device_stub__VectorAddPfS_S_i # -- Begin function _Z24__device_stub__VectorAddPfS_S_i
.p2align 4, 0x90
.type _Z24__device_stub__VectorAddPfS_S_i,@function
_Z24__device_stub__VectorAddPfS_S_i: # @_Z24__device_stub__VectorAddPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9VectorAddPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__VectorAddPfS_S_i, .Lfunc_end0-_Z24__device_stub__VectorAddPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r15
leaq 24(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
xorl %r12d, %r12d
movl $4096, %edx # imm = 0x1000
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %r12d, %xmm0
movss %xmm0, (%rbx,%r12,4)
movss %xmm0, (%r14,%r12,4)
incq %r12
cmpq $1024, %r12 # imm = 0x400
jne .LBB1_1
# %bb.2:
movq 24(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %r12
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1023(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1024, 36(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z9VectorAddPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq clock
movq %rax, %r13
movq 8(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss (%r15,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movl %ebp, %esi
movb $1, %al
callq printf
incq %rbp
cmpq $10, %rbp
jne .LBB1_5
# %bb.6:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
subq %r12, %r13
xorps %xmm0, %xmm0
cvtsi2sd %r13, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9VectorAddPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor, registered with atexit() by __hip_module_ctor:
# unregisters the fat binary exactly once and clears the cached handle.
# In:    nothing (reads __hip_gpubin_handle)
# Clobb: rdi, flags (plus whatever __hipUnregisterFatBinary clobbers)
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi  # rdi = cached fatbin handle
testq %rdi, %rdi                      # already unregistered (NULL)?
je .LBB3_2                            # yes -> nothing to do
# %bb.1:
pushq %rax                            # 8-byte dummy push: realigns rsp to 16 before the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)    # clear handle so a second run is a no-op
addq $8, %rsp                         # drop the alignment slot
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9VectorAddPfS_S_i,@object # @_Z9VectorAddPfS_S_i
.section .rodata,"a",@progbits
.globl _Z9VectorAddPfS_S_i
.p2align 3, 0x0
_Z9VectorAddPfS_S_i:
.quad _Z24__device_stub__VectorAddPfS_S_i
.size _Z9VectorAddPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "c[%d] = %f\n"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Time = %f seconds to execute.\n"
.size .L.str.1, 31
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9VectorAddPfS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__VectorAddPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9VectorAddPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9VectorAddPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0060*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9VectorAddPfS_S_i
.globl _Z9VectorAddPfS_S_i
.p2align 8
.type _Z9VectorAddPfS_S_i,@function
; VectorAdd(a, b, c, n) for gfx1100 (wave32): v0 = threadIdx.x on entry,
; s[0:1] = kernarg base. Kernarg layout (see metadata): a@0, b@8, c@16, n@24.
_Z9VectorAddPfS_S_i:
s_load_b32 s2, s[0:1], 0x18           ; s2 = n (kernarg offset 0x18)
s_waitcnt lgkmcnt(0)                  ; wait for the scalar load
v_cmp_gt_i32_e32 vcc_lo, s2, v0       ; per-lane predicate: tid < n
s_and_saveexec_b32 s2, vcc_lo         ; mask off out-of-range lanes
s_cbranch_execz .LBB0_2               ; whole wave out of range -> skip body
s_load_b128 s[4:7], s[0:1], 0x0       ; s[4:5] = a, s[6:7] = b
v_lshlrev_b32_e32 v0, 2, v0           ; v0 = tid * 4 (byte offset into float arrays)
s_load_b64 s[0:1], s[0:1], 0x10       ; s[0:1] = c
s_waitcnt lgkmcnt(0)
s_clause 0x1                          ; issue the two loads back-to-back
global_load_b32 v1, v0, s[4:5]        ; v1 = a[tid]
global_load_b32 v2, v0, s[6:7]        ; v2 = b[tid]
s_waitcnt vmcnt(0)                    ; wait for both loads
v_add_f32_e32 v1, v1, v2              ; v1 = a[tid] + b[tid]
global_store_b32 v0, v1, s[0:1]       ; c[tid] = v1
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)  ; release VGPRs early before ending
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9VectorAddPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9VectorAddPfS_S_i, .Lfunc_end0-_Z9VectorAddPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9VectorAddPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z9VectorAddPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0014ecd4_00000000-6_first_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
.type _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i, @function
_Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9VectorAddPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
.globl _Z9VectorAddPfS_S_i
.type _Z9VectorAddPfS_S_i, @function
_Z9VectorAddPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9VectorAddPfS_S_i, .-_Z9VectorAddPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "c[%d] = %f\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "Time = %f seconds to execute.\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $4096, %edi
call malloc@PLT
movq %rax, %r12
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $4096, %edi
call malloc@PLT
movq %rax, %rbx
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%r12,%rax,4)
movss %xmm0, 0(%rbp,%rax,4)
movl $0x00000000, (%rbx,%rax,4)
addq $1, %rax
cmpq $1024, %rax
jne .L12
movl $1, %ecx
movl $4096, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbx, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, %r15
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call clock@PLT
movq %rax, 8(%rsp)
movl $2, %ecx
movl $4096, %edx
movq 40(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl $0, %r13d
leaq .LC1(%rip), %r14
.L14:
pxor %xmm0, %xmm0
cvtss2sd (%rbx,%r13,4), %xmm0
movl %r13d, %edx
movq %r14, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %r13
cmpq $10, %r13
jne .L14
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rax
subq %r15, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC2(%rip), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movl $1024, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z33__device_stub__Z9VectorAddPfS_S_iPfS_S_i
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z9VectorAddPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z9VectorAddPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "first_cuda.hip"
.globl _Z24__device_stub__VectorAddPfS_S_i # -- Begin function _Z24__device_stub__VectorAddPfS_S_i
.p2align 4, 0x90
.type _Z24__device_stub__VectorAddPfS_S_i,@function
_Z24__device_stub__VectorAddPfS_S_i: # @_Z24__device_stub__VectorAddPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9VectorAddPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__VectorAddPfS_S_i, .Lfunc_end0-_Z24__device_stub__VectorAddPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r15
leaq 24(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
xorl %r12d, %r12d
movl $4096, %edx # imm = 0x1000
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %r12d, %xmm0
movss %xmm0, (%rbx,%r12,4)
movss %xmm0, (%r14,%r12,4)
incq %r12
cmpq $1024, %r12 # imm = 0x400
jne .LBB1_1
# %bb.2:
movq 24(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %r12
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1023(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1024, 36(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z9VectorAddPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq clock
movq %rax, %r13
movq 8(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss (%r15,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movl %ebp, %esi
movb $1, %al
callq printf
incq %rbp
cmpq $10, %rbp
jne .LBB1_5
# %bb.6:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
subq %r12, %r13
xorps %xmm0, %xmm0
cvtsi2sd %r13, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9VectorAddPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9VectorAddPfS_S_i,@object # @_Z9VectorAddPfS_S_i
.section .rodata,"a",@progbits
.globl _Z9VectorAddPfS_S_i
.p2align 3, 0x0
_Z9VectorAddPfS_S_i:
.quad _Z24__device_stub__VectorAddPfS_S_i
.size _Z9VectorAddPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "c[%d] = %f\n"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Time = %f seconds to execute.\n"
.size .L.str.1, 31
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9VectorAddPfS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__VectorAddPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9VectorAddPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* matrix transpose program */
#include <stdio.h>
const int P = 32;
/* naive CPU */
/*
 * Reference CPU transpose: dst = src^T.
 *
 * src: M-row by N-column matrix, row-major (element (j,i) at src[j*N+i]).
 * dst: receives the N-row by M-column transpose, row-major.
 *
 * Fix: dst has M columns, so its row stride is M, not N. The original
 * dst[i*N+j] was correct only for square matrices (M == N), which is how
 * main() happens to call it (1024 x 1024) — behavior there is unchanged.
 */
void naiveCPU(float *src, float *dst, int M, int N)
{
    for (int i = 0; i < N; ++i) {        /* row of dst == column of src */
        for (int j = 0; j < M; ++j) {    /* col of dst == row of src    */
            dst[i*M+j] = src[j*N+i];
        }
    }
}
/* naive GPU */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
    // Global coordinates of the element this thread handles.
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Read _a row-major at (row, col); write the transposed position in _b.
    // No bounds check: the launch grid is assumed to tile the matrix exactly.
    _b[col * rows + row] = _a[row * cols + col];
}
/* shared memory GPU */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
    // One P x P tile staged in shared memory per thread block.
    __shared__ float tile[P][P];

    const int baseX = blockIdx.x * blockDim.x;
    const int baseY = blockIdx.y * blockDim.y;

    // Phase 1: load this block's tile from _a (stored transposed in `tile`).
    const int srcRow = baseY + threadIdx.y;
    const int srcCol = baseX + threadIdx.x;
    if (srcRow < rows && srcCol < cols)
        tile[threadIdx.x][threadIdx.y] = _a[srcRow * cols + srcCol];

    // All loads must land before any thread reads the tile back out.
    __syncthreads();

    // Phase 2: write the tile to the mirrored block position in _b.
    const int dstRow = baseX + threadIdx.y;
    const int dstCol = baseY + threadIdx.x;
    if (dstCol < cols && dstRow < rows)
        _b[dstRow * rows + dstCol] = tile[threadIdx.y][threadIdx.x];
}
/* loop-unrolled tiled transpose (padded shared memory, 32x8 thread blocks) */
// Tiled transpose with a padded shared-memory tile and an unrolled k-loop.
// Expects blockDim = (P, 8): each thread covers 4 rows of the P x P tile
// (k = 0, 8, 16, 24). The +1 column pad keeps same-column accesses in
// different shared-memory banks.
// NOTE(review): the load guard tests (x < rows && y+k < cols) but indexes
// _a with stride `rows`, while the store indexes _b with stride `cols` —
// these are only mutually consistent for square matrices, which is how
// main() launches this kernel (1024 x 1024). Confirm before non-square use.
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
// Source-side global coordinates for this thread's column of the tile.
int x = blockIdx.x * P + threadIdx.x;
int y = blockIdx.y * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < rows && y+k < cols)
mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
}
// Tile must be fully populated before the transposed read-out below.
__syncthreads();
// Destination-side coordinates: block indices swap roles for the write.
x = blockIdx.y * P + threadIdx.x;
y = blockIdx.x * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < cols && y+k < rows)
_b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
}
}
/* shared memory transpose without bank conflicts (tile padded to P x (P+1)) */
// Same algorithm as matrixTransposeShared, but the tile is padded to
// P x (P+1): the extra column skews addresses so that the column-wise
// accesses (mat[threadIdx.x][threadIdx.y]) hit distinct shared-memory banks.
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
// Top-left corner of this block's tile in the source matrix.
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
// (i, j): source element; (ti, tj): its transposed destination.
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
// All tile writes must complete before any transposed reads.
__syncthreads();
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* Benchmark driver: times a CPU reference transpose and four GPU transpose
 * kernels on a 1024 x 1024 float matrix, averaging each over 100 runs and
 * printing the per-iteration time in milliseconds.
 * Fix over the original: host buffers, device buffers, and the two CUDA
 * events are now released before exit (the original leaked all of them). */
int main(int argc, char **argv)
{
    /* N*M matrix, parallelism is P */
    const int N = 1024;
    const int M = 1024;
    const int matSize = N * M * sizeof(float);
    dim3 gridDim(N/P, M/P, 1);
    dim3 blockDim(P , P, 1);
    /* configuration of GPU */
    printf("===================\n");
    printf("Matrix: %d * %d\n", N, M);
    printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
    printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
    printf("===================\n");
    /* allocate memory for matrix in host */
    float *h_matrix = (float *) malloc(matSize);
    float *h_transpose = (float *) malloc(matSize);
    /* allocate memory for matrix in device */
    float *d_matrix, *d_transpose;
    cudaMalloc(&d_matrix, matSize);
    cudaMalloc(&d_transpose, matSize);
    /* randomly fill the matrix on the host with values in [0, 100]
     * (N == M here, so the i*N row stride is correct) */
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < M; ++j) {
            h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
        }
    }
    /* events used to time every variant */
    cudaEvent_t tStart, tEnd;
    float duration;
    cudaEventCreate(&tStart);
    cudaEventCreate(&tEnd);
    const int nIterations = 100;
    /* 1. naive CPU transpose */
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        naiveCPU(h_matrix, h_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd); // waits for record to terminate
    cudaEventElapsedTime(&duration, tStart, tEnd);
    printf("\nNaive CPU: %f\n", duration / nIterations);
    /* 2. naive GPU transpose */
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nNaive GPU: %f\n", duration / nIterations);
    /* 3. shared memory GPU transpose */
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nShared GPU: %f\n", duration / nIterations);
    /* 4. shared memory GPU transpose without bank conflict */
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nSharedwBC GPU: %f\n", duration / nIterations);
    duration = 0;
    /* 5. unrolled GPU transpose: P x 8 threads, 4 tile rows per thread */
    dim3 blockDimUnroll(P, 8, 1);
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nUnrolled GPU: %f\n", duration / nIterations);
    /* release everything (the original leaked these) */
    cudaEventDestroy(tStart);
    cudaEventDestroy(tEnd);
    cudaFree(d_matrix);
    cudaFree(d_transpose);
    free(h_matrix);
    free(h_transpose);
    return 0;
}
Function : _Z24matrixTransposeSharedwBCPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000e280000002200 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e680000002100 */
/*0050*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e620000002500 */
/*0060*/ IMAD R0, R5, c[0x0][0x4], R6 ; /* 0x0000010005007a24 */
/* 0x001fca00078e0206 */
/*0070*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fe20003f06270 */
/*0080*/ IMAD R3, R4, c[0x0][0x0], R7 ; /* 0x0000000004037a24 */
/* 0x002fca00078e0207 */
/*0090*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */
/* 0x000fda0000706670 */
/*00a0*/ @!P0 MOV R2, 0x4 ; /* 0x0000000400028802 */
/* 0x000fe20000000f00 */
/*00b0*/ @!P0 IMAD R3, R0, c[0x0][0x170], R3 ; /* 0x00005c0000038a24 */
/* 0x000fc800078e0203 */
/*00c0*/ @!P0 IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003028625 */
/* 0x000fcc00078e0202 */
/*00d0*/ @!P0 LDG.E R2, [R2.64] ; /* 0x0000000402028981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ @!P0 IMAD R9, R7, 0x21, R6.reuse ; /* 0x0000002107098824 */
/* 0x100fe400078e0206 */
/*00f0*/ IMAD R0, R4, c[0x0][0x0], R6 ; /* 0x0000000004007a24 */
/* 0x000fe400078e0206 */
/*0100*/ IMAD R5, R5, c[0x0][0x4], R7 ; /* 0x0000010005057a24 */
/* 0x000fc600078e0207 */
/*0110*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fc80003f26270 */
/*0120*/ ISETP.GE.OR P1, PT, R5, c[0x0][0x170], P1 ; /* 0x00005c0005007a0c */
/* 0x000fe20000f26670 */
/*0130*/ @!P0 STS [R9.X4], R2 ; /* 0x0000000209008388 */
/* 0x0041e80000004800 */
/*0140*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*0150*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0160*/ IMAD R7, R6, 0x21, R7 ; /* 0x0000002106077824 */
/* 0x001fe200078e0207 */
/*0170*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0180*/ IMAD R2, R0, c[0x0][0x174], R5 ; /* 0x00005d0000027a24 */
/* 0x000fc800078e0205 */
/*0190*/ LDS R7, [R7.X4] ; /* 0x0000000007077984 */
/* 0x000e2a0000004800 */
/*01a0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0203 */
/*01b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x001fe2000c101904 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23matrixTransposeUnrolledPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0040*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002200 */
/*0050*/ S2R R12, SR_CTAID.Y ; /* 0x00000000000c7919 */
/* 0x000e620000002600 */
/*0060*/ IMAD R6, R2, 0x20, R3 ; /* 0x0000002002067824 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x174], PT ; /* 0x00005d0006007a0c */
/* 0x000fe40003f06270 */
/*0080*/ LEA R5, R12, R0, 0x5 ; /* 0x000000000c057211 */
/* 0x002fc800078e28ff */
/*0090*/ IADD3 R7, R5.reuse, 0x8, RZ ; /* 0x0000000805077810 */
/* 0x040fe40007ffe0ff */
/*00a0*/ IADD3 R9, R5.reuse, 0x10, RZ ; /* 0x0000001005097810 */
/* 0x040fe40007ffe0ff */
/*00b0*/ IADD3 R11, R5, 0x18, RZ ; /* 0x00000018050b7810 */
/* 0x000fe40007ffe0ff */
/*00c0*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000746670 */
/*00d0*/ ISETP.GE.OR P4, PT, R5, c[0x0][0x170], P0 ; /* 0x00005c0005007a0c */
/* 0x000fe40000786670 */
/*00e0*/ ISETP.GE.OR P3, PT, R9, c[0x0][0x170], P0 ; /* 0x00005c0009007a0c */
/* 0x000fc40000766670 */
/*00f0*/ ISETP.GE.OR P0, PT, R11, c[0x0][0x170], P0 ; /* 0x00005c000b007a0c */
/* 0x000fce0000706670 */
/*0100*/ @!P2 MOV R8, 0x4 ; /* 0x000000040008a802 */
/* 0x000fe20000000f00 */
/*0110*/ @!P2 IMAD R7, R7, c[0x0][0x174], R6.reuse ; /* 0x00005d000707aa24 */
/* 0x100fe400078e0206 */
/*0120*/ @!P4 IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff04c424 */
/* 0x000fe400078e00ff */
/*0130*/ @!P3 IMAD R9, R9, c[0x0][0x174], R6.reuse ; /* 0x00005d000909ba24 */
/* 0x100fe200078e0206 */
/*0140*/ @!P0 MOV R14, 0x4 ; /* 0x00000004000e8802 */
/* 0x000fe20000000f00 */
/*0150*/ @!P3 IMAD.MOV.U32 R10, RZ, RZ, 0x4 ; /* 0x00000004ff0ab424 */
/* 0x000fe400078e00ff */
/*0160*/ @!P4 IMAD R5, R5, c[0x0][0x174], R6.reuse ; /* 0x00005d000505ca24 */
/* 0x100fe400078e0206 */
/*0170*/ @!P0 IMAD R11, R11, c[0x0][0x174], R6 ; /* 0x00005d000b0b8a24 */
/* 0x000fc400078e0206 */
/*0180*/ @!P2 IMAD.WIDE R6, R7, R8, c[0x0][0x160] ; /* 0x000058000706a625 */
/* 0x000fc800078e0208 */
/*0190*/ @!P3 IMAD.WIDE R8, R9, R10, c[0x0][0x160] ; /* 0x000058000908b625 */
/* 0x000fe400078e020a */
/*01a0*/ @!P2 LDG.E R6, [R6.64] ; /* 0x000000040606a981 */
/* 0x0000a4000c1e1900 */
/*01b0*/ @!P4 IMAD.WIDE R4, R5, R4, c[0x0][0x160] ; /* 0x000058000504c625 */
/* 0x000fe400078e0204 */
/*01c0*/ @!P3 LDG.E R8, [R8.64] ; /* 0x000000040808b981 */
/* 0x000ee4000c1e1900 */
/*01d0*/ @!P0 IMAD.WIDE R10, R11, R14, c[0x0][0x160] ; /* 0x000058000b0a8625 */
/* 0x000fe400078e020e */
/*01e0*/ @!P4 LDG.E R5, [R4.64] ; /* 0x000000040405c981 */
/* 0x000328000c1e1900 */
/*01f0*/ @!P0 LDG.E R10, [R10.64] ; /* 0x000000040a0a8981 */
/* 0x000f62000c1e1900 */
/*0200*/ IMAD R23, R0, 0x21, R3.reuse ; /* 0x0000002100177824 */
/* 0x100fe200078e0203 */
/*0210*/ LEA R13, R2, R0, 0x5 ; /* 0x00000000020d7211 */
/* 0x000fe200078e28ff */
/*0220*/ IMAD R4, R12, 0x20, R3 ; /* 0x000000200c047824 */
/* 0x002fc600078e0203 */
/*0230*/ IADD3 R15, R13.reuse, 0x8, RZ ; /* 0x000000080d0f7810 */
/* 0x040fe40007ffe0ff */
/*0240*/ IADD3 R19, R13, 0x10, RZ ; /* 0x000000100d137810 */
/* 0x000fe40007ffe0ff */
/*0250*/ ISETP.GE.AND P1, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fc80003f26270 */
/*0260*/ ISETP.GE.OR P6, PT, R13, c[0x0][0x174], P1 ; /* 0x00005d000d007a0c */
/* 0x000fe40000fc6670 */
/*0270*/ ISETP.GE.OR P5, PT, R15, c[0x0][0x174], P1 ; /* 0x00005d000f007a0c */
/* 0x000fe20000fa6670 */
/*0280*/ IMAD R0, R3, 0x21, R0 ; /* 0x0000002103007824 */
/* 0x000fe200078e0200 */
/*0290*/ IADD3 R21, R13, 0x18, RZ ; /* 0x000000180d157810 */
/* 0x000fd20007ffe0ff */
/*02a0*/ @!P6 MOV R3, 0x4 ; /* 0x000000040003e802 */
/* 0x000fe20000000f00 */
/*02b0*/ @!P6 IMAD R2, R13, c[0x0][0x170], R4.reuse ; /* 0x00005c000d02ea24 */
/* 0x100fe400078e0204 */
/*02c0*/ @!P5 IMAD R7, R15, c[0x0][0x170], R4 ; /* 0x00005c000f07da24 */
/* 0x001fe400078e0204 */
/*02d0*/ @!P6 IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a000202e625 */
/* 0x000fe200078e0203 */
/*02e0*/ @!P2 STS [R23.X4+0x420], R6 ; /* 0x000420061700a388 */
/* 0x0041e80000004800 */
/*02f0*/ @!P3 STS [R23.X4+0x840], R8 ; /* 0x000840081700b388 */
/* 0x0083e80000004800 */
/*0300*/ @!P4 STS [R23.X4], R5 ; /* 0x000000051700c388 */
/* 0x010fe80000004800 */
/*0310*/ @!P0 STS [R23.X4+0xc60], R10 ; /* 0x000c600a17008388 */
/* 0x020fe80000004800 */
/*0320*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0330*/ ISETP.GE.OR P4, PT, R19, c[0x0][0x174], P1 ; /* 0x00005d0013007a0c */
/* 0x000fca0000f86670 */
/*0340*/ @!P6 LDS R11, [R0.X4] ; /* 0x00000000000be984 */
/* 0x000ea80000004800 */
/*0350*/ @!P5 LDS R17, [R0.X4+0x20] ; /* 0x000020000011d984 */
/* 0x000ee80000004800 */
/*0360*/ @!P4 LDS R5, [R0.X4+0x40] ; /* 0x000040000005c984 */
/* 0x000f220000004800 */
/*0370*/ ISETP.GE.OR P1, PT, R21, c[0x0][0x174], P1 ; /* 0x00005d0015007a0c */
/* 0x000fe20000f26670 */
/*0380*/ @!P5 IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff06d424 */
/* 0x001fe200078e00ff */
/*0390*/ @!P4 MOV R9, 0x4 ; /* 0x000000040009c802 */
/* 0x000fe20000000f00 */
/*03a0*/ @!P4 IMAD R8, R19, c[0x0][0x170], R4 ; /* 0x00005c001308ca24 */
/* 0x002fc400078e0204 */
/*03b0*/ @!P5 IMAD.WIDE R6, R7, R6, c[0x0][0x168] ; /* 0x00005a000706d625 */
/* 0x000fc800078e0206 */
/*03c0*/ @!P4 IMAD.WIDE R8, R8, R9, c[0x0][0x168] ; /* 0x00005a000808c625 */
/* 0x000fe200078e0209 */
/*03d0*/ @!P6 STG.E [R2.64], R11 ; /* 0x0000000b0200e986 */
/* 0x0041e8000c101904 */
/*03e0*/ @!P5 STG.E [R6.64], R17 ; /* 0x000000110600d986 */
/* 0x0081e8000c101904 */
/*03f0*/ @!P4 STG.E [R8.64], R5 ; /* 0x000000050800c986 */
/* 0x0101e2000c101904 */
/*0400*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0410*/ LDS R5, [R0.X4+0x60] ; /* 0x0000600000057984 */
/* 0x001e220000004800 */
/*0420*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0430*/ IMAD R2, R21, c[0x0][0x170], R4 ; /* 0x00005c0015027a24 */
/* 0x000fd200078e0204 */
/*0440*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0203 */
/*0450*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0460*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0470*/ BRA 0x470; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z21matrixTransposeSharedPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000e280000002200 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e680000002100 */
/*0050*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e620000002500 */
/*0060*/ IMAD R0, R5, c[0x0][0x4], R6 ; /* 0x0000010005007a24 */
/* 0x001fca00078e0206 */
/*0070*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fe20003f06270 */
/*0080*/ IMAD R3, R4, c[0x0][0x0], R7 ; /* 0x0000000004037a24 */
/* 0x002fca00078e0207 */
/*0090*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */
/* 0x000fda0000706670 */
/*00a0*/ @!P0 MOV R2, 0x4 ; /* 0x0000000400028802 */
/* 0x000fe20000000f00 */
/*00b0*/ @!P0 IMAD R3, R0, c[0x0][0x170], R3 ; /* 0x00005c0000038a24 */
/* 0x000fc800078e0203 */
/*00c0*/ @!P0 IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003028625 */
/* 0x000fcc00078e0202 */
/*00d0*/ @!P0 LDG.E R2, [R2.64] ; /* 0x0000000402028981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ @!P0 LEA R9, R7, R6, 0x5 ; /* 0x0000000607098211 */
/* 0x000fe200078e28ff */
/*00f0*/ IMAD R0, R4, c[0x0][0x0], R6 ; /* 0x0000000004007a24 */
/* 0x000fe400078e0206 */
/*0100*/ IMAD R5, R5, c[0x0][0x4], R7 ; /* 0x0000010005057a24 */
/* 0x000fc600078e0207 */
/*0110*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fc80003f26270 */
/*0120*/ ISETP.GE.OR P1, PT, R5, c[0x0][0x170], P1 ; /* 0x00005c0005007a0c */
/* 0x000fe20000f26670 */
/*0130*/ @!P0 STS [R9.X4], R2 ; /* 0x0000000209008388 */
/* 0x0041e80000004800 */
/*0140*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*0150*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0160*/ LEA R7, R6, R7, 0x5 ; /* 0x0000000706077211 */
/* 0x001fe200078e28ff */
/*0170*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0180*/ IMAD R2, R0, c[0x0][0x174], R5 ; /* 0x00005d0000027a24 */
/* 0x000fc800078e0205 */
/*0190*/ LDS R7, [R7.X4] ; /* 0x0000000007077984 */
/* 0x000e2a0000004800 */
/*01a0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0203 */
/*01b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x001fe2000c101904 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z15matrixTransposePfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002200 */
/*0050*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e680000002500 */
/*0060*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e620000002100 */
/*0070*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x001fc400078e0203 */
/*0080*/ IMAD R5, R5, c[0x0][0x0], R2 ; /* 0x0000000005057a24 */
/* 0x002fc800078e0202 */
/*0090*/ IMAD R2, R0, c[0x0][0x170], R5 ; /* 0x00005c0000027a24 */
/* 0x000fc800078e0205 */
/*00a0*/ IMAD.WIDE R2, R2, R4, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0204 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD R5, R5, c[0x0][0x174], R0 ; /* 0x00005d0005057a24 */
/* 0x000fc800078e0200 */
/*00d0*/ IMAD.WIDE R4, R5, R4, c[0x0][0x168] ; /* 0x00005a0005047625 */
/* 0x000fca00078e0204 */
/*00e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* matrix transpose program */
#include <stdio.h>
const int P = 32;
/* naive CPU */
/* CPU reference transpose.
 * src is an M x N matrix in row-major order (row stride N);
 * dst receives the N x M transpose (row-major, row stride M).
 * Fix: the destination row stride must be M (dst has M columns); the
 * original wrote dst[i*N+j], which is only correct when M == N (as in
 * main(), where both are 1024, so square results are unchanged). */
void naiveCPU(float *src, float *dst, int M, int N)
{
    for (int i = 0; i < N; ++i) {       /* dst row i = src column i */
        for (int j = 0; j < M; ++j) {   /* dst column j = src row j */
            dst[i*M+j] = src[j*N+i];
        }
    }
}
/* naive GPU */
/* Naive transpose: one thread per element, straight global-memory copy.
 * Reads _a row-major (row stride cols) and writes the mirrored element of
 * _b (row stride rows).  No bounds check — the launch must cover the
 * matrix exactly, as main() guarantees (N and M are multiples of P). */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    _b[col * rows + row] = _a[row * cols + col];   // b[col][row] = a[row][col]
}
/* shared memory GPU */
/* Tile-based transpose through an unpadded P x P shared tile: each block
 * stages one tile of _a (stored transposed in `mat`) and writes it back to
 * the mirrored tile position in _b.  Because the tile has no column pad,
 * the column-wise shared accesses incur bank conflicts — compare with
 * matrixTransposeSharedwBC below. */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
    __shared__ float mat[P][P];
    const int colBase = blockIdx.x * blockDim.x;   // tile origin in x
    const int rowBase = blockIdx.y * blockDim.y;   // tile origin in y
    const int srcRow  = rowBase + threadIdx.y;     // element read from _a
    const int srcCol  = colBase + threadIdx.x;
    const int dstRow  = colBase + threadIdx.y;     // element written to _b
    const int dstCol  = rowBase + threadIdx.x;
    if (srcRow < rows && srcCol < cols)
        mat[threadIdx.x][threadIdx.y] = _a[srcRow * cols + srcCol];
    __syncthreads();                               // tile complete before reads
    if (dstCol < cols && dstRow < rows)
        _b[dstRow * rows + dstCol] = mat[threadIdx.y][threadIdx.x];
}
/* loop-unrolled transpose (P x 8 block, 4 tile rows per thread) */
/* Tiled transpose with a per-thread unrolled loop: blocks are launched as
 * P x 8 threads (see blockDimUnroll in main), and each thread handles
 * P/8 = 4 rows of a P x P tile (the k loop steps by 8).  The +1 column pad
 * on the shared tile keeps the transposed (column-wise) reads free of
 * shared-memory bank conflicts.
 * NOTE(review): the load indexes `_a[(y+k)*rows + x]` (row stride = rows),
 * whereas the other kernels in this file use `cols` as _a's row stride.
 * The two layouts agree only when rows == cols (square, as in main()) —
 * confirm the intended layout before using non-square matrices. */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
    __shared__ float mat[P][P+1];           // +1 pad: conflict-free column reads
    int x = blockIdx.x * P + threadIdx.x;   // global x within the input tile
    int y = blockIdx.y * P + threadIdx.y;   // first of this thread's 4 rows
    #pragma unroll
    for (int k = 0; k < P; k += 8) {        // stage 4 rows of the tile
        if (x < rows && y+k < cols)
            mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
    }
    __syncthreads();                        // tile fully staged before readback
    x = blockIdx.y * P + threadIdx.x;       // swap block coords for output tile
    y = blockIdx.x * P + threadIdx.y;
    #pragma unroll
    for (int k = 0; k < P; k += 8) {        // write 4 rows, reading the shared
        if (x < cols && y+k < rows)         // tile transposed
            _b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
    }
}
/* shared memory without bank conflict (tile padded to P x (P+1)) */
/* Shared-memory tiled transpose with the tile padded to P x (P+1) so the
 * column-wise shared accesses fall in distinct banks (no bank conflicts).
 * Otherwise identical to matrixTransposeShared. */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
    __shared__ float mat[P][P+1];          // +1 column pad defeats bank conflicts
    int bx = blockIdx.x * blockDim.x;      // tile origin, x
    int by = blockIdx.y * blockDim.y;      // tile origin, y
    int i = by + threadIdx.y; int j = bx + threadIdx.x;    // source element in _a
    int ti = bx + threadIdx.y; int tj = by + threadIdx.x;  // destination in _b
    if (i < rows && j < cols)
        mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];    // stage tile, transposed
    __syncthreads();                       // whole tile staged before readback
    if (tj < cols && ti < rows)
        _b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* Benchmark driver: times a CPU reference transpose and four GPU transpose
 * kernels on a 1024 x 1024 float matrix, averaging each over 100 runs and
 * printing the per-iteration time in milliseconds.
 * Fix over the original: host buffers, device buffers, and the two CUDA
 * events are now released before exit (the original leaked all of them). */
int main(int argc, char **argv)
{
    /* N*M matrix, parallelism is P */
    const int N = 1024;
    const int M = 1024;
    const int matSize = N * M * sizeof(float);
    dim3 gridDim(N/P, M/P, 1);
    dim3 blockDim(P , P, 1);
    /* configuration of GPU */
    printf("===================\n");
    printf("Matrix: %d * %d\n", N, M);
    printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
    printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
    printf("===================\n");
    /* allocate memory for matrix in host */
    float *h_matrix = (float *) malloc(matSize);
    float *h_transpose = (float *) malloc(matSize);
    /* allocate memory for matrix in device */
    float *d_matrix, *d_transpose;
    cudaMalloc(&d_matrix, matSize);
    cudaMalloc(&d_transpose, matSize);
    /* randomly fill the matrix on the host with values in [0, 100]
     * (N == M here, so the i*N row stride is correct) */
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < M; ++j) {
            h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
        }
    }
    /* events used to time every variant */
    cudaEvent_t tStart, tEnd;
    float duration;
    cudaEventCreate(&tStart);
    cudaEventCreate(&tEnd);
    const int nIterations = 100;
    /* 1. naive CPU transpose */
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        naiveCPU(h_matrix, h_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd); // waits for record to terminate
    cudaEventElapsedTime(&duration, tStart, tEnd);
    printf("\nNaive CPU: %f\n", duration / nIterations);
    /* 2. naive GPU transpose */
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nNaive GPU: %f\n", duration / nIterations);
    /* 3. shared memory GPU transpose */
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nShared GPU: %f\n", duration / nIterations);
    /* 4. shared memory GPU transpose without bank conflict */
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nSharedwBC GPU: %f\n", duration / nIterations);
    duration = 0;
    /* 5. unrolled GPU transpose: P x 8 threads, 4 tile rows per thread */
    dim3 blockDimUnroll(P, 8, 1);
    cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
    cudaMemset(d_transpose, 0, matSize);
    cudaEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
    }
    cudaEventRecord(tEnd, 0);
    cudaEventSynchronize(tEnd);
    cudaEventElapsedTime(&duration, tStart, tEnd);
    cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
    printf("\nUnrolled GPU: %f\n", duration / nIterations);
    /* release everything (the original leaked these) */
    cudaEventDestroy(tStart);
    cudaEventDestroy(tEnd);
    cudaFree(d_matrix);
    cudaFree(d_transpose);
    free(h_matrix);
    free(h_transpose);
    return 0;
}
.text
#APP
#NO_APP
	# nvcc-generated teardown helper (SysV AMD64): unregisters this
	# translation unit's fat binary at program exit.  Leaf-like: takes no
	# arguments, returns nothing, clobbers only volatile registers.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
	.cfi_startproc
	endbr64					# CET/IBT landing pad
	subq	$8, %rsp			# realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	# arg0 = fat-cubin handle; presumably stored by the matching
	# __cudaRegisterFatBinary call elsewhere in this file — not visible here.
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2061:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	# void naiveCPU(float *src, float *dst, int M, int N) — compiled code.
	# ABI (SysV AMD64): rdi = src, rsi = dst, edx = M, ecx = N.
	# Implements the source as written: dst[i*N+j] = src[j*N+i]
	# (note the dst row stride is N — matches the C, correct only for
	# square matrices; see the C source's review note).
	# Register roles: r9d = i (outer counter), r10d = i*N (dst row offset,
	# in floats), rbp = M, r8 = N*4 (src column step in bytes),
	# rdi walks &src[i], r11 = dst base.
	.globl	_Z8naiveCPUPfS_ii
	.type	_Z8naiveCPUPfS_ii, @function
_Z8naiveCPUPfS_ii:
.LFB2057:
	.cfi_startproc
	endbr64
	testl	%ecx, %ecx			# N <= 0: nothing to do
	jle	.L11
	pushq	%rbp				# callee-saved regs used below
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	movq	%rsi, %r11			# r11 = dst
	movl	%edx, %ebx			# ebx = M (inner trip count test)
	movslq	%ecx, %r8
	salq	$2, %r8				# r8 = N*4: stride between src rows
	movl	$0, %r10d			# r10d = i*N accumulator
	movl	$0, %r9d			# r9d = i
	movslq	%edx, %rbp			# rbp = M (sign-extended)
	jmp	.L5
.L7:						# inner-loop setup for row i
	movslq	%r10d, %rdx
	leaq	(%r11,%rdx,4), %rax		# rax = &dst[i*N]
	addq	%rbp, %rdx
	leaq	(%r11,%rdx,4), %rsi		# rsi = &dst[i*N + M] (end)
	movq	%rdi, %rdx			# rdx = &src[i]
.L6:						# inner loop over j
	movss	(%rdx), %xmm0			# xmm0 = src[j*N + i]
	movss	%xmm0, (%rax)			# dst[i*N + j] = xmm0
	addq	%r8, %rdx			# src advances a whole row (j++)
	addq	$4, %rax			# dst advances one element
	cmpq	%rsi, %rax
	jne	.L6
.L8:						# outer-loop increment
	addl	$1, %r9d			# ++i
	addq	$4, %rdi			# &src[i]
	addl	%ecx, %r10d			# i*N += N
	cmpl	%r9d, %ecx
	je	.L3				# i == N: done
.L5:
	testl	%ebx, %ebx			# skip inner loop when M <= 0
	jg	.L7
	jmp	.L8
.L3:
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L11:						# early-out path (no pushes done)
	.cfi_restore 3
	.cfi_restore 6
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z8naiveCPUPfS_ii, .-_Z8naiveCPUPfS_ii
.globl _Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
.type _Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii, @function
_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15matrixTransposePfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii, .-_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
	# Host-side entry bearing the kernel's mangled name: <<<...>>> launch
	# sites call this symbol, which forwards to the nvcc device stub (the
	# stub pops the pushed launch configuration and calls cudaLaunchKernel).
	# The three remaining kernel wrappers in this file follow the identical
	# pattern.
	.globl	_Z15matrixTransposePfS_ii
	.type	_Z15matrixTransposePfS_ii, @function
_Z15matrixTransposePfS_ii:
.LFB2084:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# keep rsp 16-aligned at the call
	.cfi_def_cfa_offset 16
	call	_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2084:
	.size	_Z15matrixTransposePfS_ii, .-_Z15matrixTransposePfS_ii
.globl _Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
.type _Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii, @function
_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L26
.L22:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z21matrixTransposeSharedPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L22
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii, .-_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
.globl _Z21matrixTransposeSharedPfS_ii
.type _Z21matrixTransposeSharedPfS_ii, @function
_Z21matrixTransposeSharedPfS_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z21matrixTransposeSharedPfS_ii, .-_Z21matrixTransposeSharedPfS_ii
.globl _Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
.type _Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii, @function
_Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L34
.L30:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L35
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L34:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23matrixTransposeUnrolledPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L30
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii, .-_Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
.globl _Z23matrixTransposeUnrolledPfS_ii
.type _Z23matrixTransposeUnrolledPfS_ii, @function
_Z23matrixTransposeUnrolledPfS_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z23matrixTransposeUnrolledPfS_ii, .-_Z23matrixTransposeUnrolledPfS_ii
.globl _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
.type _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii, @function
# Device stub for matrixTransposeSharedwBC(float*, float*, int, int).
# Spills the four arguments to the stack, builds the kernel-argument pointer
# array, pops the <<<...>>> launch configuration saved by
# __cudaPushCallConfiguration, and launches via cudaLaunchKernel.
_Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp                 # frame: arg spills, dim3s, arg-pointer array
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)             # spill _a
movq %rsi, 16(%rsp)             # spill _b
movl %edx, 12(%rsp)             # spill cols
movl %ecx, 8(%rsp)              # spill rows
movq %fs:40, %rax               # stack-protector canary in
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax             # args[0] = &_a
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax             # args[1] = &_b
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax             # args[2] = &cols
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax              # args[3] = &rows
movq %rax, 120(%rsp)
movl $1, 48(%rsp)               # gridDim = (1,1,1), blockDim = (1,1,1)
movl $1, 52(%rsp)               # defaults; overwritten by the popped config
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx             # &stream
leaq 32(%rsp), %rdx             # &sharedMem
leaq 60(%rsp), %rsi             # &blockDim
leaq 48(%rsp), %rdi             # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L42                         # 0 => config available, go launch
.L38:
movq 136(%rsp), %rax            # canary check before returning
subq %fs:40, %rax
jne .L43
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L42:
.cfi_restore_state
pushq 40(%rsp)                  # stack arg 2: stream
.cfi_def_cfa_offset 168
pushq 40(%rsp)                  # stack arg 1: sharedMem (slot shifted by push)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9             # args array (96 + 16 from the two pushes)
movq 76(%rsp), %rcx             # blockDim.x/.y packed
movl 84(%rsp), %r8d             # blockDim.z
movq 64(%rsp), %rsi             # gridDim.x/.y packed
movl 72(%rsp), %edx             # gridDim.z
leaq _Z24matrixTransposeSharedwBCPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp                  # drop the two pushed stack args
.cfi_def_cfa_offset 160
jmp .L38
.L43:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii, .-_Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
.globl _Z24matrixTransposeSharedwBCPfS_ii
.type _Z24matrixTransposeSharedwBCPfS_ii, @function
# Host-visible kernel symbol matrixTransposeSharedwBC(float*, float*, int, int).
# Trampoline to the device stub above; argument registers pass through.
_Z24matrixTransposeSharedwBCPfS_ii:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp                   # realign rsp to 16 bytes before the call
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z24matrixTransposeSharedwBCPfS_ii, .-_Z24matrixTransposeSharedwBCPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "===================\n"
.LC1:
.string "Matrix: %d * %d\n"
.LC2:
.string "Grid: %d * %d * %d\n"
.LC3:
.string "Block: %d * %d * %d\n"
.LC6:
.string "\nNaive CPU: %f\n"
.LC7:
.string "\nNaive GPU: %f\n"
.LC8:
.string "\nShared GPU: %f\n"
.LC9:
.string "\nSharedwBC GPU: %f\n"
.LC11:
.string "\nUnrolled GPU: %f\n"
.text
.globl main
.type main, @function
# Benchmark driver for a 1024x1024 matrix transpose (see the CUDA source):
# prints the configuration, fills the host matrix with random floats, then
# times naive-CPU, naive-GPU, shared, shared-w/o-bank-conflict and unrolled
# variants (100 iterations each) with CUDA events.
# Register roles: r12 = h_matrix, r13 = h_transpose, rbx = scratch/loop ctr,
# rbp/r14 = init-loop cursors; stack: 12=duration, 16=d_matrix, 24=d_transpose,
# 32=tStart, 40=tEnd, 48..84 = dim3 scratch for launch configs.
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $96, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax               # stack-protector canary
movq %rax, 88(%rsp)
xorl %eax, %eax
# --- banner printfs: separator, matrix, grid and block dimensions ---
leaq .LC0(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1024, %ecx
movl $1024, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %r8d
movl $32, %ecx
movl $32, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %r8d
movl $32, %ecx
movl $32, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
# --- host + device allocations: 4 MiB each (1024*1024*4) ---
movl $4194304, %edi
call malloc@PLT
movq %rax, %r12                 # r12 = h_matrix
movl $4194304, %edi
call malloc@PLT
movq %rax, %r13                 # r13 = h_transpose
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
# --- fill h_matrix with rand()/RAND_MAX*100, row by row (4096 B per row) ---
leaq 4096(%r12), %rbp
leaq 4198400(%r12), %r14        # one row past the end (4 MiB + 4096)
.L47:
leaq -4096(%rbp), %rbx
.L48:
call rand@PLT
pxor %xmm0, %xmm0               # break false dep before cvtsi2ss
cvtsi2ssl %eax, %xmm0
mulss .LC4(%rip), %xmm0         # * 1/RAND_MAX
mulss .LC5(%rip), %xmm0         # * 100.0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L48
addq $4096, %rbp
cmpq %r14, %rbp
jne .L47
# --- create timing events ---
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
# --- 1. naive CPU transpose, 100 iterations, timed with CUDA events ---
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
.L50:
movl $1024, %ecx
movl $1024, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z8naiveCPUPfS_ii
subl $1, %ebx
jne .L50
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0         # duration / 100 iterations
cvtss2sd %xmm0, %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax                   # al=1: one vector arg to variadic printf
call __printf_chk@PLT
# --- 2. naive GPU: upload, clear output, time 100 launches ---
movl $1, %ecx                   # cudaMemcpyHostToDevice
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L52
.L51:
subl $1, %ebx
je .L67
.L52:
movl $32, 52(%rsp)              # gridDim = (32,32,1), blockDim = (32,32,1)
movl $32, 56(%rsp)
movl $32, 64(%rsp)
movl $32, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L51
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
jmp .L51
.L67:
# timing epilogue + download + print for the naive GPU variant
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx                   # cudaMemcpyDeviceToHost
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
# --- 3. shared-memory GPU variant (same pattern) ---
movl $1, %ecx
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L54
.L53:
subl $1, %ebx
je .L68
.L54:
movl $32, 52(%rsp)
movl $32, 56(%rsp)
movl $32, 64(%rsp)
movl $32, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L53
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
jmp .L53
.L68:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
# --- 4. shared memory without bank conflicts (same pattern) ---
movl $1, %ecx
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L56
.L55:
subl $1, %ebx
je .L69
.L56:
movl $32, 52(%rsp)
movl $32, 56(%rsp)
movl $32, 64(%rsp)
movl $32, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L55
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
jmp .L55
.L69:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
# --- 5. unrolled variant: duration = 0, blockDimUnroll = (32,8,1) ---
movl $0x00000000, 12(%rsp)
movl $32, 76(%rsp)
movl $8, 80(%rsp)
movl $1, 84(%rsp)
movl $1, %ecx
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L58
.L57:
subl $1, %ebx
je .L70
.L58:
movl $32, 52(%rsp)
movl $32, 56(%rsp)
movl 84(%rsp), %ecx             # blockDimUnroll.z
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx             # blockDimUnroll.x/.y = (32,8)
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L57
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
jmp .L57
.L70:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
# --- return 0 (canary check first); buffers/events intentionally leaked ---
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L71
movl $0, %eax
addq $96, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC12:
.string "_Z24matrixTransposeSharedwBCPfS_ii"
.align 8
.LC13:
.string "_Z23matrixTransposeUnrolledPfS_ii"
.align 8
.LC14:
.string "_Z21matrixTransposeSharedPfS_ii"
.section .rodata.str1.1
.LC15:
.string "_Z15matrixTransposePfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
# Static initializer (run via .init_array before main): registers the
# embedded fat binary with the CUDA runtime, then registers the four kernel
# entry points, and finally queues the unregister hook with atexit().
# Each __cudaRegisterFunction call is:
#   (handle, hostFun, deviceName, deviceName, -1, 0, 0, 0, 0, 0)
# with the last four zeros passed on the stack.
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx                 # keep the handle across the four calls
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# register matrixTransposeSharedwBC
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z24matrixTransposeSharedwBCPfS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register matrixTransposeUnrolled
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z23matrixTransposeUnrolledPfS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register matrixTransposeShared
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z21matrixTransposeSharedPfS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register matrixTranspose (naive)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z15matrixTransposePfS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                 # unregister on normal process exit
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC4:
.long 805306368
.align 4
.LC5:
.long 1120403456
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* matrix transpose program */
#include <stdio.h>
const int P = 32;
/* naive CPU */
void naiveCPU(float *src, float *dst, int M, int N)
{
    // Host-side reference transpose.
    // NOTE(review): both strides use N, so this is only a true transpose
    // when M == N (main always passes 1024x1024) — confirm if generalizing.
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < M; ++col) {
            dst[row * N + col] = src[col * N + row];
        }
    }
}
/* naive GPU */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
    // One thread per element: read _a row-major, write the element into
    // its transposed slot in _b.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    _b[col * rows + row] = _a[row * cols + col];
}
/* shared memory GPU */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
// Stages a 32x32 tile in shared memory so both global reads and writes are
// coalesced. NOTE(review): mat[P][P] has no padding, so the transposed
// shared-memory accesses conflict on banks; matrixTransposeSharedwBC below
// pads to P+1 to avoid that.
__shared__ float mat[P][P];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x; // source element (i,j)
int ti = bx + threadIdx.y; int tj = by + threadIdx.x; // transposed target
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads(); // whole tile staged before any thread reads it back
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* loop-unrolled GPU transpose (32x8 threads, 4 elements per thread);
   shared tile is padded to P+1 columns to avoid bank conflicts.
   (Original header comment was swapped with matrixTransposeSharedwBC's.) */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int x = blockIdx.x * P + threadIdx.x;
int y = blockIdx.y * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) { // each thread loads 4 rows of the tile
if (x < rows && y+k < cols)
mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
}
__syncthreads(); // tile complete before the transposed read-back
x = blockIdx.y * P + threadIdx.x; // swap block coords for the output tile
y = blockIdx.x * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < cols && y+k < rows)
_b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
}
}
/* shared memory without bank conflicts: same tiling as
   matrixTransposeShared, but the tile is padded to P+1 columns so the
   transposed shared-memory accesses land in distinct banks.
   (Original header comment was swapped with matrixTransposeUnrolled's.) */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1]; // +1 column of padding
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x; // source element (i,j)
int ti = bx + threadIdx.y; int tj = by + threadIdx.x; // transposed target
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads(); // whole tile staged before read-back
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* Benchmark driver: times each transpose variant over 100 iterations with
   CUDA events and prints the average per-iteration time in ms.
   NOTE(review): API return codes are unchecked, and host buffers, device
   buffers and events are never freed — acceptable for a one-shot benchmark.
   The CPU loop is also timed with GPU events (works, but unusual). */
int main(int argc, char **argv)
{
/* N*M matrix, parallelism is P */
const int N = 1024;
const int M = 1024;
const int matSize = N * M * sizeof(float);
dim3 gridDim(N/P, M/P, 1);
dim3 blockDim(P , P, 1);
/* configuration of GPU */
printf("===================\n");
printf("Matrix: %d * %d\n", N, M);
printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
printf("===================\n");
/* allocate memory for matrix in host */
float *h_matrix = (float *) malloc(matSize);
float *h_transpose = (float *) malloc(matSize);
/* allocate memory for matrix in device */
float *d_matrix, *d_transpose;
cudaMalloc(&d_matrix, matSize);
cudaMalloc(&d_transpose, matSize);
/* randomly generate a matrix in host, values in [0, 100] */
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
}
}
/* utility for recording start and finish time */
cudaEvent_t tStart, tEnd;
float duration;
cudaEventCreate(&tStart);
cudaEventCreate(&tEnd);
const int nIterations = 100;
/* 1. naive CPU transpose */
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
naiveCPU(h_matrix, h_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd); // waits for record to terminate
cudaEventElapsedTime(&duration, tStart, tEnd); // milliseconds
printf("\nNaive CPU: %f\n", duration / nIterations);
/* 2. naive GPU transpose */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize); // clear output between variants
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nNaive GPU: %f\n", duration / nIterations);
/* 3. shared memory GPU transpose */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nShared GPU: %f\n", duration / nIterations);
/* 4. shared memory GPU transpose without bank conflict */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nSharedwBC GPU: %f\n", duration / nIterations);
duration = 0;
/* 5. unrolled GPU transpose: 32x8 block, each thread handles 4 elements */
dim3 blockDimUnroll(P, 8, 1);
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nUnrolled GPU: %f\n", duration / nIterations);
return 0;
}
#include <hip/hip_runtime.h>
#include <stdio.h>
const int P = 32;
/* naive CPU */
void naiveCPU(float *src, float *dst, int M, int N)
{
    // Host-side reference transpose. NOTE(review): both index strides use
    // N, so the result is a transpose only for square matrices (main always
    // uses 1024x1024) — confirm before reusing with M != N.
    int i = 0;
    while (i < N) {
        int j = 0;
        while (j < M) {
            dst[i * N + j] = src[j * N + i];
            ++j;
        }
        ++i;
    }
}
/* naive GPU */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
    // Each thread moves exactly one element into its transposed slot.
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const int srcIdx = r * cols + c;
    const int dstIdx = c * rows + r;
    _b[dstIdx] = _a[srcIdx];
}
/* shared memory GPU */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
// 32x32 tile staged through shared memory for coalesced reads and writes.
// NOTE(review): unpadded mat[P][P] — the transposed accesses conflict on
// LDS banks; the SharedwBC variant below pads to P+1 to avoid this.
__shared__ float mat[P][P];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x; // source element (i,j)
int ti = bx + threadIdx.y; int tj = by + threadIdx.x; // transposed target
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads(); // whole tile staged before any thread reads it back
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* loop-unrolled GPU transpose (launched with a 32x8 block; each thread
   handles 4 tile rows). Tile padded to P+1 to avoid bank conflicts.
   (Original header comment was swapped with matrixTransposeSharedwBC's.) */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int x = blockIdx.x * P + threadIdx.x;
int y = blockIdx.y * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) { // 4 strided loads per thread
if (x < rows && y+k < cols)
mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
}
__syncthreads(); // tile complete before the transposed read-back
x = blockIdx.y * P + threadIdx.x; // swap block coords for the output tile
y = blockIdx.x * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < cols && y+k < rows)
_b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
}
}
/* shared memory without bank conflicts: same tiling as
   matrixTransposeShared, but the tile row is padded to P+1 floats so the
   transposed shared-memory accesses land in distinct banks.
   (Original header comment was swapped with matrixTransposeUnrolled's.) */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1]; // +1 column of padding
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x; // source element (i,j)
int ti = bx + threadIdx.y; int tj = by + threadIdx.x; // transposed target
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads(); // whole tile staged before read-back
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* HIP benchmark driver: times each transpose variant over 100 iterations
   with HIP events and prints the average per-iteration time in ms.
   NOTE(review): hip* return codes are unchecked; host/device buffers and
   events are never freed/destroyed — acceptable for a one-shot benchmark.
   The CPU loop is also timed with GPU events (works, but unusual). */
int main(int argc, char **argv)
{
/* N*M matrix, parallelism is P */
const int N = 1024;
const int M = 1024;
const int matSize = N * M * sizeof(float);
dim3 gridDim(N/P, M/P, 1);
dim3 blockDim(P , P, 1);
/* configuration of GPU */
printf("===================\n");
printf("Matrix: %d * %d\n", N, M);
printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
printf("===================\n");
/* allocate memory for matrix in host */
float *h_matrix = (float *) malloc(matSize);
float *h_transpose = (float *) malloc(matSize);
/* allocate memory for matrix in device */
float *d_matrix, *d_transpose;
hipMalloc(&d_matrix, matSize);
hipMalloc(&d_transpose, matSize);
/* randomly generate a matrix in host, values in [0, 100] */
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
}
}
/* utility for recording start and finish time */
hipEvent_t tStart, tEnd;
float duration;
hipEventCreate(&tStart);
hipEventCreate(&tEnd);
const int nIterations = 100;
/* 1. naive CPU transpose */
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
naiveCPU(h_matrix, h_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd); // waits for record to terminate
hipEventElapsedTime(&duration, tStart, tEnd); // milliseconds
printf("\nNaive CPU: %f\n", duration / nIterations);
/* 2. naive GPU transpose */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize); // clear output between variants
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nNaive GPU: %f\n", duration / nIterations);
/* 3. shared memory GPU transpose */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nShared GPU: %f\n", duration / nIterations);
/* 4. shared memory GPU transpose without bank conflict */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nSharedwBC GPU: %f\n", duration / nIterations);
duration = 0;
/* 5. unrolled GPU transpose: 32x8 block, each thread handles 4 elements */
dim3 blockDimUnroll(P, 8, 1);
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nUnrolled GPU: %f\n", duration / nIterations);
return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* matrix transpose program */
#include <hip/hip_runtime.h>
#include <stdio.h>
const int P = 32;
/* naive CPU: host-side reference transpose.
   NOTE(review): both strides use N, so this is only a true transpose when
   M == N (the driver always passes 1024x1024) — confirm before reuse. */
void naiveCPU(float *src, float *dst, int M, int N)
{
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
dst[i*N+j] = src[j*N+i];
}
}
}
/* naive GPU: one thread per element, scattered (uncoalesced) writes */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
int i = blockIdx.y * blockDim.y + threadIdx.y; // row
int j = blockIdx.x * blockDim.x + threadIdx.x; // col
int index_in = i * cols + j;   // row-major source index
int index_out = j * rows + i;  // transposed destination index
_b[index_out] = _a[index_in];
}
/* shared memory GPU: 32x32 tile staged through (unpadded) shared memory */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P]; // unpadded: transposed accesses conflict on banks
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x; // source element (i,j)
int ti = bx + threadIdx.y; int tj = by + threadIdx.x; // transposed target
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads(); // whole tile staged before read-back
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* loop-unrolled GPU transpose (32x8 block, 4 elements per thread); tile
   padded to P+1 to avoid bank conflicts. (Header comment was swapped with
   matrixTransposeSharedwBC's in the original.) */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int x = blockIdx.x * P + threadIdx.x;
int y = blockIdx.y * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) { // 4 strided loads per thread
if (x < rows && y+k < cols)
mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
}
__syncthreads(); // tile complete before transposed read-back
x = blockIdx.y * P + threadIdx.x; // swap block coords for the output tile
y = blockIdx.x * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < cols && y+k < rows)
_b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
}
}
/* shared memory without bank conflicts: as matrixTransposeShared, but the
   tile row is padded to P+1 floats. (Header comment was swapped with
   matrixTransposeUnrolled's in the original.) */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1]; // +1 column of padding
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x; // source element (i,j)
int ti = bx + threadIdx.y; int tj = by + threadIdx.x; // transposed target
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads(); // whole tile staged before read-back
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
// Benchmark driver: times five transpose variants (1 CPU, 4 GPU) over
// nIterations runs each and prints the average per-run time in ms.
int main(int argc, char **argv)
{
    /* N*M matrix, parallelism is P */
    const int N = 1024;
    const int M = 1024;
    const int matSize = N * M * sizeof(float);
    // One P x P thread block per P x P tile of the matrix.
    dim3 gridDim(N/P, M/P, 1);
    dim3 blockDim(P , P, 1);
    /* configuration of GPU */
    printf("===================\n");
    printf("Matrix: %d * %d\n", N, M);
    printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
    printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
    printf("===================\n");
    /* allocate memory for matrix in host */
    // NOTE(review): malloc/hipMalloc/hipMemcpy return codes are never checked below.
    float *h_matrix = (float *) malloc(matSize);
    float *h_transpose = (float *) malloc(matSize);
    /* allocate memory for matrix in device */
    float *d_matrix, *d_transpose;
    hipMalloc(&d_matrix, matSize);
    hipMalloc(&d_transpose, matSize);
    /* randomly generate a matrix in host */
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < M; ++j) {
            // NOTE(review): row stride is N while j ranges over M — only valid while N == M.
            h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
        }
    }
    /* utility for recording start and finish time */
    hipEvent_t tStart, tEnd;
    float duration;                     // elapsed milliseconds, reused per experiment
    hipEventCreate(&tStart);
    hipEventCreate(&tEnd);
    const int nIterations = 100;        // every variant is averaged over 100 runs
    /* 1. naive CPU transpose */
    hipEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        naiveCPU(h_matrix, h_transpose, N, M);
    }
    hipEventRecord(tEnd, 0);
    hipEventSynchronize(tEnd); // waits for record to terminate
    hipEventElapsedTime(&duration, tStart, tEnd);
    printf("\nNaive CPU: %f\n", duration / nIterations);
    /* 2. naive GPU transpose */
    hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
    hipMemset(d_transpose, 0, matSize);  // clear output so stale data can't hide a bug
    hipEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    hipEventRecord(tEnd, 0);
    hipEventSynchronize(tEnd);
    hipEventElapsedTime(&duration, tStart, tEnd);
    hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
    printf("\nNaive GPU: %f\n", duration / nIterations);
    /* 3. shared memory GPU transpose */
    hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
    hipMemset(d_transpose, 0, matSize);
    hipEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    hipEventRecord(tEnd, 0);
    hipEventSynchronize(tEnd);
    hipEventElapsedTime(&duration, tStart, tEnd);
    hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
    printf("\nShared GPU: %f\n", duration / nIterations);
    /* 4. shared memory GPU transpose without bank conflict */
    hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
    hipMemset(d_transpose, 0, matSize);
    hipEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
    }
    hipEventRecord(tEnd, 0);
    hipEventSynchronize(tEnd);
    hipEventElapsedTime(&duration, tStart, tEnd);
    hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
    printf("\nSharedwBC GPU: %f\n", duration / nIterations);
    duration = 0;
    /* 5. unrolled GPU transpose */
    // P x 8 block: the unrolled kernel covers the remaining P/8 tile rows per thread.
    dim3 blockDimUnroll(P, 8, 1);
    hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
    hipMemset(d_transpose, 0, matSize);
    hipEventRecord(tStart, 0);
    for (int i = 0; i < nIterations; ++i) {
        matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
    }
    hipEventRecord(tEnd, 0);
    hipEventSynchronize(tEnd);
    hipEventElapsedTime(&duration, tStart, tEnd);
    hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
    printf("\nUnrolled GPU: %f\n", duration / nIterations);
    // NOTE(review): device/host buffers and events are never freed — intentional for
    // a one-shot benchmark, but hipFree/hipEventDestroy/free would be cleaner.
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15matrixTransposePfS_ii
.globl _Z15matrixTransposePfS_ii
.p2align 8
.type _Z15matrixTransposePfS_ii,@function
;-----------------------------------------------------------------------
; _Z15matrixTransposePfS_ii — naive transpose kernel (gfx1100, wave32).
; HIP source: _b[j*rows + i] = _a[i*cols + j].
; Kernarg: [0x0]=_a, [0x8]=_b, [0x10]=cols, [0x14]=rows,
;          [0x24]=hidden group-size x/y packed as two 16-bit halves
;          (per the .amdgpu_metadata arg table at the end of this file).
; s14/s15 = workgroup id x/y; v0 = packed workitem id (x in bits 0-9,
; y in bits 10-19) — see .amdhsa_system_* flags in the kernel descriptor.
;-----------------------------------------------------------------------
_Z15matrixTransposePfS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24                    ; packed blockDim.x / blockDim.y
s_load_b64 s[4:5], s[0:1], 0x10                ; s4 = cols, s5 = rows
v_bfe_u32 v1, v0, 10, 10                       ; v1 = threadIdx.y
v_and_b32_e32 v0, 0x3ff, v0                    ; v0 = threadIdx.x
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16                          ; s3 = blockDim.y
s_and_b32 s2, s2, 0xffff                       ; s2 = blockDim.x
v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2]    ; v2 = i = bid.y*bdim.y + tid.y
v_mad_u64_u32 v[3:4], null, s14, s2, v[0:1]    ; v3 = j = bid.x*bdim.x + tid.x
s_load_b128 s[0:3], s[0:1], 0x0                ; s[0:1] = _a, s[2:3] = _b
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, v2, s4, v[3:4]     ; index_in = i*cols + j
v_ashrrev_i32_e32 v1, 31, v0                   ; sign-extend index to 64 bits
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]                ; *4 = byte offset
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v4, v[0:1], off                ; v4 = _a[index_in]
v_mad_u64_u32 v[0:1], null, v3, s5, v[2:3]     ; index_out = j*rows + i
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)                             ; wait for the load before storing
global_store_b32 v[0:1], v4, off               ; _b[index_out] = loaded value
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15matrixTransposePfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15matrixTransposePfS_ii, .Lfunc_end0-_Z15matrixTransposePfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z21matrixTransposeSharedPfS_ii
.globl _Z21matrixTransposeSharedPfS_ii
.p2align 8
.type _Z21matrixTransposeSharedPfS_ii,@function
;-----------------------------------------------------------------------
; _Z21matrixTransposeSharedPfS_ii — shared-memory transpose (gfx1100).
; LDS: 4096 bytes = float mat[32][32] (row stride 128 bytes; no padding,
; matching the unpadded mat[P][P] in the HIP source).
; Kernarg: [0x0]=_a, [0x8]=_b, [0x10]=cols (s4), [0x14]=rows (s5).
; Two predicated phases separated by s_barrier.
;-----------------------------------------------------------------------
_Z21matrixTransposeSharedPfS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24                    ; packed blockDim.x / blockDim.y
s_load_b64 s[4:5], s[0:1], 0x10                ; s4 = cols, s5 = rows
v_bfe_u32 v1, v0, 10, 10                       ; v1 = threadIdx.y
v_and_b32_e32 v2, 0x3ff, v0                    ; v2 = threadIdx.x
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
s_mul_i32 s15, s15, s3                         ; s15 = by = bid.y*bdim.y
s_mul_i32 s14, s14, s2                         ; s14 = bx = bid.x*bdim.x
v_add_nc_u32_e32 v3, s15, v1                   ; v3 = i = by + tid.y
v_add_nc_u32_e32 v0, s14, v2                   ; v0 = j = bx + tid.x
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s5, v3                ; i < rows
v_cmp_gt_i32_e64 s2, s4, v0                    ; j < cols
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s2, vcc_lo
s_and_saveexec_b32 s2, s3                      ; predicate the tile load
s_cbranch_execz .LBB1_2
s_load_b64 s[6:7], s[0:1], 0x0                 ; s[6:7] = _a
v_mad_u64_u32 v[4:5], null, v3, s4, v[0:1]     ; i*cols + j
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[3:4], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v0, v[3:4], off                ; v0 = _a[i*cols + j]
v_lshlrev_b32_e32 v3, 2, v1                    ; tid.y * 4
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v2, 7, v3                   ; addr = tid.x*128 + tid.y*4 = &mat[tid.x][tid.y]
s_waitcnt vmcnt(0)
ds_store_b32 v3, v0
.LBB1_2:                                       ; rejoin; __syncthreads() below
s_or_b32 exec_lo, exec_lo, s2
v_add_nc_u32_e32 v0, s15, v2                   ; v0 = tj = by + tid.x
v_add_nc_u32_e32 v3, s14, v1                   ; v3 = ti = bx + tid.y
s_waitcnt lgkmcnt(0)
s_barrier                                      ; __syncthreads()
buffer_gl0_inv
v_cmp_gt_i32_e32 vcc_lo, s4, v0                ; tj < cols
v_cmp_gt_i32_e64 s2, s5, v3                    ; ti < rows
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s3, s2                      ; predicate the store-back
s_cbranch_execz .LBB1_4
v_lshlrev_b32_e32 v2, 2, v2                    ; tid.x * 4
s_load_b64 s[0:1], s[0:1], 0x8                 ; s[0:1] = _b
v_mad_u64_u32 v[4:5], null, v3, s5, v[0:1]     ; ti*rows + tj
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v0, v1, 7, v2                   ; addr = &mat[tid.y][tid.x]
v_ashrrev_i32_e32 v5, 31, v4
ds_load_b32 v2, v0                             ; transposed read from LDS
v_lshlrev_b64 v[0:1], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off               ; _b[ti*rows + tj] = mat[tid.y][tid.x]
.LBB1_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21matrixTransposeSharedPfS_ii
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z21matrixTransposeSharedPfS_ii, .Lfunc_end1-_Z21matrixTransposeSharedPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z23matrixTransposeUnrolledPfS_ii
.globl _Z23matrixTransposeUnrolledPfS_ii
.p2align 8
.type _Z23matrixTransposeUnrolledPfS_ii,@function
;-----------------------------------------------------------------------
; _Z23matrixTransposeUnrolledPfS_ii — unrolled tiled transpose (gfx1100).
; LDS: 4224 bytes = float mat[32][33] (row stride 0x84 = 132 bytes; the
; +1 padding column matches mat[P][P+1] in the HIP source).
; The k = 0..24 step-8 loops are fully unrolled: phase 1 stores at LDS
; offsets 0/1056/2112/3168 (= k*132), phase 2 reads at byte offsets
; 0/32/64/96 (= k*4 within a row). Each unrolled iteration keeps its own
; bounds check; exec is progressively narrowed and restored around them.
; Kernarg: [0x0]=_a (s[6:7]), [0x8]=_b, [0x10]=cols (s4), [0x14]=rows (s5).
;-----------------------------------------------------------------------
_Z23matrixTransposeUnrolledPfS_ii:
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x10                ; s4 = cols, s5 = rows
s_load_b64 s[6:7], s[0:1], 0x0                 ; s[6:7] = _a
v_and_b32_e32 v2, 0x3ff, v0                    ; v2 = threadIdx.x
v_bfe_u32 v1, v0, 10, 10                       ; v1 = threadIdx.y
s_lshl_b32 s8, s14, 5                          ; s8 = blockIdx.x * P (P = 32)
s_lshl_b32 s9, s15, 5                          ; s9 = blockIdx.y * P
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v0, s8, v2                    ; v0 = x = bid.x*P + tid.x
v_add_nc_u32_e32 v3, s9, v1                    ; v3 = y = bid.y*P + tid.y
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s5, v0                ; x < rows (held live for all 4 iterations)
v_cmp_gt_i32_e64 s2, s4, v3                    ; y < cols
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB2_2
; ---- phase 1, k = 0: mat[tid.y][tid.x] = _a[y*rows + x]
v_mad_u64_u32 v[4:5], null, v3, s5, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v4, s2, s6, v4
v_add_co_ci_u32_e64 v5, s2, s7, v5, s2
global_load_b32 v4, v[4:5], off
v_lshlrev_b32_e32 v5, 2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v5, v1, 0x84, v5                 ; tid.y*132 + tid.x*4
s_waitcnt vmcnt(0)
ds_store_b32 v5, v4
.LBB2_2:
s_or_b32 exec_lo, exec_lo, s3
s_and_saveexec_b32 s3, vcc_lo                  ; keep only lanes with x < rows
s_cbranch_execz .LBB2_11
; ---- phase 1, k = 8
v_add_nc_u32_e32 v4, 8, v3
s_mov_b32 s10, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s4, v4                       ; y+8 < cols
s_cbranch_execz .LBB2_5
v_mad_u64_u32 v[5:6], null, v4, s5, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[4:5], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v4, s2, s6, v4
v_add_co_ci_u32_e64 v5, s2, s7, v5, s2
global_load_b32 v4, v[4:5], off
v_lshlrev_b32_e32 v5, 2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v5, v1, 0x84, v5
s_waitcnt vmcnt(0)
ds_store_b32 v5, v4 offset:1056                ; row tid.y+8 (8*132 bytes)
.LBB2_5:
s_or_b32 exec_lo, exec_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_11
; ---- phase 1, k = 16
v_add_nc_u32_e32 v4, 16, v3
s_mov_b32 s10, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s4, v4                       ; y+16 < cols
s_cbranch_execz .LBB2_8
v_mad_u64_u32 v[5:6], null, v4, s5, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[4:5], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v4, s2, s6, v4
v_add_co_ci_u32_e64 v5, s2, s7, v5, s2
global_load_b32 v4, v[4:5], off
v_lshlrev_b32_e32 v5, 2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v5, v1, 0x84, v5
s_waitcnt vmcnt(0)
ds_store_b32 v5, v4 offset:2112                ; row tid.y+16
.LBB2_8:
s_or_b32 exec_lo, exec_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_11
; ---- phase 1, k = 24
v_add_nc_u32_e32 v3, 24, v3
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s4, v3                ; y+24 < cols
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_11
v_mad_u64_u32 v[4:5], null, v3, s5, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[3:4], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v0, v[3:4], off
v_lshlrev_b32_e32 v3, 2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v3, v1, 0x84, v3
s_waitcnt vmcnt(0)
ds_store_b32 v3, v0 offset:3168                ; row tid.y+24
.LBB2_11:                                      ; full exec restored; __syncthreads()
s_or_b32 exec_lo, exec_lo, s3
s_load_b64 s[2:3], s[0:1], 0x8                 ; s[2:3] = _b
v_add_nc_u32_e32 v0, s9, v2                    ; v0 = x' = bid.y*P + tid.x
v_add_nc_u32_e32 v3, s8, v1                    ; v3 = y' = bid.x*P + tid.y
s_waitcnt lgkmcnt(0)
s_barrier                                      ; __syncthreads()
buffer_gl0_inv
v_cmp_gt_i32_e32 vcc_lo, s4, v0                ; x' < cols (held for all 4 stores)
v_cmp_gt_i32_e64 s0, s5, v3                    ; y' < rows
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, vcc_lo, s0
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB2_13
; ---- phase 2, k = 0: _b[y'*cols + x'] = mat[tid.x][tid.y]
v_lshlrev_b32_e32 v6, 2, v1
v_mad_u64_u32 v[4:5], null, v3, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u32_u24 v5, v2, 0x84, v6                 ; tid.x*132 + tid.y*4
ds_load_b32 v6, v5
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v4, s0, s2, v4
v_add_co_ci_u32_e64 v5, s0, s3, v5, s0
s_waitcnt lgkmcnt(0)
global_store_b32 v[4:5], v6, off
.LBB2_13:
s_or_b32 exec_lo, exec_lo, s1
s_and_saveexec_b32 s0, vcc_lo                  ; keep only lanes with x' < cols
s_cbranch_execz .LBB2_22
; ---- phase 2, k = 8
v_add_nc_u32_e32 v4, 8, v3
s_mov_b32 s1, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s5, v4                       ; y'+8 < rows
s_cbranch_execz .LBB2_16
v_mul_u32_u24_e32 v7, 0x84, v2
v_mad_u64_u32 v[5:6], null, v4, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v4, v1, 2, v7
v_ashrrev_i32_e32 v6, 31, v5
ds_load_b32 v7, v4 offset:32                   ; column tid.y+8 (8 floats)
v_lshlrev_b64 v[4:5], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v4, s0, s2, v4
v_add_co_ci_u32_e64 v5, s0, s3, v5, s0
s_waitcnt lgkmcnt(0)
global_store_b32 v[4:5], v7, off
.LBB2_16:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_22
; ---- phase 2, k = 16
v_add_nc_u32_e32 v4, 16, v3
s_mov_b32 s1, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s5, v4                       ; y'+16 < rows
s_cbranch_execz .LBB2_19
v_mul_u32_u24_e32 v7, 0x84, v2
v_mad_u64_u32 v[5:6], null, v4, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v4, v1, 2, v7
v_ashrrev_i32_e32 v6, 31, v5
ds_load_b32 v7, v4 offset:64                   ; column tid.y+16
v_lshlrev_b64 v[4:5], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v4, s0, s2, v4
v_add_co_ci_u32_e64 v5, s0, s3, v5, s0
s_waitcnt lgkmcnt(0)
global_store_b32 v[4:5], v7, off
.LBB2_19:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_22
; ---- phase 2, k = 24
v_add_nc_u32_e32 v3, 24, v3
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s5, v3                ; y'+24 < rows
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_22
v_mul_u32_u24_e32 v2, 0x84, v2
v_mad_u64_u32 v[4:5], null, v3, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v0, v1, 2, v2
v_ashrrev_i32_e32 v5, 31, v4
ds_load_b32 v2, v0 offset:96                   ; column tid.y+24
v_lshlrev_b64 v[0:1], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB2_22:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23matrixTransposeUnrolledPfS_ii
.amdhsa_group_segment_fixed_size 4224
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z23matrixTransposeUnrolledPfS_ii, .Lfunc_end2-_Z23matrixTransposeUnrolledPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z24matrixTransposeSharedwBCPfS_ii
.globl _Z24matrixTransposeSharedwBCPfS_ii
.p2align 8
.type _Z24matrixTransposeSharedwBCPfS_ii,@function
;-----------------------------------------------------------------------
; _Z24matrixTransposeSharedwBCPfS_ii — shared-memory transpose without
; bank conflicts (gfx1100). LDS: 4224 bytes = float mat[32][33]; the
; padded row stride is 0x84 = 132 bytes, which is what distinguishes this
; kernel from _Z21matrixTransposeSharedPfS_ii (stride 128).
; Kernarg: [0x0]=_a, [0x8]=_b, [0x10]=cols (s4), [0x14]=rows (s5).
;-----------------------------------------------------------------------
_Z24matrixTransposeSharedwBCPfS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24                    ; packed blockDim.x / blockDim.y
s_load_b64 s[4:5], s[0:1], 0x10                ; s4 = cols, s5 = rows
v_bfe_u32 v1, v0, 10, 10                       ; v1 = threadIdx.y
v_and_b32_e32 v2, 0x3ff, v0                    ; v2 = threadIdx.x
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
s_mul_i32 s15, s15, s3                         ; s15 = by = bid.y*bdim.y
s_mul_i32 s14, s14, s2                         ; s14 = bx = bid.x*bdim.x
v_add_nc_u32_e32 v3, s15, v1                   ; v3 = i = by + tid.y
v_add_nc_u32_e32 v0, s14, v2                   ; v0 = j = bx + tid.x
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s5, v3                ; i < rows
v_cmp_gt_i32_e64 s2, s4, v0                    ; j < cols
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s2, vcc_lo
s_and_saveexec_b32 s2, s3                      ; predicate the tile load
s_cbranch_execz .LBB3_2
s_load_b64 s[6:7], s[0:1], 0x0                 ; s[6:7] = _a
v_mad_u64_u32 v[4:5], null, v3, s4, v[0:1]     ; i*cols + j
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[3:4], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v0, v[3:4], off                ; v0 = _a[i*cols + j]
v_lshlrev_b32_e32 v3, 2, v1                    ; tid.y * 4
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v3, v2, 0x84, v3                 ; addr = tid.x*132 + tid.y*4 = &mat[tid.x][tid.y]
s_waitcnt vmcnt(0)
ds_store_b32 v3, v0
.LBB3_2:                                       ; rejoin; __syncthreads() below
s_or_b32 exec_lo, exec_lo, s2
v_add_nc_u32_e32 v0, s15, v2                   ; v0 = tj = by + tid.x
v_add_nc_u32_e32 v3, s14, v1                   ; v3 = ti = bx + tid.y
s_waitcnt lgkmcnt(0)
s_barrier                                      ; __syncthreads()
buffer_gl0_inv
v_cmp_gt_i32_e32 vcc_lo, s4, v0                ; tj < cols
v_cmp_gt_i32_e64 s2, s5, v3                    ; ti < rows
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s3, s2                      ; predicate the store-back
s_cbranch_execz .LBB3_4
v_lshlrev_b32_e32 v2, 2, v2                    ; tid.x * 4
s_load_b64 s[0:1], s[0:1], 0x8                 ; s[0:1] = _b
v_mad_u64_u32 v[4:5], null, v3, s5, v[0:1]     ; ti*rows + tj
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u32_u24 v0, v1, 0x84, v2                 ; addr = &mat[tid.y][tid.x]
v_ashrrev_i32_e32 v5, 31, v4
ds_load_b32 v2, v0                             ; transposed read from LDS
v_lshlrev_b64 v[0:1], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off               ; _b[ti*rows + tj] = mat[tid.y][tid.x]
.LBB3_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z24matrixTransposeSharedwBCPfS_ii
.amdhsa_group_segment_fixed_size 4224
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z24matrixTransposeSharedwBCPfS_ii, .Lfunc_end3-_Z24matrixTransposeSharedwBCPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15matrixTransposePfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15matrixTransposePfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21matrixTransposeSharedPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z21matrixTransposeSharedPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 4224
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23matrixTransposeUnrolledPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23matrixTransposeUnrolledPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4224
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z24matrixTransposeSharedwBCPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z24matrixTransposeSharedwBCPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* matrix transpose program */
#include <hip/hip_runtime.h>
#include <stdio.h>
const int P = 32;
/* naive CPU */
/* Reference CPU transpose: dst[row][col] = src[col][row].
 * NOTE(review): both index expressions use stride N, so the result is only
 * a valid transpose when M == N (the only case main() exercises) —
 * confirm the intended layout before using with rectangular matrices. */
void naiveCPU(float *src, float *dst, int M, int N)
{
    for (int row = 0; row < N; ++row)
        for (int col = 0; col < M; ++col)
            dst[row * N + col] = src[col * N + row];
}
/* naive GPU */
/* Naive GPU transpose: each thread moves exactly one element,
   reading _a row-major and writing its transpose into _b. */
__global__ void matrixTranspose(float *_a, float *_b, int cols, int rows)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    _b[col * rows + row] = _a[row * cols + col];
}
/* shared memory GPU */
/* Tiled transpose staged through unpadded shared memory. */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
    __shared__ float mat[P][P];

    const int tileX = blockIdx.x * blockDim.x;
    const int tileY = blockIdx.y * blockDim.y;

    /* (i, j): element this thread loads; (ti, tj): element it stores. */
    const int i = tileY + threadIdx.y;
    const int j = tileX + threadIdx.x;
    const int ti = tileX + threadIdx.y;
    const int tj = tileY + threadIdx.x;

    if (i < rows && j < cols)
        mat[threadIdx.x][threadIdx.y] = _a[i * cols + j];

    __syncthreads(); /* the tile must be fully loaded before read-back */

    if (tj < cols && ti < rows)
        _b[ti * rows + tj] = mat[threadIdx.y][threadIdx.x];
}
/* loop unrolled: 8 rows per thread through padded shared memory */
/* Unrolled tiled transpose: launched with a P x 8 block, so every thread
   covers P/8 rows of the tile. The P+1 column avoids bank conflicts. */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
    __shared__ float mat[P][P + 1];

    /* Load phase: copy a P x P tile of _a into shared memory. */
    int col = blockIdx.x * P + threadIdx.x;
    int row = blockIdx.y * P + threadIdx.y;
#pragma unroll
    for (int k = 0; k < P; k += 8) {
        if (col < rows && row + k < cols)
            mat[threadIdx.y + k][threadIdx.x] = _a[(row + k) * rows + col];
    }

    __syncthreads(); /* staging complete before the transposed read-back */

    /* Store phase: emit the tile transposed. */
    col = blockIdx.y * P + threadIdx.x;
    row = blockIdx.x * P + threadIdx.y;
#pragma unroll
    for (int k = 0; k < P; k += 8) {
        if (col < cols && row + k < rows)
            _b[(row + k) * cols + col] = mat[threadIdx.x][threadIdx.y + k];
    }
}
/* shared memory without bank conflict */
/* Shared-memory transpose with a padding column (P+1) so the transposed
   column reads hit distinct shared-memory banks. */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
    __shared__ float mat[P][P + 1];

    const int originX = blockIdx.x * blockDim.x;
    const int originY = blockIdx.y * blockDim.y;

    const int i = originY + threadIdx.y;  /* source row    */
    const int j = originX + threadIdx.x;  /* source column */
    const int ti = originX + threadIdx.y; /* target row    */
    const int tj = originY + threadIdx.x; /* target column */

    if (i < rows && j < cols)
        mat[threadIdx.x][threadIdx.y] = _a[i * cols + j];

    __syncthreads(); /* no thread reads the tile until all writes land */

    if (tj < cols && ti < rows)
        _b[ti * rows + tj] = mat[threadIdx.y][threadIdx.x];
}
int main(int argc, char **argv)
{
/* N*M matrix, parallelism is P */
const int N = 1024;
const int M = 1024;
const int matSize = N * M * sizeof(float);
dim3 gridDim(N/P, M/P, 1);
dim3 blockDim(P , P, 1);
/* configuration of GPU */
printf("===================\n");
printf("Matrix: %d * %d\n", N, M);
printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
printf("===================\n");
/* allocate memory for matrix in host */
float *h_matrix = (float *) malloc(matSize);
float *h_transpose = (float *) malloc(matSize);
/* allocate memory for matrix in device */
float *d_matrix, *d_transpose;
hipMalloc(&d_matrix, matSize);
hipMalloc(&d_transpose, matSize);
/* randomly generate a matrix in host */
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
}
}
/* utility for recording start and finish time */
hipEvent_t tStart, tEnd;
float duration;
hipEventCreate(&tStart);
hipEventCreate(&tEnd);
const int nIterations = 100;
/* 1. naive CPU transpose */
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
naiveCPU(h_matrix, h_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd); // waits for record to terminate
hipEventElapsedTime(&duration, tStart, tEnd);
printf("\nNaive CPU: %f\n", duration / nIterations);
/* 2. naive GPU transpose */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nNaive GPU: %f\n", duration / nIterations);
/* 3. shared memory GPU transpose */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nShared GPU: %f\n", duration / nIterations);
/* 4. shared memory GPU transpose without bank conflict */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nSharedwBC GPU: %f\n", duration / nIterations);
duration = 0;
/* 5. unrolled GPU transpose */
dim3 blockDimUnroll(P, 8, 1);
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nUnrolled GPU: %f\n", duration / nIterations);
return 0;
} | .text
.file "cudabench.hip"
.globl _Z8naiveCPUPfS_ii # -- Begin function _Z8naiveCPUPfS_ii
.p2align 4, 0x90
.type _Z8naiveCPUPfS_ii,@function
_Z8naiveCPUPfS_ii: # @_Z8naiveCPUPfS_ii
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB0_7
# %bb.1: # %.preheader.lr.ph
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %eax
movl %edx, %r8d
leaq (,%rax,4), %r9
xorl %r10d, %r10d
xorl %r11d, %r11d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
incq %r11
addl %ecx, %r10d
addq $4, %rdi
cmpq %rax, %r11
je .LBB0_6
.LBB0_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
testl %edx, %edx
jle .LBB0_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r10d, %ebx
leaq (%rsi,%rbx,4), %rbx
movq %rdi, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_4: # Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rbx,%r15,4)
incq %r15
addq %r9, %r14
cmpq %r15, %r8
jne .LBB0_4
jmp .LBB0_5
.LBB0_6:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_7: # %._crit_edge17
retq
.Lfunc_end0:
.size _Z8naiveCPUPfS_ii, .Lfunc_end0-_Z8naiveCPUPfS_ii
.cfi_endproc
# -- End function
.globl _Z30__device_stub__matrixTransposePfS_ii # -- Begin function _Z30__device_stub__matrixTransposePfS_ii
.p2align 4, 0x90
.type _Z30__device_stub__matrixTransposePfS_ii,@function
_Z30__device_stub__matrixTransposePfS_ii: # @_Z30__device_stub__matrixTransposePfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15matrixTransposePfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z30__device_stub__matrixTransposePfS_ii, .Lfunc_end1-_Z30__device_stub__matrixTransposePfS_ii
.cfi_endproc
# -- End function
.globl _Z36__device_stub__matrixTransposeSharedPfS_ii # -- Begin function _Z36__device_stub__matrixTransposeSharedPfS_ii
.p2align 4, 0x90
.type _Z36__device_stub__matrixTransposeSharedPfS_ii,@function
_Z36__device_stub__matrixTransposeSharedPfS_ii: # @_Z36__device_stub__matrixTransposeSharedPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z21matrixTransposeSharedPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z36__device_stub__matrixTransposeSharedPfS_ii, .Lfunc_end2-_Z36__device_stub__matrixTransposeSharedPfS_ii
.cfi_endproc
# -- End function
.globl _Z38__device_stub__matrixTransposeUnrolledPfS_ii # -- Begin function _Z38__device_stub__matrixTransposeUnrolledPfS_ii
.p2align 4, 0x90
.type _Z38__device_stub__matrixTransposeUnrolledPfS_ii,@function
_Z38__device_stub__matrixTransposeUnrolledPfS_ii: # @_Z38__device_stub__matrixTransposeUnrolledPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23matrixTransposeUnrolledPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z38__device_stub__matrixTransposeUnrolledPfS_ii, .Lfunc_end3-_Z38__device_stub__matrixTransposeUnrolledPfS_ii
.cfi_endproc
# -- End function
.globl _Z39__device_stub__matrixTransposeSharedwBCPfS_ii # -- Begin function _Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.p2align 4, 0x90
.type _Z39__device_stub__matrixTransposeSharedwBCPfS_ii,@function
_Z39__device_stub__matrixTransposeSharedwBCPfS_ii: # @_Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z24matrixTransposeSharedwBCPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size _Z39__device_stub__matrixTransposeSharedwBCPfS_ii, .Lfunc_end4-_Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI5_1:
.long 0x42c80000 # float 100
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
movl $.L.str.1, %edi
movl $1024, %esi # imm = 0x400
movl $1024, %edx # imm = 0x400
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movl $32, %esi
movl $32, %edx
movl $1, %ecx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl $32, %esi
movl $32, %edx
movl $1, %ecx
xorl %eax, %eax
callq printf
movl $.Lstr.1, %edi
callq puts@PLT
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r13
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r12
leaq 40(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
movq %r13, %r14
.p2align 4, 0x90
.LBB5_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_2: # Parent Loop BB5_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI5_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss .LCPI5_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
movss %xmm0, (%r14,%r15,4)
incq %r15
cmpq $1024, %r15 # imm = 0x400
jne .LBB5_2
# %bb.3: # in Loop: Header=BB5_1 Depth=1
incq %rbx
addq $4096, %r14 # imm = 0x1000
cmpq $1024, %rbx # imm = 0x400
jne .LBB5_1
# %bb.4:
leaq 32(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 32(%rsp), %rdi
xorl %ebx, %ebx
xorl %esi, %esi
callq hipEventRecord
.p2align 4, 0x90
.LBB5_5: # %.preheader.i.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_6 Depth 2
# Child Loop BB5_7 Depth 3
movq %r13, %rax
movq %r12, %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_6: # %.preheader.i
# Parent Loop BB5_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB5_7 Depth 3
movq %rax, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB5_7: # Parent Loop BB5_5 Depth=1
# Parent Loop BB5_6 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rsi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rcx,%rdi,4)
incq %rdi
addq $4096, %rsi # imm = 0x1000
cmpq $1024, %rdi # imm = 0x400
jne .LBB5_7
# %bb.8: # %._crit_edge.i
# in Loop: Header=BB5_6 Depth=2
incq %rdx
addq $4096, %rcx # imm = 0x1000
addq $4, %rax
cmpq $1024, %rdx # imm = 0x400
jne .LBB5_6
# %bb.9: # %_Z8naiveCPUPfS_ii.exit
# in Loop: Header=BB5_5 Depth=1
incl %ebx
cmpl $100, %ebx
jne .LBB5_5
# %bb.10:
movq %r12, 152(%rsp) # 8-byte Spill
movabsq $137438953504, %r15 # imm = 0x2000000020
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r13, 160(%rsp) # 8-byte Spill
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r12d
leaq 64(%rsp), %r13
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r14
jmp .LBB5_11
.p2align 4, 0x90
.LBB5_13: # in Loop: Header=BB5_11 Depth=1
decl %r12d
je .LBB5_14
.LBB5_11: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_13
# %bb.12: # in Loop: Header=BB5_11 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z15matrixTransposePfS_ii, %edi
movq %r14, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_13
.LBB5_14:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq 160(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r12d
leaq 64(%rsp), %r13
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r14
jmp .LBB5_15
.p2align 4, 0x90
.LBB5_17: # in Loop: Header=BB5_15 Depth=1
decl %r12d
je .LBB5_18
.LBB5_15: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_17
# %bb.16: # in Loop: Header=BB5_15 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z21matrixTransposeSharedPfS_ii, %edi
movq %r14, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_17
.LBB5_18:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq 160(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r12d
leaq 64(%rsp), %r13
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r14
jmp .LBB5_19
.p2align 4, 0x90
.LBB5_21: # in Loop: Header=BB5_19 Depth=1
decl %r12d
je .LBB5_22
.LBB5_19: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_21
# %bb.20: # in Loop: Header=BB5_19 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z24matrixTransposeSharedwBCPfS_ii, %edi
movq %r14, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_21
.LBB5_22:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
movl $0, 4(%rsp)
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq 160(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r13d
movabsq $34359738400, %r14 # imm = 0x800000020
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r12
jmp .LBB5_23
.p2align 4, 0x90
.LBB5_25: # in Loop: Header=BB5_23 Depth=1
decl %r13d
je .LBB5_26
.LBB5_23: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_25
# %bb.24: # in Loop: Header=BB5_23 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z23matrixTransposeUnrolledPfS_ii, %edi
movq %r12, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_25
.LBB5_26:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15matrixTransposePfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21matrixTransposeSharedPfS_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23matrixTransposeUnrolledPfS_ii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24matrixTransposeSharedwBCPfS_ii, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15matrixTransposePfS_ii,@object # @_Z15matrixTransposePfS_ii
.section .rodata,"a",@progbits
.globl _Z15matrixTransposePfS_ii
.p2align 3, 0x0
_Z15matrixTransposePfS_ii:
.quad _Z30__device_stub__matrixTransposePfS_ii
.size _Z15matrixTransposePfS_ii, 8
.type _Z21matrixTransposeSharedPfS_ii,@object # @_Z21matrixTransposeSharedPfS_ii
.globl _Z21matrixTransposeSharedPfS_ii
.p2align 3, 0x0
_Z21matrixTransposeSharedPfS_ii:
.quad _Z36__device_stub__matrixTransposeSharedPfS_ii
.size _Z21matrixTransposeSharedPfS_ii, 8
.type _Z23matrixTransposeUnrolledPfS_ii,@object # @_Z23matrixTransposeUnrolledPfS_ii
.globl _Z23matrixTransposeUnrolledPfS_ii
.p2align 3, 0x0
_Z23matrixTransposeUnrolledPfS_ii:
.quad _Z38__device_stub__matrixTransposeUnrolledPfS_ii
.size _Z23matrixTransposeUnrolledPfS_ii, 8
.type _Z24matrixTransposeSharedwBCPfS_ii,@object # @_Z24matrixTransposeSharedwBCPfS_ii
.globl _Z24matrixTransposeSharedwBCPfS_ii
.p2align 3, 0x0
_Z24matrixTransposeSharedwBCPfS_ii:
.quad _Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.size _Z24matrixTransposeSharedwBCPfS_ii, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Matrix: %d * %d\n"
.size .L.str.1, 17
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Grid: %d * %d * %d\n"
.size .L.str.2, 22
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Block: %d * %d * %d\n"
.size .L.str.3, 22
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nNaive CPU: %f\n"
.size .L.str.4, 16
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\nNaive GPU: %f\n"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nShared GPU: %f\n"
.size .L.str.6, 17
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "\nSharedwBC GPU: %f\n"
.size .L.str.7, 20
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\nUnrolled GPU: %f\n"
.size .L.str.8, 19
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z15matrixTransposePfS_ii"
.size .L__unnamed_1, 26
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21matrixTransposeSharedPfS_ii"
.size .L__unnamed_2, 32
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z23matrixTransposeUnrolledPfS_ii"
.size .L__unnamed_3, 34
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z24matrixTransposeSharedwBCPfS_ii"
.size .L__unnamed_4, 35
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.1,@object # @str.1
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.1:
.asciz "==================="
.size .Lstr.1, 20
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__matrixTransposePfS_ii
.addrsig_sym _Z36__device_stub__matrixTransposeSharedPfS_ii
.addrsig_sym _Z38__device_stub__matrixTransposeUnrolledPfS_ii
.addrsig_sym _Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15matrixTransposePfS_ii
.addrsig_sym _Z21matrixTransposeSharedPfS_ii
.addrsig_sym _Z23matrixTransposeUnrolledPfS_ii
.addrsig_sym _Z24matrixTransposeSharedwBCPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000254f5_00000000-6_cudabench.cudafe1.cpp"
.text
#APP
#NO_APP
	# static void __cudaUnregisterBinaryUtil(void)
	# Internal-linkage teardown helper (registered by nvcc's module ctor,
	# presumably via atexit — the registration site is outside this chunk).
	# Unregisters this translation unit's fat-binary handle from the CUDA
	# runtime.  SysV AMD64; leaf wrapper around one PLT call.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
	.cfi_startproc
	endbr64					# CET/IBT landing pad
	subq	$8, %rsp			# re-align %rsp to 16 for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = saved fatbin handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp			# undo alignment padding
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2061:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	# void naiveCPU(float *a, float *b, int d1, int d2)
	# ABI:  SysV AMD64.  In: %rdi=a, %rsi=b, %edx=d1, %ecx=d2.
	# Host-side matrix transpose: for i in [0,%ecx), j in [0,%edx):
	#     b[i*%ecx + j] = a[j*%ecx + i]
	# i.e. both source and destination are indexed with row stride %ecx
	# (caller in this file passes a square 1024x1024 matrix, so the
	# rows/cols distinction is moot here — NOTE(review): for non-square
	# inputs the shared stride would need confirming against the C source).
	# Register roles:
	#   %r8  = %ecx*4        column byte-stride through a
	#   %r10d= i*%ecx        running element offset of row i in b
	#   %r9d = i             outer counter;  %ebx = cached %edx
	#   %rbp = sext(%edx)    inner row length, used to compute loop bound
	.globl	_Z8naiveCPUPfS_ii
	.type	_Z8naiveCPUPfS_ii, @function
_Z8naiveCPUPfS_ii:
.LFB2057:
	.cfi_startproc
	endbr64
	testl	%ecx, %ecx
	jle	.L11				# d2 <= 0: nothing to do
	pushq	%rbp				# save callee-saved regs
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	movq	%rsi, %r11			# r11 = b
	movl	%edx, %ebx			# ebx = d1 (inner trip count)
	movslq	%ecx, %r8
	salq	$2, %r8				# r8 = d2 * sizeof(float)
	movl	$0, %r10d			# r10d = i*d2 accumulator
	movl	$0, %r9d			# r9d = i
	movslq	%edx, %rbp			# rbp = (long)d1
	jmp	.L5
.L7:
	# Inner-loop setup for row i: dest runs b+i*d2 .. b+i*d2+d1,
	# source starts at a+i and advances by d2 floats per element.
	movslq	%r10d, %rdx
	leaq	(%r11,%rdx,4), %rax		# rax = &b[i*d2]       (dest cursor)
	addq	%rbp, %rdx
	leaq	(%r11,%rdx,4), %rsi		# rsi = &b[i*d2 + d1]  (dest end)
	movq	%rdi, %rdx			# rdx = &a[i]          (src cursor)
.L6:
	movss	(%rdx), %xmm0			# b[i*d2+j] = a[j*d2+i]
	movss	%xmm0, (%rax)
	addq	%r8, %rdx			# src += d2 floats
	addq	$4, %rax			# dest += 1 float
	cmpq	%rsi, %rax
	jne	.L6
.L8:
	addl	$1, %r9d			# ++i
	addq	$4, %rdi			# base src column: a += 1 float
	addl	%ecx, %r10d			# i*d2 accumulator += d2
	cmpl	%r9d, %ecx
	je	.L3				# all d2 rows done
.L5:
	testl	%ebx, %ebx
	jg	.L7				# run inner loop only if d1 > 0
	jmp	.L8
.L3:
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L11:
	.cfi_restore 3
	.cfi_restore 6
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z8naiveCPUPfS_ii, .-_Z8naiveCPUPfS_ii
	# nvcc-generated host launch stub for matrixTranspose(float*, float*, int, int).
	# Spills the four kernel arguments to the stack, builds the void* argument
	# array cudaLaunchKernel expects, pops the <<<...>>> launch configuration
	# saved by __cudaPushCallConfiguration, and launches the kernel.
	# Protected by a stack-smashing cookie (%fs:40).
	# Frame layout (after subq $152):
	#   8/12/16/24(%rsp)  spilled ecx/edx/rsi/rdi (kernel args, reverse order)
	#   32 / 40(%rsp)     sharedMem / stream out-params of PopCallConfiguration
	#   48..56(%rsp)      gridDim  (x,y,z) — seeded to 1,1,1
	#   60..68(%rsp)      blockDim (x,y,z) — seeded to 1,1,1
	#   96..120(%rsp)     void* args[4] -> the spilled arguments
	#   136(%rsp)         stack-protector cookie
	.globl	_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
	.type	_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii, @function
_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)			# spill _a
	movq	%rsi, 16(%rsp)			# spill _b
	movl	%edx, 12(%rsp)			# spill cols
	movl	%ecx, 8(%rsp)			# spill rows
	movq	%fs:40, %rax			# install stack cookie
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax			# args[0] = &_a
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax			# args[1] = &_b
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax			# args[2] = &cols
	movq	%rax, 112(%rsp)
	leaq	8(%rsp), %rax			# args[3] = &rows
	movq	%rax, 120(%rsp)
	movl	$1, 48(%rsp)			# default gridDim  = {1,1,1}
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)			# default blockDim = {1,1,1}
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx			# &stream
	leaq	32(%rsp), %rdx			# &sharedMem
	leaq	60(%rsp), %rsi			# &blockDim
	leaq	48(%rsp), %rdi			# &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L18				# 0 = config available: launch
.L14:
	movq	136(%rsp), %rax			# verify stack cookie
	subq	%fs:40, %rax
	jne	.L19
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L18:
	.cfi_restore_state
	# Stack args for cudaLaunchKernel: stream then sharedMem.  Both pushes
	# read 40(%rsp) because the first push shifts every offset by 8.
	pushq	40(%rsp)			# stream
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)			# sharedMem
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9			# args array (96+16 after pushes)
	movq	76(%rsp), %rcx			# blockDim.{x,y}
	movl	84(%rsp), %r8d			# blockDim.z
	movq	64(%rsp), %rsi			# gridDim.{x,y}
	movl	72(%rsp), %edx			# gridDim.z
	leaq	_Z15matrixTransposePfS_ii(%rip), %rdi	# kernel handle
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp			# drop the two pushed args
	.cfi_def_cfa_offset 160
	jmp	.L14
.L19:
	call	__stack_chk_fail@PLT		# cookie mismatch: abort
	.cfi_endproc
.LFE2083:
	.size	_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii, .-_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
	# Host-side entry bearing the kernel's mangled name.  Its address is the
	# handle passed to cudaLaunchKernel; calling it forwards the four
	# arguments (still in %rdi/%rsi/%edx/%ecx) to the device stub above.
	.globl	_Z15matrixTransposePfS_ii
	.type	_Z15matrixTransposePfS_ii, @function
_Z15matrixTransposePfS_ii:
.LFB2084:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# keep %rsp 16-aligned at the call
	.cfi_def_cfa_offset 16
	call	_Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2084:
	.size	_Z15matrixTransposePfS_ii, .-_Z15matrixTransposePfS_ii
	# nvcc-generated host launch stub for matrixTransposeShared(float*, float*, int, int).
	# Identical shape to the matrixTranspose stub: spill args, build void*
	# args[4], pop the pushed <<<...>>> configuration, launch via
	# cudaLaunchKernel.  Stack-protector cookie at 136(%rsp).
	.globl	_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
	.type	_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii, @function
_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii:
.LFB2085:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)			# spill _a
	movq	%rsi, 16(%rsp)			# spill _b
	movl	%edx, 12(%rsp)			# spill cols
	movl	%ecx, 8(%rsp)			# spill rows
	movq	%fs:40, %rax			# install stack cookie
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax			# args[0] = &_a
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax			# args[1] = &_b
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax			# args[2] = &cols
	movq	%rax, 112(%rsp)
	leaq	8(%rsp), %rax			# args[3] = &rows
	movq	%rax, 120(%rsp)
	movl	$1, 48(%rsp)			# default gridDim  = {1,1,1}
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)			# default blockDim = {1,1,1}
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx			# &stream
	leaq	32(%rsp), %rdx			# &sharedMem
	leaq	60(%rsp), %rsi			# &blockDim
	leaq	48(%rsp), %rdi			# &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L26				# 0 = config available: launch
.L22:
	movq	136(%rsp), %rax			# verify stack cookie
	subq	%fs:40, %rax
	jne	.L27
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L26:
	.cfi_restore_state
	pushq	40(%rsp)			# stream (offsets shift 8 per push)
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)			# sharedMem
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9			# args array
	movq	76(%rsp), %rcx			# blockDim.{x,y}
	movl	84(%rsp), %r8d			# blockDim.z
	movq	64(%rsp), %rsi			# gridDim.{x,y}
	movl	72(%rsp), %edx			# gridDim.z
	leaq	_Z21matrixTransposeSharedPfS_ii(%rip), %rdi	# kernel handle
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp			# drop pushed stream/sharedMem
	.cfi_def_cfa_offset 160
	jmp	.L22
.L27:
	call	__stack_chk_fail@PLT		# cookie mismatch: abort
	.cfi_endproc
.LFE2085:
	.size	_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii, .-_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
	# Host-side handle for matrixTransposeShared; forwards the register
	# arguments unchanged to its device stub.
	.globl	_Z21matrixTransposeSharedPfS_ii
	.type	_Z21matrixTransposeSharedPfS_ii, @function
_Z21matrixTransposeSharedPfS_ii:
.LFB2086:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# 16-byte call alignment
	.cfi_def_cfa_offset 16
	call	_Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2086:
	.size	_Z21matrixTransposeSharedPfS_ii, .-_Z21matrixTransposeSharedPfS_ii
	# nvcc-generated host launch stub for matrixTransposeUnrolled(float*, float*, int, int).
	# Same pattern as the other stubs in this file: spill args, build void*
	# args[4], pop the launch configuration, call cudaLaunchKernel.
	.globl	_Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
	.type	_Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii, @function
_Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii:
.LFB2087:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)			# spill _a
	movq	%rsi, 16(%rsp)			# spill _b
	movl	%edx, 12(%rsp)			# spill cols
	movl	%ecx, 8(%rsp)			# spill rows
	movq	%fs:40, %rax			# install stack cookie
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax			# args[0] = &_a
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax			# args[1] = &_b
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax			# args[2] = &cols
	movq	%rax, 112(%rsp)
	leaq	8(%rsp), %rax			# args[3] = &rows
	movq	%rax, 120(%rsp)
	movl	$1, 48(%rsp)			# default gridDim  = {1,1,1}
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)			# default blockDim = {1,1,1}
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx			# &stream
	leaq	32(%rsp), %rdx			# &sharedMem
	leaq	60(%rsp), %rsi			# &blockDim
	leaq	48(%rsp), %rdi			# &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L34				# 0 = config available: launch
.L30:
	movq	136(%rsp), %rax			# verify stack cookie
	subq	%fs:40, %rax
	jne	.L35
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L34:
	.cfi_restore_state
	pushq	40(%rsp)			# stream (offsets shift 8 per push)
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)			# sharedMem
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9			# args array
	movq	76(%rsp), %rcx			# blockDim.{x,y}
	movl	84(%rsp), %r8d			# blockDim.z
	movq	64(%rsp), %rsi			# gridDim.{x,y}
	movl	72(%rsp), %edx			# gridDim.z
	leaq	_Z23matrixTransposeUnrolledPfS_ii(%rip), %rdi	# kernel handle
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp			# drop pushed stream/sharedMem
	.cfi_def_cfa_offset 160
	jmp	.L30
.L35:
	call	__stack_chk_fail@PLT		# cookie mismatch: abort
	.cfi_endproc
.LFE2087:
	.size	_Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii, .-_Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
.globl _Z23matrixTransposeUnrolledPfS_ii
.type _Z23matrixTransposeUnrolledPfS_ii, @function
_Z23matrixTransposeUnrolledPfS_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z23matrixTransposeUnrolledPfS_ii, .-_Z23matrixTransposeUnrolledPfS_ii
.globl _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
.type _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii, @function
_Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L42
.L38:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L43
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L42:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z24matrixTransposeSharedwBCPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L38
.L43:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii, .-_Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
.globl _Z24matrixTransposeSharedwBCPfS_ii
.type _Z24matrixTransposeSharedwBCPfS_ii, @function
_Z24matrixTransposeSharedwBCPfS_ii:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z24matrixTransposeSharedwBCPfS_ii, .-_Z24matrixTransposeSharedwBCPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "===================\n"
.LC1:
.string "Matrix: %d * %d\n"
.LC2:
.string "Grid: %d * %d * %d\n"
.LC3:
.string "Block: %d * %d * %d\n"
.LC6:
.string "\nNaive CPU: %f\n"
.LC7:
.string "\nNaive GPU: %f\n"
.LC8:
.string "\nShared GPU: %f\n"
.LC9:
.string "\nSharedwBC GPU: %f\n"
.LC11:
.string "\nUnrolled GPU: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $96, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1024, %ecx
movl $1024, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %r8d
movl $32, %ecx
movl $32, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %r8d
movl $32, %ecx
movl $32, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4194304, %edi
call malloc@PLT
movq %rax, %r12
movl $4194304, %edi
call malloc@PLT
movq %rax, %r13
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 4096(%r12), %rbp
leaq 4198400(%r12), %r14
.L47:
leaq -4096(%rbp), %rbx
.L48:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC4(%rip), %xmm0
mulss .LC5(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L48
addq $4096, %rbp
cmpq %r14, %rbp
jne .L47
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
.L50:
movl $1024, %ecx
movl $1024, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z8naiveCPUPfS_ii
subl $1, %ebx
jne .L50
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L52
.L51:
subl $1, %ebx
je .L67
.L52:
movl $32, 52(%rsp)
movl $32, 56(%rsp)
movl $32, 64(%rsp)
movl $32, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L51
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z39__device_stub__Z15matrixTransposePfS_iiPfS_ii
jmp .L51
.L67:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L54
.L53:
subl $1, %ebx
je .L68
.L54:
movl $32, 52(%rsp)
movl $32, 56(%rsp)
movl $32, 64(%rsp)
movl $32, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L53
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z45__device_stub__Z21matrixTransposeSharedPfS_iiPfS_ii
jmp .L53
.L68:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L56
.L55:
subl $1, %ebx
je .L69
.L56:
movl $32, 52(%rsp)
movl $32, 56(%rsp)
movl $32, 64(%rsp)
movl $32, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L55
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z48__device_stub__Z24matrixTransposeSharedwBCPfS_iiPfS_ii
jmp .L55
.L69:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0x00000000, 12(%rsp)
movl $32, 76(%rsp)
movl $8, 80(%rsp)
movl $1, 84(%rsp)
movl $1, %ecx
movl $4194304, %edx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
jmp .L58
.L57:
subl $1, %ebx
je .L70
.L58:
movl $32, 52(%rsp)
movl $32, 56(%rsp)
movl 84(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L57
movl $1024, %ecx
movl $1024, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z47__device_stub__Z23matrixTransposeUnrolledPfS_iiPfS_ii
jmp .L57
.L70:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 12(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L71
movl $0, %eax
addq $96, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC12:
.string "_Z24matrixTransposeSharedwBCPfS_ii"
.align 8
.LC13:
.string "_Z23matrixTransposeUnrolledPfS_ii"
.align 8
.LC14:
.string "_Z21matrixTransposeSharedPfS_ii"
.section .rodata.str1.1
.LC15:
.string "_Z15matrixTransposePfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z24matrixTransposeSharedwBCPfS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z23matrixTransposeUnrolledPfS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z21matrixTransposeSharedPfS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z15matrixTransposePfS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC4:
.long 805306368
.align 4
.LC5:
.long 1120403456
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudabench.hip"
.globl _Z8naiveCPUPfS_ii # -- Begin function _Z8naiveCPUPfS_ii
.p2align 4, 0x90
.type _Z8naiveCPUPfS_ii,@function
_Z8naiveCPUPfS_ii: # @_Z8naiveCPUPfS_ii
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB0_7
# %bb.1: # %.preheader.lr.ph
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %eax
movl %edx, %r8d
leaq (,%rax,4), %r9
xorl %r10d, %r10d
xorl %r11d, %r11d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
incq %r11
addl %ecx, %r10d
addq $4, %rdi
cmpq %rax, %r11
je .LBB0_6
.LBB0_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
testl %edx, %edx
jle .LBB0_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r10d, %ebx
leaq (%rsi,%rbx,4), %rbx
movq %rdi, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_4: # Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rbx,%r15,4)
incq %r15
addq %r9, %r14
cmpq %r15, %r8
jne .LBB0_4
jmp .LBB0_5
.LBB0_6:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_7: # %._crit_edge17
retq
.Lfunc_end0:
.size _Z8naiveCPUPfS_ii, .Lfunc_end0-_Z8naiveCPUPfS_ii
.cfi_endproc
# -- End function
.globl _Z30__device_stub__matrixTransposePfS_ii # -- Begin function _Z30__device_stub__matrixTransposePfS_ii
.p2align 4, 0x90
.type _Z30__device_stub__matrixTransposePfS_ii,@function
_Z30__device_stub__matrixTransposePfS_ii: # @_Z30__device_stub__matrixTransposePfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15matrixTransposePfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z30__device_stub__matrixTransposePfS_ii, .Lfunc_end1-_Z30__device_stub__matrixTransposePfS_ii
.cfi_endproc
# -- End function
.globl _Z36__device_stub__matrixTransposeSharedPfS_ii # -- Begin function _Z36__device_stub__matrixTransposeSharedPfS_ii
.p2align 4, 0x90
.type _Z36__device_stub__matrixTransposeSharedPfS_ii,@function
_Z36__device_stub__matrixTransposeSharedPfS_ii: # @_Z36__device_stub__matrixTransposeSharedPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z21matrixTransposeSharedPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z36__device_stub__matrixTransposeSharedPfS_ii, .Lfunc_end2-_Z36__device_stub__matrixTransposeSharedPfS_ii
.cfi_endproc
# -- End function
.globl _Z38__device_stub__matrixTransposeUnrolledPfS_ii # -- Begin function _Z38__device_stub__matrixTransposeUnrolledPfS_ii
.p2align 4, 0x90
.type _Z38__device_stub__matrixTransposeUnrolledPfS_ii,@function
_Z38__device_stub__matrixTransposeUnrolledPfS_ii: # @_Z38__device_stub__matrixTransposeUnrolledPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23matrixTransposeUnrolledPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z38__device_stub__matrixTransposeUnrolledPfS_ii, .Lfunc_end3-_Z38__device_stub__matrixTransposeUnrolledPfS_ii
.cfi_endproc
# -- End function
.globl _Z39__device_stub__matrixTransposeSharedwBCPfS_ii # -- Begin function _Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.p2align 4, 0x90
.type _Z39__device_stub__matrixTransposeSharedwBCPfS_ii,@function
_Z39__device_stub__matrixTransposeSharedwBCPfS_ii: # @_Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z24matrixTransposeSharedwBCPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size _Z39__device_stub__matrixTransposeSharedwBCPfS_ii, .Lfunc_end4-_Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI5_1:
.long 0x42c80000 # float 100
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
movl $.L.str.1, %edi
movl $1024, %esi # imm = 0x400
movl $1024, %edx # imm = 0x400
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movl $32, %esi
movl $32, %edx
movl $1, %ecx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl $32, %esi
movl $32, %edx
movl $1, %ecx
xorl %eax, %eax
callq printf
movl $.Lstr.1, %edi
callq puts@PLT
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r13
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r12
leaq 40(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
movq %r13, %r14
.p2align 4, 0x90
.LBB5_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_2: # Parent Loop BB5_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI5_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss .LCPI5_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
movss %xmm0, (%r14,%r15,4)
incq %r15
cmpq $1024, %r15 # imm = 0x400
jne .LBB5_2
# %bb.3: # in Loop: Header=BB5_1 Depth=1
incq %rbx
addq $4096, %r14 # imm = 0x1000
cmpq $1024, %rbx # imm = 0x400
jne .LBB5_1
# %bb.4:
leaq 32(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 32(%rsp), %rdi
xorl %ebx, %ebx
xorl %esi, %esi
callq hipEventRecord
.p2align 4, 0x90
.LBB5_5: # %.preheader.i.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_6 Depth 2
# Child Loop BB5_7 Depth 3
movq %r13, %rax
movq %r12, %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_6: # %.preheader.i
# Parent Loop BB5_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB5_7 Depth 3
movq %rax, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB5_7: # Parent Loop BB5_5 Depth=1
# Parent Loop BB5_6 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rsi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rcx,%rdi,4)
incq %rdi
addq $4096, %rsi # imm = 0x1000
cmpq $1024, %rdi # imm = 0x400
jne .LBB5_7
# %bb.8: # %._crit_edge.i
# in Loop: Header=BB5_6 Depth=2
incq %rdx
addq $4096, %rcx # imm = 0x1000
addq $4, %rax
cmpq $1024, %rdx # imm = 0x400
jne .LBB5_6
# %bb.9: # %_Z8naiveCPUPfS_ii.exit
# in Loop: Header=BB5_5 Depth=1
incl %ebx
cmpl $100, %ebx
jne .LBB5_5
# %bb.10:
movq %r12, 152(%rsp) # 8-byte Spill
movabsq $137438953504, %r15 # imm = 0x2000000020
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r13, 160(%rsp) # 8-byte Spill
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r12d
leaq 64(%rsp), %r13
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r14
jmp .LBB5_11
.p2align 4, 0x90
.LBB5_13: # in Loop: Header=BB5_11 Depth=1
decl %r12d
je .LBB5_14
.LBB5_11: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_13
# %bb.12: # in Loop: Header=BB5_11 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z15matrixTransposePfS_ii, %edi
movq %r14, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_13
.LBB5_14:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq 160(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r12d
leaq 64(%rsp), %r13
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r14
jmp .LBB5_15
.p2align 4, 0x90
.LBB5_17: # in Loop: Header=BB5_15 Depth=1
decl %r12d
je .LBB5_18
.LBB5_15: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_17
# %bb.16: # in Loop: Header=BB5_15 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z21matrixTransposeSharedPfS_ii, %edi
movq %r14, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_17
.LBB5_18:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq 160(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r12d
leaq 64(%rsp), %r13
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r14
jmp .LBB5_19
.p2align 4, 0x90
.LBB5_21: # in Loop: Header=BB5_19 Depth=1
decl %r12d
je .LBB5_22
.LBB5_19: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_21
# %bb.20: # in Loop: Header=BB5_19 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z24matrixTransposeSharedwBCPfS_ii, %edi
movq %r14, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_21
.LBB5_22:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
movl $0, 4(%rsp)
movq 40(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq 160(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
xorl %esi, %esi
callq hipMemset
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %r13d
movabsq $34359738400, %r14 # imm = 0x800000020
leaq 56(%rsp), %rbp
leaq 48(%rsp), %rbx
leaq 112(%rsp), %r12
jmp .LBB5_23
.p2align 4, 0x90
.LBB5_25: # in Loop: Header=BB5_23 Depth=1
decl %r13d
je .LBB5_26
.LBB5_23: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_25
# %bb.24: # in Loop: Header=BB5_23 Depth=1
movq 40(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1024, 20(%rsp) # imm = 0x400
movl $1024, 16(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z23matrixTransposeUnrolledPfS_ii, %edi
movq %r12, %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_25
.LBB5_26:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq 152(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI5_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15matrixTransposePfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21matrixTransposeSharedPfS_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23matrixTransposeUnrolledPfS_ii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24matrixTransposeSharedwBCPfS_ii, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15matrixTransposePfS_ii,@object # @_Z15matrixTransposePfS_ii
.section .rodata,"a",@progbits
.globl _Z15matrixTransposePfS_ii
.p2align 3, 0x0
_Z15matrixTransposePfS_ii:
.quad _Z30__device_stub__matrixTransposePfS_ii
.size _Z15matrixTransposePfS_ii, 8
.type _Z21matrixTransposeSharedPfS_ii,@object # @_Z21matrixTransposeSharedPfS_ii
.globl _Z21matrixTransposeSharedPfS_ii
.p2align 3, 0x0
_Z21matrixTransposeSharedPfS_ii:
.quad _Z36__device_stub__matrixTransposeSharedPfS_ii
.size _Z21matrixTransposeSharedPfS_ii, 8
.type _Z23matrixTransposeUnrolledPfS_ii,@object # @_Z23matrixTransposeUnrolledPfS_ii
.globl _Z23matrixTransposeUnrolledPfS_ii
.p2align 3, 0x0
_Z23matrixTransposeUnrolledPfS_ii:
.quad _Z38__device_stub__matrixTransposeUnrolledPfS_ii
.size _Z23matrixTransposeUnrolledPfS_ii, 8
.type _Z24matrixTransposeSharedwBCPfS_ii,@object # @_Z24matrixTransposeSharedwBCPfS_ii
.globl _Z24matrixTransposeSharedwBCPfS_ii
.p2align 3, 0x0
_Z24matrixTransposeSharedwBCPfS_ii:
.quad _Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.size _Z24matrixTransposeSharedwBCPfS_ii, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Matrix: %d * %d\n"
.size .L.str.1, 17
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Grid: %d * %d * %d\n"
.size .L.str.2, 22
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Block: %d * %d * %d\n"
.size .L.str.3, 22
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nNaive CPU: %f\n"
.size .L.str.4, 16
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\nNaive GPU: %f\n"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nShared GPU: %f\n"
.size .L.str.6, 17
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "\nSharedwBC GPU: %f\n"
.size .L.str.7, 20
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\nUnrolled GPU: %f\n"
.size .L.str.8, 19
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z15matrixTransposePfS_ii"
.size .L__unnamed_1, 26
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21matrixTransposeSharedPfS_ii"
.size .L__unnamed_2, 32
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z23matrixTransposeUnrolledPfS_ii"
.size .L__unnamed_3, 34
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z24matrixTransposeSharedwBCPfS_ii"
.size .L__unnamed_4, 35
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.1,@object # @str.1
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.1:
.asciz "==================="
.size .Lstr.1, 20
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__matrixTransposePfS_ii
.addrsig_sym _Z36__device_stub__matrixTransposeSharedPfS_ii
.addrsig_sym _Z38__device_stub__matrixTransposeUnrolledPfS_ii
.addrsig_sym _Z39__device_stub__matrixTransposeSharedwBCPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15matrixTransposePfS_ii
.addrsig_sym _Z21matrixTransposeSharedPfS_ii
.addrsig_sym _Z23matrixTransposeUnrolledPfS_ii
.addrsig_sym _Z24matrixTransposeSharedwBCPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
// RUN: -fcuda-is-device -verify -fsyntax-only %s
#define __device__ __attribute__((device))
__int128 h_glb;
__device__ __int128 d_unused;
// expected-note@+1 {{'d_glb' defined here}}
__device__ __int128 d_glb;
__device__ __int128 bar() {
// expected-error@+1 {{'d_glb' requires 128 bit size '__int128' type support, but target 'spirv64' does not support it}}
return d_glb;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
// RUN: -fcuda-is-device -verify -fsyntax-only %s
#define __device__ __attribute__((device))
__int128 h_glb;
__device__ __int128 d_unused;
// expected-note@+1 {{'d_glb' defined here}}
__device__ __int128 d_glb;
__device__ __int128 bar() {
// expected-error@+1 {{'d_glb' requires 128 bit size '__int128' type support, but target 'spirv64' does not support it}}
return d_glb;
} | .file "tmpxft_0004820d_00000000-6_spirv-int128.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3barv
.type _Z3barv, @function
_Z3barv:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z3barv, .-_Z3barv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "d_unused"
.LC1:
.string "d_glb"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $16, %r9d
movl $0, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8d_unused(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $16, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL5d_glb(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL5d_glb
.comm _ZL5d_glb,16,16
.local _ZL8d_unused
.comm _ZL8d_unused,16,16
.globl h_glb
.bss
.align 16
.type h_glb, @object
.size h_glb, 16
h_glb:
.zero 16
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
// RUN: -fcuda-is-device -verify -fsyntax-only %s
#define __device__ __attribute__((device))
__int128 h_glb;
__device__ __int128 d_unused;
// expected-note@+1 {{'d_glb' defined here}}
__device__ __int128 d_glb;
__device__ __int128 bar() {
// expected-error@+1 {{'d_glb' requires 128 bit size '__int128' type support, but target 'spirv64' does not support it}}
return d_glb;
} | #include <hip/hip_runtime.h>
// RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
// RUN: -fcuda-is-device -verify -fsyntax-only %s
#define __device__ __attribute__((device))
__int128 h_glb;
__device__ __int128 d_unused;
// expected-note@+1 {{'d_glb' defined here}}
__device__ __int128 d_glb;
__device__ __int128 bar() {
// expected-error@+1 {{'d_glb' requires 128 bit size '__int128' type support, but target 'spirv64' does not support it}}
return d_glb;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
// RUN: -fcuda-is-device -verify -fsyntax-only %s
#define __device__ __attribute__((device))
__int128 h_glb;
__device__ __int128 d_unused;
// expected-note@+1 {{'d_glb' defined here}}
__device__ __int128 d_glb;
__device__ __int128 bar() {
// expected-error@+1 {{'d_glb' requires 128 bit size '__int128' type support, but target 'spirv64' does not support it}}
return d_glb;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected d_unused
.type d_unused,@object
.section .bss,"aw",@nobits
.globl d_unused
.p2align 4, 0x0
d_unused:
.quad 0
.quad 0
.size d_unused, 16
.protected d_glb
.type d_glb,@object
.globl d_glb
.p2align 4, 0x0
d_glb:
.quad 0
.quad 0
.size d_glb, 16
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym d_glb
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
// RUN: -fcuda-is-device -verify -fsyntax-only %s
#define __device__ __attribute__((device))
__int128 h_glb;
__device__ __int128 d_unused;
// expected-note@+1 {{'d_glb' defined here}}
__device__ __int128 d_glb;
__device__ __int128 bar() {
// expected-error@+1 {{'d_glb' requires 128 bit size '__int128' type support, but target 'spirv64' does not support it}}
return d_glb;
} | .text
.file "spirv-int128.hip"
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB0_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB0_2:
movq __hip_gpubin_handle(%rip), %rbx
movl $d_unused, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $16, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $d_glb, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $16, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end0:
.size __hip_module_ctor, .Lfunc_end0-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB1_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB1_2:
retq
.Lfunc_end1:
.size __hip_module_dtor, .Lfunc_end1-__hip_module_dtor
.cfi_endproc
# -- End function
.type h_glb,@object # @h_glb
.bss
.globl h_glb
.p2align 4, 0x0
h_glb:
.quad 0
.quad 0
.size h_glb, 16
.type d_unused,@object # @d_unused
.local d_unused
.comm d_unused,16,16
.type d_glb,@object # @d_glb
.local d_glb
.comm d_glb,16,16
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "d_unused"
.size .L__unnamed_1, 9
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "d_glb"
.size .L__unnamed_2, 6
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_unused
.addrsig_sym d_glb
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected d_unused
.type d_unused,@object
.section .bss,"aw",@nobits
.globl d_unused
.p2align 4, 0x0
d_unused:
.quad 0
.quad 0
.size d_unused, 16
.protected d_glb
.type d_glb,@object
.globl d_glb
.p2align 4, 0x0
d_glb:
.quad 0
.quad 0
.size d_glb, 16
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym d_glb
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004820d_00000000-6_spirv-int128.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3barv
.type _Z3barv, @function
_Z3barv:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z3barv, .-_Z3barv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "d_unused"
.LC1:
.string "d_glb"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $16, %r9d
movl $0, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8d_unused(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $16, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL5d_glb(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL5d_glb
.comm _ZL5d_glb,16,16
.local _ZL8d_unused
.comm _ZL8d_unused,16,16
.globl h_glb
.bss
.align 16
.type h_glb, @object
.size h_glb, 16
h_glb:
.zero 16
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "spirv-int128.hip"
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB0_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB0_2:
movq __hip_gpubin_handle(%rip), %rbx
movl $d_unused, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $16, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $d_glb, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $16, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end0:
.size __hip_module_ctor, .Lfunc_end0-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB1_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB1_2:
retq
.Lfunc_end1:
.size __hip_module_dtor, .Lfunc_end1-__hip_module_dtor
.cfi_endproc
# -- End function
.type h_glb,@object # @h_glb
.bss
.globl h_glb
.p2align 4, 0x0
h_glb:
.quad 0
.quad 0
.size h_glb, 16
.type d_unused,@object # @d_unused
.local d_unused
.comm d_unused,16,16
.type d_glb,@object # @d_glb
.local d_glb
.comm d_glb,16,16
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "d_unused"
.size .L__unnamed_1, 9
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "d_glb"
.size .L__unnamed_2, 6
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_unused
.addrsig_sym d_glb
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n,float* a,float* b){
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for(int i=index;i<n;i+=stride)
a[i] = a[i]+b[i];
}
int main(void){
int N=1<<20;
float *x,*y;
cudaMallocManaged(&x,N,sizeof(float)*N);
cudaMallocManaged(&y,N,sizeof(float)*N);
for(int i=0;i<N;i++){
x[i] = 1.f;
y[i] = 2.f;
}
int blockSize = 256;
int numBlocks = (N+blockSize-1)/blockSize;
add<<<numBlocks,blockSize>>>(N,x,y);
cudaDeviceSynchronize();
float maxError = 0.0f;
for(int i=0;i<N;i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
cudaFree(x);
cudaFree(y);
return 0;
} | code for sm_80
Function : _Z3addiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x320 ; /* 0x0000029000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x160], R0 ; /* 0x0000580007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.MOV R4, RZ, RZ, -R2 ; /* 0x000000ffff047224 */
/* 0x000fc800078e0a02 */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x310 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x4 ; /* 0x0000000400067802 */
/* 0x000fe20000000f00 */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc800078e0004 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x170] ; /* 0x00005c0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0206 */
/*0270*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x0000a8000c1e1900 */
/*0280*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */
/* 0x000ea2000c1e1900 */
/*0290*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x001fc800078e0204 */
/*02d0*/ FADD R9, R8, R9 ; /* 0x0000000908097221 */
/* 0x004fca0000000000 */
/*02e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e4000c101904 */
/*02f0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fe200078e0206 */
/*0300*/ @P0 BRA 0x270 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0310*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0320*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0330*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fd400000001ff */
/*0340*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x002fc800078e0206 */
/*0350*/ IMAD.WIDE R6, R3, R6, c[0x0][0x170] ; /* 0x00005c0003067625 */
/* 0x000fe200078e0206 */
/*0360*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ea8000c1e1900 */
/*0370*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0206 */
/*0390*/ FADD R17, R2, R9 ; /* 0x0000000902117221 */
/* 0x004fe40000000000 */
/*03a0*/ IMAD.WIDE R8, R0, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x000fc600078e0204 */
/*03b0*/ STG.E [R4.64], R17 ; /* 0x0000001104007986 */
/* 0x0001e8000c101904 */
/*03c0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */
/* 0x000ea8000c1e1900 */
/*03d0*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ IMAD.WIDE R14, R0, 0x4, R10 ; /* 0x00000004000e7825 */
/* 0x000fc800078e020a */
/*03f0*/ FADD R19, R2, R13 ; /* 0x0000000d02137221 */
/* 0x004fe40000000000 */
/*0400*/ IMAD.WIDE R12, R0, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x000fc600078e0208 */
/*0410*/ STG.E [R8.64], R19 ; /* 0x0000001308007986 */
/* 0x0003e8000c101904 */
/*0420*/ LDG.E R2, [R14.64] ; /* 0x000000040e027981 */
/* 0x000ea8000c1e1900 */
/*0430*/ LDG.E R7, [R12.64] ; /* 0x000000040c077981 */
/* 0x000ea2000c1e1900 */
/*0440*/ IMAD.WIDE R4, R0, 0x4, R12 ; /* 0x0000000400047825 */
/* 0x001fc800078e020c */
/*0450*/ FADD R21, R2, R7 ; /* 0x0000000702157221 */
/* 0x004fe40000000000 */
/*0460*/ IMAD.WIDE R6, R0, 0x4, R14 ; /* 0x0000000400067825 */
/* 0x000fc600078e020e */
/*0470*/ STG.E [R12.64], R21 ; /* 0x000000150c007986 */
/* 0x0003e8000c101904 */
/*0480*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea8000c1e1900 */
/*0490*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x000ea2000c1e1900 */
/*04a0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04b0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04c0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fe20003f06270 */
/*04d0*/ FADD R11, R6, R11 ; /* 0x0000000b060b7221 */
/* 0x004fca0000000000 */
/*04e0*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x0003ee000c101904 */
/*04f0*/ @!P0 BRA 0x330 ; /* 0xfffffe3000008947 */
/* 0x000fea000383ffff */
/*0500*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0510*/ BRA 0x510; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n,float* a,float* b){
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for(int i=index;i<n;i+=stride)
a[i] = a[i]+b[i];
}
int main(void){
int N=1<<20;
float *x,*y;
cudaMallocManaged(&x,N,sizeof(float)*N);
cudaMallocManaged(&y,N,sizeof(float)*N);
for(int i=0;i<N;i++){
x[i] = 1.f;
y[i] = 2.f;
}
int blockSize = 256;
int numBlocks = (N+blockSize-1)/blockSize;
add<<<numBlocks,blockSize>>>(N,x,y);
cudaDeviceSynchronize();
float maxError = 0.0f;
for(int i=0;i<N;i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
cudaFree(x);
cudaFree(y);
return 0;
} | .file "tmpxft_0007086c_00000000-6_unifie.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3addiPfS_iPfS_
.type _Z25__device_stub__Z3addiPfS_iPfS_, @function
_Z25__device_stub__Z3addiPfS_iPfS_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z3addiPfS_iPfS_, .-_Z25__device_stub__Z3addiPfS_iPfS_
.globl _Z3addiPfS_
.type _Z3addiPfS_, @function
_Z3addiPfS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addiPfS_, .-_Z3addiPfS_
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $4194304, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
leaq 8(%rsp), %rdi
movl $4194304, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L12:
movq (%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movq 8(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq $4194304, %rax
jne .L12
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $4096, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaDeviceSynchronize@PLT
movl $1048576, %eax
.L14:
subl $1, %eax
jne .L14
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movl $1048576, %edi
call _Z25__device_stub__Z3addiPfS_iPfS_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "_Z3addiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.align 4
.LC1:
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n,float* a,float* b){
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for(int i=index;i<n;i+=stride)
a[i] = a[i]+b[i];
}
int main(void){
int N=1<<20;
float *x,*y;
cudaMallocManaged(&x,N,sizeof(float)*N);
cudaMallocManaged(&y,N,sizeof(float)*N);
for(int i=0;i<N;i++){
x[i] = 1.f;
y[i] = 2.f;
}
int blockSize = 256;
int numBlocks = (N+blockSize-1)/blockSize;
add<<<numBlocks,blockSize>>>(N,x,y);
cudaDeviceSynchronize();
float maxError = 0.0f;
for(int i=0;i<N;i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
cudaFree(x);
cudaFree(y);
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n,float* a,float* b){
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for(int i=index;i<n;i+=stride)
a[i] = a[i]+b[i];
}
// Host driver: allocates two managed float arrays, fills them on the CPU,
// launches the grid-stride add kernel, scans for errors, and frees memory.
// Returns 0 unconditionally; maxError is computed but unused (as before).
int main(void){
    int N = 1 << 20;
    float *x, *y;
    // FIX: hipMallocManaged(ptr, sizeBytes[, flags]) takes a byte count as
    // its second argument.  The original call passed N (the element count)
    // as the byte count and sizeof(float)*N as the flags, so only N bytes
    // were allocated and the initialization loop below overflowed the heap.
    hipMallocManaged(&x, sizeof(float) * N);
    hipMallocManaged(&y, sizeof(float) * N);
    // Managed memory is host-accessible: initialize inputs on the CPU.
    for (int i = 0; i < N; i++) {
        x[i] = 1.f;
        y[i] = 2.f;
    }
    // Round the grid up so every element is covered; the kernel's
    // grid-stride loop tolerates any launch geometry.
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(N, x, y);
    hipDeviceSynchronize();
    // NOTE(review): the kernel writes its sum into x (first pointer); y is
    // still 2.f, so this check mirrors the original code's behavior exactly.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    hipFree(x);
    hipFree(y);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
using namespace std;
// Device kernel: element-wise vector add, result stored into a (first arg).
__global__ void add(int n,float* a,float* b){
// Global thread id and total number of threads in the launch grid.
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
// Grid-stride loop: each thread covers index, index+stride, ... up to n,
// so any grid size processes all n elements exactly once.
for(int i=index;i<n;i+=stride)
a[i] = a[i]+b[i];
}
// Host driver for the HIP add kernel: allocate, initialize, launch, check.
int main(void){
int N=1<<20;
float *x,*y;
// NOTE(review): hipMallocManaged's 2nd argument is a byte count and the 3rd
// is a flags value; passing N here allocates only N bytes for N floats, so
// the init loop below overruns the buffers.  Likely intended:
// hipMallocManaged(&x, N*sizeof(float)); — confirm.
hipMallocManaged(&x,N,sizeof(float)*N);
hipMallocManaged(&y,N,sizeof(float)*N);
// Managed memory is host-accessible; initialize inputs on the CPU.
for(int i=0;i<N;i++){
x[i] = 1.f;
y[i] = 2.f;
}
// Grid rounded up so every element is covered.
int blockSize = 256;
int numBlocks = (N+blockSize-1)/blockSize;
add<<<numBlocks,blockSize>>>(N,x,y);
hipDeviceSynchronize();
// NOTE(review): the kernel writes into x, so y still holds 2.f; this check
// against 3.0f always measures 1.0 and its result is never used — confirm.
float maxError = 0.0f;
for(int i=0;i<N;i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
hipFree(x);
hipFree(y);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addiPfS_
.globl _Z3addiPfS_
.p2align 8
.type _Z3addiPfS_,@function
; _Z3addiPfS_ — gfx1100 (wave32) kernel for add(int n, float *a, float *b):
; grid-stride loop, a[i] = a[i] + b[i] for i in [tid, n) step gridDim*blockDim.
; Kernarg layout (per the amdgpu_metadata in this file): +0x00 n, +0x08 a,
; +0x10 b, +24 hidden_block_count_x, +36 hidden_group_size_x (16-bit packed).
; s[0:1] = kernarg base, s15 = workgroup id x, v0 = workitem id x.
_Z3addiPfS_:
s_clause 0x1
; s4 = packed group size (x in low 16 bits), s10 = n
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x0
; s[2:3] = &kernarg[24] (hidden_block_count_x, i.e. gridDim.x)
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
; s8 = blockDim.x (low half of the packed group size)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
; v1 = workgroup_id.x * blockDim.x + workitem_id.x  -> global thread id
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
; keep only lanes with tid < n; skip the whole loop if none remain
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_3
; s2 = gridDim.x, s[4:7] = the two float pointers a and b
s_load_b32 s2, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; v[2:3] = (int64)tid << 2  -> byte offset into the float arrays
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
; s2 = stride in elements = gridDim.x * blockDim.x; s[8:9] = stride in bytes
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 2
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1)
; v[4:5] = &a[i], v[6:7] = &b[i]
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
; advance the 64-bit byte offset by one full grid stride
v_add_co_u32 v2, vcc_lo, v2, s8
global_load_b32 v0, v[4:5], off
global_load_b32 v6, v[6:7], off
; i += stride (32-bit element index kept for the loop-bound test)
v_add_nc_u32_e32 v1, s2, v1
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_waitcnt vmcnt(0)
; a[i] = a[i] + b[i]
v_add_f32_e32 v0, v0, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; retire lanes whose next i >= n; loop while any lane is still active
v_cmp_le_i32_e64 s0, s10, v1
global_store_b32 v[4:5], v0, off
s_or_b32 s1, s0, s1
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addiPfS_, .Lfunc_end0-_Z3addiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
using namespace std;
// Each thread sums its strided slice of the arrays: a[i] = a[i] + b[i].
__global__ void add(int n,float* a,float* b){
    int i = blockDim.x * blockIdx.x + threadIdx.x;  // this thread's first index
    const int step = gridDim.x * blockDim.x;        // one hop = whole grid
    while (i < n) {
        a[i] = a[i] + b[i];
        i += step;
    }
}
// Host driver: managed allocation, CPU-side init, kernel launch, cleanup.
int main(void){
int N=1<<20;
float *x,*y;
// NOTE(review): second argument of hipMallocManaged is a byte count; N here
// reserves N bytes for N floats (4x too small) and sizeof(float)*N lands in
// the flags parameter.  Likely intended N*sizeof(float) — confirm.
hipMallocManaged(&x,N,sizeof(float)*N);
hipMallocManaged(&y,N,sizeof(float)*N);
// Initialize both input arrays from the host.
for(int i=0;i<N;i++){
x[i] = 1.f;
y[i] = 2.f;
}
// One thread per element, grid rounded up.
int blockSize = 256;
int numBlocks = (N+blockSize-1)/blockSize;
add<<<numBlocks,blockSize>>>(N,x,y);
hipDeviceSynchronize();
// NOTE(review): kernel output goes to x; y is unchanged, so maxError is
// always 1.0 and is never printed or returned — confirm intent.
float maxError = 0.0f;
for(int i=0;i<N;i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
hipFree(x);
hipFree(y);
return 0;
} | .text
.file "unifie.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addiPfS_ # -- Begin function _Z18__device_stub__addiPfS_
.p2align 4, 0x90
.type _Z18__device_stub__addiPfS_,@function
_Z18__device_stub__addiPfS_: # @_Z18__device_stub__addiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addiPfS_, .Lfunc_end0-_Z18__device_stub__addiPfS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
leaq 16(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $4194304, %edx # imm = 0x400000
callq hipMallocManaged
leaq 8(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $4194304, %edx # imm = 0x400000
callq hipMallocManaged
movq 16(%rsp), %rax
xorl %ecx, %ecx
movq 8(%rsp), %rdx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rax,%rcx,4) # imm = 0x3F800000
movl $1073741824, (%rdx,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq $1048576, %rcx # imm = 0x100000
jne .LBB1_1
# %bb.2:
movabsq $4294967552, %rdx # imm = 0x100000100
leaq 3840(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl $1048576, 28(%rsp) # imm = 0x100000
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor registered via atexit by __hip_module_ctor: unregisters
# the HIP fat binary exactly once and clears the cached handle.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
# rdi = cached fatbin handle; skip everything if the ctor never ran (null).
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
# Dummy push keeps %rsp 16-byte aligned at the call site (SysV AMD64 ABI).
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
# Null the handle so a second invocation is a no-op.
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addiPfS_,@object # @_Z3addiPfS_
.section .rodata,"a",@progbits
.globl _Z3addiPfS_
.p2align 3, 0x0
_Z3addiPfS_:
.quad _Z18__device_stub__addiPfS_
.size _Z3addiPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3addiPfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addiPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x320 ; /* 0x0000029000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x160], R0 ; /* 0x0000580007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.MOV R4, RZ, RZ, -R2 ; /* 0x000000ffff047224 */
/* 0x000fc800078e0a02 */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x310 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x4 ; /* 0x0000000400067802 */
/* 0x000fe20000000f00 */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc800078e0004 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x170] ; /* 0x00005c0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0206 */
/*0270*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x0000a8000c1e1900 */
/*0280*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */
/* 0x000ea2000c1e1900 */
/*0290*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x001fc800078e0204 */
/*02d0*/ FADD R9, R8, R9 ; /* 0x0000000908097221 */
/* 0x004fca0000000000 */
/*02e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e4000c101904 */
/*02f0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fe200078e0206 */
/*0300*/ @P0 BRA 0x270 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0310*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0320*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0330*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fd400000001ff */
/*0340*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x002fc800078e0206 */
/*0350*/ IMAD.WIDE R6, R3, R6, c[0x0][0x170] ; /* 0x00005c0003067625 */
/* 0x000fe200078e0206 */
/*0360*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ea8000c1e1900 */
/*0370*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0206 */
/*0390*/ FADD R17, R2, R9 ; /* 0x0000000902117221 */
/* 0x004fe40000000000 */
/*03a0*/ IMAD.WIDE R8, R0, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x000fc600078e0204 */
/*03b0*/ STG.E [R4.64], R17 ; /* 0x0000001104007986 */
/* 0x0001e8000c101904 */
/*03c0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */
/* 0x000ea8000c1e1900 */
/*03d0*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ IMAD.WIDE R14, R0, 0x4, R10 ; /* 0x00000004000e7825 */
/* 0x000fc800078e020a */
/*03f0*/ FADD R19, R2, R13 ; /* 0x0000000d02137221 */
/* 0x004fe40000000000 */
/*0400*/ IMAD.WIDE R12, R0, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x000fc600078e0208 */
/*0410*/ STG.E [R8.64], R19 ; /* 0x0000001308007986 */
/* 0x0003e8000c101904 */
/*0420*/ LDG.E R2, [R14.64] ; /* 0x000000040e027981 */
/* 0x000ea8000c1e1900 */
/*0430*/ LDG.E R7, [R12.64] ; /* 0x000000040c077981 */
/* 0x000ea2000c1e1900 */
/*0440*/ IMAD.WIDE R4, R0, 0x4, R12 ; /* 0x0000000400047825 */
/* 0x001fc800078e020c */
/*0450*/ FADD R21, R2, R7 ; /* 0x0000000702157221 */
/* 0x004fe40000000000 */
/*0460*/ IMAD.WIDE R6, R0, 0x4, R14 ; /* 0x0000000400067825 */
/* 0x000fc600078e020e */
/*0470*/ STG.E [R12.64], R21 ; /* 0x000000150c007986 */
/* 0x0003e8000c101904 */
/*0480*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea8000c1e1900 */
/*0490*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x000ea2000c1e1900 */
/*04a0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04b0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04c0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fe20003f06270 */
/*04d0*/ FADD R11, R6, R11 ; /* 0x0000000b060b7221 */
/* 0x004fca0000000000 */
/*04e0*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x0003ee000c101904 */
/*04f0*/ @!P0 BRA 0x330 ; /* 0xfffffe3000008947 */
/* 0x000fea000383ffff */
/*0500*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0510*/ BRA 0x510; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addiPfS_
.globl _Z3addiPfS_
.p2align 8
.type _Z3addiPfS_,@function
_Z3addiPfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 2
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s8
global_load_b32 v0, v[4:5], off
global_load_b32 v6, v[6:7], off
v_add_nc_u32_e32 v1, s2, v1
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e64 s0, s10, v1
global_store_b32 v[4:5], v0, off
s_or_b32 s1, s0, s1
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addiPfS_, .Lfunc_end0-_Z3addiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007086c_00000000-6_unifie.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3addiPfS_iPfS_
.type _Z25__device_stub__Z3addiPfS_iPfS_, @function
_Z25__device_stub__Z3addiPfS_iPfS_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z3addiPfS_iPfS_, .-_Z25__device_stub__Z3addiPfS_iPfS_
.globl _Z3addiPfS_
.type _Z3addiPfS_, @function
_Z3addiPfS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addiPfS_, .-_Z3addiPfS_
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $4194304, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
leaq 8(%rsp), %rdi
movl $4194304, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L12:
movq (%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movq 8(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq $4194304, %rax
jne .L12
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $4096, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaDeviceSynchronize@PLT
movl $1048576, %eax
.L14:
subl $1, %eax
jne .L14
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movl $1048576, %edi
call _Z25__device_stub__Z3addiPfS_iPfS_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "_Z3addiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.align 4
.LC1:
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "unifie.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addiPfS_ # -- Begin function _Z18__device_stub__addiPfS_
.p2align 4, 0x90
.type _Z18__device_stub__addiPfS_,@function
_Z18__device_stub__addiPfS_: # @_Z18__device_stub__addiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addiPfS_, .Lfunc_end0-_Z18__device_stub__addiPfS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
leaq 16(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $4194304, %edx # imm = 0x400000
callq hipMallocManaged
leaq 8(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $4194304, %edx # imm = 0x400000
callq hipMallocManaged
movq 16(%rsp), %rax
xorl %ecx, %ecx
movq 8(%rsp), %rdx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rax,%rcx,4) # imm = 0x3F800000
movl $1073741824, (%rdx,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq $1048576, %rcx # imm = 0x100000
jne .LBB1_1
# %bb.2:
movabsq $4294967552, %rdx # imm = 0x100000100
leaq 3840(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl $1048576, 28(%rsp) # imm = 0x100000
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addiPfS_,@object # @_Z3addiPfS_
.section .rodata,"a",@progbits
.globl _Z3addiPfS_
.p2align 3, 0x0
_Z3addiPfS_:
.quad _Z18__device_stub__addiPfS_
.size _Z3addiPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3addiPfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addiPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cuda.h>
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// GPU kernel: element-wise vector add, C[i] = A[i] + B[i].
// One thread per element; i is the thread's global linear index.
__global__ void vecAdd(int *A, int *B, int *C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: N was previously unused, so any launch whose total
    // thread count exceeded N would read/write past the buffers.
    if (i < N)
        C[i] = A[i] + B[i];
}
// CPU reference implementation of the GPU kernel: C1[i] = A1[i] + B1[i].
// BUG FIX: the loop body computed A1[i] * B1[i] even though the function
// name and the "Doing CPU Vector add" call site promise addition, so the
// CPU and GPU results could never agree. Loop is a no-op when N <= 0.
void vecAdd_h(int *A1, int *B1, int *C1, int N)
{
    for (int i = 0; i < N; i++)
        C1[i] = A1[i] + B1[i];
}
// Driver: adds two 10M-element vectors on the GPU and on the CPU,
// timing both, then copies the GPU result back and prints the timings.
int main(int argc, char **argv)
{
    printf("Begin \n");
    int n = 10000000;                    // element count
    int nBytes = n * sizeof(int);        // buffer size in bytes
    int block_size, block_no;

    // Host buffers: a/b inputs, c GPU result, c2 CPU result.
    a  = (int *)malloc(nBytes);
    b  = (int *)malloc(nBytes);
    c  = (int *)malloc(nBytes);
    c2 = (int *)malloc(nBytes);

    int *a_d, *b_d, *c_d;
    // BUG FIX: block_size was 4000, beyond the 1024 threads-per-block
    // hardware limit, so every kernel launch failed; 1000 divides n evenly.
    block_size = 1000;
    block_no = n / block_size;
    dim3 dimBlock(block_size, 1, 1);
    dim3 dimGrid(block_no, 1, 1);

    for (int i = 0; i < n; i++)
        a[i] = i, b[i] = i;

    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d, nBytes);
    cudaMalloc((void **)&b_d, nBytes);
    cudaMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    cudaMemcpy(a_d, a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, nBytes, cudaMemcpyHostToDevice);

    clock_t start_d = clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
    cudaDeviceSynchronize();             // cudaThreadSynchronize() is deprecated
    clock_t end_d = clock();

    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a, b, c2, n);
    clock_t end_h = clock();

    double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
    double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;

    cudaMemcpy(c, c_d, nBytes, cudaMemcpyDeviceToHost);
    printf("%d %f %f\n", n, time_d, time_h);

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    // BUG FIX: host buffers were leaked.
    free(a); free(b); free(c); free(c2);
    return 0;
}
Function : _Z6vecAddPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cuda.h>
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// GPU kernel: element-wise vector add, C[i] = A[i] + B[i].
// One thread per element; i is the thread's global linear index.
__global__ void vecAdd(int *A, int *B, int *C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: N was previously unused, so any launch whose total
    // thread count exceeded N would read/write past the buffers.
    if (i < N)
        C[i] = A[i] + B[i];
}
// CPU reference implementation of the GPU kernel: C1[i] = A1[i] + B1[i].
// BUG FIX: the loop body computed A1[i] * B1[i] even though the function
// name and the "Doing CPU Vector add" call site promise addition, so the
// CPU and GPU results could never agree. Loop is a no-op when N <= 0.
void vecAdd_h(int *A1, int *B1, int *C1, int N)
{
    for (int i = 0; i < N; i++)
        C1[i] = A1[i] + B1[i];
}
// Driver: adds two 10M-element vectors on the GPU and on the CPU,
// timing both, then copies the GPU result back and prints the timings.
int main(int argc, char **argv)
{
    printf("Begin \n");
    int n = 10000000;                    // element count
    int nBytes = n * sizeof(int);        // buffer size in bytes
    int block_size, block_no;

    // Host buffers: a/b inputs, c GPU result, c2 CPU result.
    a  = (int *)malloc(nBytes);
    b  = (int *)malloc(nBytes);
    c  = (int *)malloc(nBytes);
    c2 = (int *)malloc(nBytes);

    int *a_d, *b_d, *c_d;
    // BUG FIX: block_size was 4000, beyond the 1024 threads-per-block
    // hardware limit, so every kernel launch failed; 1000 divides n evenly.
    block_size = 1000;
    block_no = n / block_size;
    dim3 dimBlock(block_size, 1, 1);
    dim3 dimGrid(block_no, 1, 1);

    for (int i = 0; i < n; i++)
        a[i] = i, b[i] = i;

    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d, nBytes);
    cudaMalloc((void **)&b_d, nBytes);
    cudaMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    cudaMemcpy(a_d, a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, nBytes, cudaMemcpyHostToDevice);

    clock_t start_d = clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
    cudaDeviceSynchronize();             // cudaThreadSynchronize() is deprecated
    clock_t end_d = clock();

    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a, b, c2, n);
    clock_t end_h = clock();

    double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
    double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;

    cudaMemcpy(c, c_d, nBytes, cudaMemcpyDeviceToHost);
    printf("%d %f %f\n", n, time_d, time_h);

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    // BUG FIX: host buffers were leaked.
    free(a); free(b); free(c); free(c2);
    return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# _Z8vecAdd_hPiS_S_i — GCC output for the C++ helper
#   void vecAdd_h(int *A1, int *B1, int *C1, int N)
# SysV AMD64: rdi = A1, rsi = B1, rdx = C1, ecx = N; clobbers rax, rcx, r8.
# NOTE(review): despite the "Add" name, the body multiplies (imull),
# faithfully matching the C++ source `C1[i] = A1[i] * B1[i]`.
.globl _Z8vecAdd_hPiS_S_i
.type _Z8vecAdd_hPiS_S_i, @function
_Z8vecAdd_hPiS_S_i:
.LFB3669:
.cfi_startproc
endbr64
# N <= 0 (signed compare) -> nothing to do.
testl %ecx, %ecx
jle .L3
# r8 = N * 4 = total byte count to process.
movslq %ecx, %rcx
leaq 0(,%rcx,4), %r8
# rax = running byte offset into all three arrays.
movl $0, %eax
.L5:
# ecx = A1[i] * B1[i]; store into C1[i].
movl (%rdi,%rax), %ecx
imull (%rsi,%rax), %ecx
movl %ecx, (%rdx,%rax)
addq $4, %rax
cmpq %r8, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE3669:
.size _Z8vecAdd_hPiS_S_i, .-_Z8vecAdd_hPiS_S_i
.globl _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
.type _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i, @function
_Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i:
.LFB3695:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i, .-_Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
.globl _Z6vecAddPiS_S_i
.type _Z6vecAddPiS_S_i, @function
_Z6vecAddPiS_S_i:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z6vecAddPiS_S_i, .-_Z6vecAddPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Begin \n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Allocating device memory on host..\n"
.section .rodata.str1.1
.LC2:
.string "Copying to device..\n"
.LC3:
.string "Doing GPU Vector add\n"
.LC4:
.string "Doing CPU Vector add\n"
.LC6:
.string "%d %f %f\n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $40000000, %edi
call malloc@PLT
movq %rax, a(%rip)
movl $40000000, %edi
call malloc@PLT
movq %rax, b(%rip)
movl $40000000, %edi
call malloc@PLT
movq %rax, c(%rip)
movl $40000000, %edi
call malloc@PLT
movq %rax, c2(%rip)
movl $0, %eax
.L16:
movq a(%rip), %rdx
movl %eax, (%rdx,%rax,4)
movq b(%rip), %rdx
movl %eax, (%rdx,%rax,4)
addq $1, %rax
cmpq $10000000, %rax
jne .L16
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 24(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $40000000, %edx
movq a(%rip), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000000, %edx
movq b(%rip), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, %r12
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4000, 60(%rsp)
movl $1, 64(%rsp)
movl $2500, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
call cudaThreadSynchronize@PLT
call clock@PLT
movq %rax, %rbx
call clock@PLT
movq %rax, %rbp
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $10000000, %ecx
movq c2(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z8vecAdd_hPiS_S_i
call clock@PLT
subq %r12, %rbx
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
movsd .LC5(%rip), %xmm1
divsd %xmm1, %xmm0
movq %xmm0, %rbx
subq %rbp, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd %xmm1, %xmm0
movsd %xmm0, 8(%rsp)
movl $2, %ecx
movl $40000000, %edx
movq 40(%rsp), %rsi
movq c(%rip), %rdi
call cudaMemcpy@PLT
movsd 8(%rsp), %xmm1
movq %rbx, %xmm0
movl $10000000, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movl $10000000, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z6vecAddPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl c2
.bss
.align 8
.type c2, @object
.size c2, 8
c2:
.zero 8
.globl c
.align 8
.type c, @object
.size c, 8
c:
.zero 8
.globl b
.align 8
.type b, @object
.size b, 8
b:
.zero 8
.globl a
.align 8
.type a, @object
.size a, 8
a:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC5:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cuda.h>
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// GPU kernel: element-wise vector add, C[i] = A[i] + B[i].
// One thread per element; i is the thread's global linear index.
__global__ void vecAdd(int *A, int *B, int *C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: N was previously unused, so any launch whose total
    // thread count exceeded N would read/write past the buffers.
    if (i < N)
        C[i] = A[i] + B[i];
}
// CPU reference implementation of the GPU kernel: C1[i] = A1[i] + B1[i].
// BUG FIX: the loop body computed A1[i] * B1[i] even though the function
// name and the "Doing CPU Vector add" call site promise addition, so the
// CPU and GPU results could never agree. Loop is a no-op when N <= 0.
void vecAdd_h(int *A1, int *B1, int *C1, int N)
{
    for (int i = 0; i < N; i++)
        C1[i] = A1[i] + B1[i];
}
// Driver: adds two 10M-element vectors on the GPU and on the CPU,
// timing both, then copies the GPU result back and prints the timings.
int main(int argc, char **argv)
{
    printf("Begin \n");
    int n = 10000000;                    // element count
    int nBytes = n * sizeof(int);        // buffer size in bytes
    int block_size, block_no;

    // Host buffers: a/b inputs, c GPU result, c2 CPU result.
    a  = (int *)malloc(nBytes);
    b  = (int *)malloc(nBytes);
    c  = (int *)malloc(nBytes);
    c2 = (int *)malloc(nBytes);

    int *a_d, *b_d, *c_d;
    // BUG FIX: block_size was 4000, beyond the 1024 threads-per-block
    // hardware limit, so every kernel launch failed; 1000 divides n evenly.
    block_size = 1000;
    block_no = n / block_size;
    dim3 dimBlock(block_size, 1, 1);
    dim3 dimGrid(block_no, 1, 1);

    for (int i = 0; i < n; i++)
        a[i] = i, b[i] = i;

    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d, nBytes);
    cudaMalloc((void **)&b_d, nBytes);
    cudaMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    cudaMemcpy(a_d, a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, nBytes, cudaMemcpyHostToDevice);

    clock_t start_d = clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
    cudaDeviceSynchronize();             // cudaThreadSynchronize() is deprecated
    clock_t end_d = clock();

    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a, b, c2, n);
    clock_t end_h = clock();

    double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
    double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;

    cudaMemcpy(c, c_d, nBytes, cudaMemcpyDeviceToHost);
    printf("%d %f %f\n", n, time_d, time_h);

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    // BUG FIX: host buffers were leaked.
    free(a); free(b); free(c); free(c2);
    return 0;
}
#include <hip/hip_runtime.h>
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// GPU kernel: element-wise vector add, C[i] = A[i] + B[i].
// One thread per element; i is the work-item's global linear index.
__global__ void vecAdd(int *A, int *B, int *C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: N was previously unused, so any launch whose total
    // thread count exceeded N would read/write past the buffers.
    if (i < N)
        C[i] = A[i] + B[i];
}
// CPU reference implementation of the GPU kernel: C1[i] = A1[i] + B1[i].
// BUG FIX: the loop body computed A1[i] * B1[i] even though the function
// name and the "Doing CPU Vector add" call site promise addition, so the
// CPU and GPU results could never agree. Loop is a no-op when N <= 0.
void vecAdd_h(int *A1, int *B1, int *C1, int N)
{
    for (int i = 0; i < N; i++)
        C1[i] = A1[i] + B1[i];
}
// Driver: adds two 10M-element vectors on the GPU and on the CPU,
// timing both, then copies the GPU result back and prints the timings.
int main(int argc, char **argv)
{
    printf("Begin \n");
    int n = 10000000;                    // element count
    int nBytes = n * sizeof(int);        // buffer size in bytes
    int block_size, block_no;

    // Host buffers: a/b inputs, c GPU result, c2 CPU result.
    a  = (int *)malloc(nBytes);
    b  = (int *)malloc(nBytes);
    c  = (int *)malloc(nBytes);
    c2 = (int *)malloc(nBytes);

    int *a_d, *b_d, *c_d;
    // BUG FIX: block_size was 4000, beyond the 1024 work-items-per-group
    // limit (max_flat_workgroup_size in the kernel metadata), so the
    // kernel launch failed; 1000 divides n evenly.
    block_size = 1000;
    block_no = n / block_size;
    dim3 dimBlock(block_size, 1, 1);
    dim3 dimGrid(block_no, 1, 1);

    for (int i = 0; i < n; i++)
        a[i] = i, b[i] = i;

    printf("Allocating device memory on host..\n");
    hipMalloc((void **)&a_d, nBytes);
    hipMalloc((void **)&b_d, nBytes);
    hipMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    hipMemcpy(a_d, a, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b, nBytes, hipMemcpyHostToDevice);

    clock_t start_d = clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
    hipDeviceSynchronize();              // wait for the async launch to finish
    clock_t end_d = clock();

    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a, b, c2, n);
    clock_t end_h = clock();

    double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
    double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;

    hipMemcpy(c, c_d, nBytes, hipMemcpyDeviceToHost);
    printf("%d %f %f\n", n, time_d, time_h);

    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);
    // BUG FIX: host buffers were leaked.
    free(a); free(b); free(c); free(c2);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// GPU kernel: element-wise vector add, C[i] = A[i] + B[i].
// One thread per element; i is the work-item's global linear index.
__global__ void vecAdd(int *A, int *B, int *C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: N was previously unused, so any launch whose total
    // thread count exceeded N would read/write past the buffers.
    if (i < N)
        C[i] = A[i] + B[i];
}
// CPU reference implementation of the GPU kernel: C1[i] = A1[i] + B1[i].
// BUG FIX: the loop body computed A1[i] * B1[i] even though the function
// name and the "Doing CPU Vector add" call site promise addition, so the
// CPU and GPU results could never agree. Loop is a no-op when N <= 0.
void vecAdd_h(int *A1, int *B1, int *C1, int N)
{
    for (int i = 0; i < N; i++)
        C1[i] = A1[i] + B1[i];
}
// Driver: adds two 10M-element vectors on the GPU and on the CPU,
// timing both, then copies the GPU result back and prints the timings.
int main(int argc, char **argv)
{
    printf("Begin \n");
    int n = 10000000;                    // element count
    int nBytes = n * sizeof(int);        // buffer size in bytes
    int block_size, block_no;

    // Host buffers: a/b inputs, c GPU result, c2 CPU result.
    a  = (int *)malloc(nBytes);
    b  = (int *)malloc(nBytes);
    c  = (int *)malloc(nBytes);
    c2 = (int *)malloc(nBytes);

    int *a_d, *b_d, *c_d;
    // BUG FIX: block_size was 4000, beyond the 1024 work-items-per-group
    // limit (max_flat_workgroup_size in the kernel metadata), so the
    // kernel launch failed; 1000 divides n evenly.
    block_size = 1000;
    block_no = n / block_size;
    dim3 dimBlock(block_size, 1, 1);
    dim3 dimGrid(block_no, 1, 1);

    for (int i = 0; i < n; i++)
        a[i] = i, b[i] = i;

    printf("Allocating device memory on host..\n");
    hipMalloc((void **)&a_d, nBytes);
    hipMalloc((void **)&b_d, nBytes);
    hipMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    hipMemcpy(a_d, a, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b, nBytes, hipMemcpyHostToDevice);

    clock_t start_d = clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
    hipDeviceSynchronize();              // wait for the async launch to finish
    clock_t end_d = clock();

    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a, b, c2, n);
    clock_t end_h = clock();

    double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
    double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;

    hipMemcpy(c, c_d, nBytes, hipMemcpyDeviceToHost);
    printf("%d %f %f\n", n, time_d, time_h);

    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);
    // BUG FIX: host buffers were leaked.
    free(a); free(b); free(c); free(c2);
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; _Z6vecAddPiS_S_i — gfx1100 (wave32) ISA for the vecAdd HIP kernel.
; Kernarg layout (see the .amdgpu_metadata block in this file):
;   +0x00 A, +0x08 B, +0x10 C, +0x2c hidden group-size-x (16-bit).
.protected _Z6vecAddPiS_S_i
.globl _Z6vecAddPiS_S_i
.p2align 8
.type _Z6vecAddPiS_S_i,@function
_Z6vecAddPiS_S_i:
; Load kernargs: s2 = packed group-size word, s[4:7] = A and B
; pointers, s[0:1] = C pointer (overwrites the kernarg base).
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
; s2 = workgroup size x (low 16 bits of the packed word).
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; v1 = workgroup_id.x (s15) * group_size + tid.x (v0)  -> element index i
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
; Sign-extend i to 64 bits, then scale by 4 to a byte offset.
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
; 64-bit address adds: v[2:3] = &A[i], v[4:5] = &B[i], v[0:1] = &C[i].
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
; C[i] = A[i] + B[i]
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPiS_S_i, .Lfunc_end0-_Z6vecAddPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6vecAddPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// GPU kernel: element-wise vector add, C[i] = A[i] + B[i].
// One thread per element; i is the work-item's global linear index.
__global__ void vecAdd(int *A, int *B, int *C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: N was previously unused, so any launch whose total
    // thread count exceeded N would read/write past the buffers.
    if (i < N)
        C[i] = A[i] + B[i];
}
// CPU reference implementation of the GPU kernel: C1[i] = A1[i] + B1[i].
// BUG FIX: the loop body computed A1[i] * B1[i] even though the function
// name and the "Doing CPU Vector add" call site promise addition, so the
// CPU and GPU results could never agree. Loop is a no-op when N <= 0.
void vecAdd_h(int *A1, int *B1, int *C1, int N)
{
    for (int i = 0; i < N; i++)
        C1[i] = A1[i] + B1[i];
}
// Driver: adds two 10M-element vectors on the GPU and on the CPU,
// timing both, then copies the GPU result back and prints the timings.
int main(int argc, char **argv)
{
    printf("Begin \n");
    int n = 10000000;                    // element count
    int nBytes = n * sizeof(int);        // buffer size in bytes
    int block_size, block_no;

    // Host buffers: a/b inputs, c GPU result, c2 CPU result.
    a  = (int *)malloc(nBytes);
    b  = (int *)malloc(nBytes);
    c  = (int *)malloc(nBytes);
    c2 = (int *)malloc(nBytes);

    int *a_d, *b_d, *c_d;
    // BUG FIX: block_size was 4000, beyond the 1024 work-items-per-group
    // limit (max_flat_workgroup_size in the kernel metadata), so the
    // kernel launch failed; 1000 divides n evenly.
    block_size = 1000;
    block_no = n / block_size;
    dim3 dimBlock(block_size, 1, 1);
    dim3 dimGrid(block_no, 1, 1);

    for (int i = 0; i < n; i++)
        a[i] = i, b[i] = i;

    printf("Allocating device memory on host..\n");
    hipMalloc((void **)&a_d, nBytes);
    hipMalloc((void **)&b_d, nBytes);
    hipMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    hipMemcpy(a_d, a, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b, nBytes, hipMemcpyHostToDevice);

    clock_t start_d = clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
    hipDeviceSynchronize();              // wait for the async launch to finish
    clock_t end_d = clock();

    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a, b, c2, n);
    clock_t end_h = clock();

    double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
    double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;

    hipMemcpy(c, c_d, nBytes, hipMemcpyDeviceToHost);
    printf("%d %f %f\n", n, time_d, time_h);

    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);
    // BUG FIX: host buffers were leaked.
    free(a); free(b); free(c); free(c2);
    return 0;
}
.file "vecadd.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__vecAddPiS_S_i # -- Begin function _Z21__device_stub__vecAddPiS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPiS_S_i,@function
_Z21__device_stub__vecAddPiS_S_i: # @_Z21__device_stub__vecAddPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__vecAddPiS_S_i, .Lfunc_end0-_Z21__device_stub__vecAddPiS_S_i
.cfi_endproc
# -- End function
.globl _Z8vecAdd_hPiS_S_i # -- Begin function _Z8vecAdd_hPiS_S_i
.p2align 4, 0x90
.type _Z8vecAdd_hPiS_S_i,@function
_Z8vecAdd_hPiS_S_i: # @_Z8vecAdd_hPiS_S_i
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movl %ecx, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rsi,%rcx,4), %r8d
imull (%rdi,%rcx,4), %r8d
movl %r8d, (%rdx,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB1_2
.LBB1_3: # %._crit_edge
retq
.Lfunc_end1:
.size _Z8vecAdd_hPiS_S_i, .Lfunc_end1-_Z8vecAdd_hPiS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $.Lstr, %edi
callq puts@PLT
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %rbx
movq %rax, a(%rip)
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %r14
movq %rax, b(%rip)
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, c(%rip)
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, c2(%rip)
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $10000000, %rax # imm = 0x989680
jne .LBB2_1
# %bb.2:
movl $.Lstr.1, %edi
callq puts@PLT
leaq 24(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
movl $.Lstr.2, %edi
callq puts@PLT
movq 24(%rsp), %rdi
movq a(%rip), %rsi
movl $40000000, %edx # imm = 0x2625A00
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq b(%rip), %rsi
movl $40000000, %edx # imm = 0x2625A00
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %rbx
movl $.Lstr.3, %edi
callq puts@PLT
movabsq $4294969796, %rdi # imm = 0x1000009C4
leaq 1500(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
movl $10000000, 36(%rsp) # imm = 0x989680
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 36(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z6vecAddPiS_S_i, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipDeviceSynchronize
callq clock
movq %rax, %r15
callq clock
movq %rax, %r14
movl $.Lstr.4, %edi
callq puts@PLT
movq a(%rip), %rax
movq b(%rip), %rcx
xorl %edx, %edx
movq c2(%rip), %rsi
.p2align 4, 0x90
.LBB2_5: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl (%rcx,%rdx,4), %edi
imull (%rax,%rdx,4), %edi
movl %edi, (%rsi,%rdx,4)
incq %rdx
cmpq $10000000, %rdx # imm = 0x989680
jne .LBB2_5
# %bb.6: # %_Z8vecAdd_hPiS_S_i.exit
callq clock
subq %rbx, %r15
cvtsi2sd %r15, %xmm1
movsd .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero
subq %r14, %rax
cvtsi2sd %rax, %xmm2
divsd %xmm0, %xmm1
movsd %xmm1, 48(%rsp) # 8-byte Spill
divsd %xmm0, %xmm2
movsd %xmm2, 40(%rsp) # 8-byte Spill
movq c(%rip), %rdi
movq 8(%rsp), %rsi
movl $40000000, %edx # imm = 0x2625A00
movl $2, %ecx
callq hipMemcpy
movl $.L.str.5, %edi
movl $10000000, %esi # imm = 0x989680
movsd 48(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 40(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq printf
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type a,@object # @a
.bss
.globl a
.p2align 3, 0x0
a:
.quad 0
.size a, 8
.type b,@object # @b
.globl b
.p2align 3, 0x0
b:
.quad 0
.size b, 8
.type c,@object # @c
.globl c
.p2align 3, 0x0
c:
.quad 0
.size c, 8
.type c2,@object # @c2
.globl c2
.p2align 3, 0x0
c2:
.quad 0
.size c2, 8
.type _Z6vecAddPiS_S_i,@object # @_Z6vecAddPiS_S_i
.section .rodata,"a",@progbits
.globl _Z6vecAddPiS_S_i
.p2align 3, 0x0
_Z6vecAddPiS_S_i:
.quad _Z21__device_stub__vecAddPiS_S_i
.size _Z6vecAddPiS_S_i, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "%d %f %f\n"
.size .L.str.5, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPiS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Begin "
.size .Lstr, 7
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Allocating device memory on host.."
.size .Lstr.1, 35
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Copying to device.."
.size .Lstr.2, 20
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Doing GPU Vector add"
.size .Lstr.3, 21
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Doing CPU Vector add"
.size .Lstr.4, 21
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6vecAddPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6vecAddPiS_S_i
.globl _Z6vecAddPiS_S_i
.p2align 8
.type _Z6vecAddPiS_S_i,@function
_Z6vecAddPiS_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPiS_S_i, .Lfunc_end0-_Z6vecAddPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6vecAddPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0014619c_00000000-6_vecadd.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8vecAdd_hPiS_S_i
.type _Z8vecAdd_hPiS_S_i, @function
_Z8vecAdd_hPiS_S_i:
.LFB3669:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L3
movslq %ecx, %rcx
leaq 0(,%rcx,4), %r8
movl $0, %eax
.L5:
movl (%rdi,%rax), %ecx
imull (%rsi,%rax), %ecx
movl %ecx, (%rdx,%rax)
addq $4, %rax
cmpq %r8, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE3669:
.size _Z8vecAdd_hPiS_S_i, .-_Z8vecAdd_hPiS_S_i
.globl _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
.type _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i, @function
_Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i:
.LFB3695:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i, .-_Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
.globl _Z6vecAddPiS_S_i
.type _Z6vecAddPiS_S_i, @function
_Z6vecAddPiS_S_i:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z6vecAddPiS_S_i, .-_Z6vecAddPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Begin \n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Allocating device memory on host..\n"
.section .rodata.str1.1
.LC2:
.string "Copying to device..\n"
.LC3:
.string "Doing GPU Vector add\n"
.LC4:
.string "Doing CPU Vector add\n"
.LC6:
.string "%d %f %f\n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $40000000, %edi
call malloc@PLT
movq %rax, a(%rip)
movl $40000000, %edi
call malloc@PLT
movq %rax, b(%rip)
movl $40000000, %edi
call malloc@PLT
movq %rax, c(%rip)
movl $40000000, %edi
call malloc@PLT
movq %rax, c2(%rip)
movl $0, %eax
.L16:
movq a(%rip), %rdx
movl %eax, (%rdx,%rax,4)
movq b(%rip), %rdx
movl %eax, (%rdx,%rax,4)
addq $1, %rax
cmpq $10000000, %rax
jne .L16
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 24(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $40000000, %edx
movq a(%rip), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000000, %edx
movq b(%rip), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, %r12
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4000, 60(%rsp)
movl $1, 64(%rsp)
movl $2500, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
call cudaThreadSynchronize@PLT
call clock@PLT
movq %rax, %rbx
call clock@PLT
movq %rax, %rbp
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $10000000, %ecx
movq c2(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z8vecAdd_hPiS_S_i
call clock@PLT
subq %r12, %rbx
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
movsd .LC5(%rip), %xmm1
divsd %xmm1, %xmm0
movq %xmm0, %rbx
subq %rbp, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd %xmm1, %xmm0
movsd %xmm0, 8(%rsp)
movl $2, %ecx
movl $40000000, %edx
movq 40(%rsp), %rsi
movq c(%rip), %rdi
call cudaMemcpy@PLT
movsd 8(%rsp), %xmm1
movq %rbx, %xmm0
movl $10000000, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movl $10000000, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z30__device_stub__Z6vecAddPiS_S_iPiS_S_i
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z6vecAddPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl c2
.bss
.align 8
.type c2, @object
.size c2, 8
c2:
.zero 8
.globl c
.align 8
.type c, @object
.size c, 8
c:
.zero 8
.globl b
.align 8
.type b, @object
.size b, 8
b:
.zero 8
.globl a
.align 8
.type a, @object
.size a, 8
a:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC5:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "vecadd.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__vecAddPiS_S_i # -- Begin function _Z21__device_stub__vecAddPiS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPiS_S_i,@function
_Z21__device_stub__vecAddPiS_S_i: # @_Z21__device_stub__vecAddPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__vecAddPiS_S_i, .Lfunc_end0-_Z21__device_stub__vecAddPiS_S_i
.cfi_endproc
# -- End function
.globl _Z8vecAdd_hPiS_S_i # -- Begin function _Z8vecAdd_hPiS_S_i
.p2align 4, 0x90
.type _Z8vecAdd_hPiS_S_i,@function
_Z8vecAdd_hPiS_S_i: # @_Z8vecAdd_hPiS_S_i
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movl %ecx, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rsi,%rcx,4), %r8d
imull (%rdi,%rcx,4), %r8d
movl %r8d, (%rdx,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB1_2
.LBB1_3: # %._crit_edge
retq
.Lfunc_end1:
.size _Z8vecAdd_hPiS_S_i, .Lfunc_end1-_Z8vecAdd_hPiS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $.Lstr, %edi
callq puts@PLT
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %rbx
movq %rax, a(%rip)
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %r14
movq %rax, b(%rip)
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, c(%rip)
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, c2(%rip)
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $10000000, %rax # imm = 0x989680
jne .LBB2_1
# %bb.2:
movl $.Lstr.1, %edi
callq puts@PLT
leaq 24(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
movl $.Lstr.2, %edi
callq puts@PLT
movq 24(%rsp), %rdi
movq a(%rip), %rsi
movl $40000000, %edx # imm = 0x2625A00
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq b(%rip), %rsi
movl $40000000, %edx # imm = 0x2625A00
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %rbx
movl $.Lstr.3, %edi
callq puts@PLT
movabsq $4294969796, %rdi # imm = 0x1000009C4
leaq 1500(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
movl $10000000, 36(%rsp) # imm = 0x989680
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 36(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z6vecAddPiS_S_i, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipDeviceSynchronize
callq clock
movq %rax, %r15
callq clock
movq %rax, %r14
movl $.Lstr.4, %edi
callq puts@PLT
movq a(%rip), %rax
movq b(%rip), %rcx
xorl %edx, %edx
movq c2(%rip), %rsi
.p2align 4, 0x90
.LBB2_5: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl (%rcx,%rdx,4), %edi
imull (%rax,%rdx,4), %edi
movl %edi, (%rsi,%rdx,4)
incq %rdx
cmpq $10000000, %rdx # imm = 0x989680
jne .LBB2_5
# %bb.6: # %_Z8vecAdd_hPiS_S_i.exit
callq clock
subq %rbx, %r15
cvtsi2sd %r15, %xmm1
movsd .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero
subq %r14, %rax
cvtsi2sd %rax, %xmm2
divsd %xmm0, %xmm1
movsd %xmm1, 48(%rsp) # 8-byte Spill
divsd %xmm0, %xmm2
movsd %xmm2, 40(%rsp) # 8-byte Spill
movq c(%rip), %rdi
movq 8(%rsp), %rsi
movl $40000000, %edx # imm = 0x2625A00
movl $2, %ecx
callq hipMemcpy
movl $.L.str.5, %edi
movl $10000000, %esi # imm = 0x989680
movsd 48(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 40(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq printf
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type a,@object # @a
.bss
.globl a
.p2align 3, 0x0
a:
.quad 0
.size a, 8
.type b,@object # @b
.globl b
.p2align 3, 0x0
b:
.quad 0
.size b, 8
.type c,@object # @c
.globl c
.p2align 3, 0x0
c:
.quad 0
.size c, 8
.type c2,@object # @c2
.globl c2
.p2align 3, 0x0
c2:
.quad 0
.size c2, 8
.type _Z6vecAddPiS_S_i,@object # @_Z6vecAddPiS_S_i
.section .rodata,"a",@progbits
.globl _Z6vecAddPiS_S_i
.p2align 3, 0x0
_Z6vecAddPiS_S_i:
.quad _Z21__device_stub__vecAddPiS_S_i
.size _Z6vecAddPiS_S_i, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "%d %f %f\n"
.size .L.str.5, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPiS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Begin "
.size .Lstr, 7
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Allocating device memory on host.."
.size .Lstr.1, 35
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Copying to device.."
.size .Lstr.2, 20
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Doing GPU Vector add"
.size .Lstr.3, 21
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Doing CPU Vector add"
.size .Lstr.4, 21
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#define MATRIX_ROWS 5
#define MATRIX_COLUMNS 5
#define SHARED_MEMORY_PADDING 1
__global__
void createMatrixStatic(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS];
int idx = blockIdx.y * blockDim.x + threadIdx.x;
int idy = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) {
matrix[idy][idx] = idx + idy;
out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx];
}
}
__global__
void createMatrixStaticPadding(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING];
int idx = threadIdx.y * blockDim.x + threadIdx.x;
matrix[threadIdx.y][threadIdx.x] = idx;
__syncthreads();
out[idx] = matrix[threadIdx.y][threadIdx.x];
}
void printMatrix(float* matrix) {
for (int y = 0; y < MATRIX_COLUMNS; y++) {
for (int x = 0; x < MATRIX_ROWS; x++) {
printf("%-3d ", int(matrix[y*MATRIX_ROWS+x]));
}
printf("\n");
}
}
int main(void) {
printf("\n");
dim3 block(MATRIX_ROWS, MATRIX_COLUMNS);
dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y);
float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
float* device_matrix;
cudaMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
createMatrixStatic<<<grid,block>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStatic\n");
printMatrix(host_matrix);
printf("\n");
createMatrixStaticPadding<<<grid.x,block.x>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStaticPadding\n");
printMatrix(host_matrix);
printf("\n");
free(host_matrix);
cudaFree(device_matrix);
cudaDeviceReset();
printf("\n");
return 0;
} | code for sm_80
Function : _Z25createMatrixStaticPaddingPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e220000002200 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R0, R2.reuse, c[0x0][0x0], R3.reuse ; /* 0x0000000002007a24 */
/* 0x141fe400078e0203 */
/*0050*/ IMAD R7, R2, 0x6, R3 ; /* 0x0000000602077824 */
/* 0x000fe200078e0203 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ I2F R4, R0 ; /* 0x0000000000047306 */
/* 0x000e320000201400 */
/*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0203 */
/*0090*/ STS [R7.X4], R4 ; /* 0x0000000407007388 */
/* 0x001fe80000004800 */
/*00a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00b0*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */
/* 0x000e280000004800 */
/*00c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z18createMatrixStaticPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GT.AND P0, PT, R3, 0x4, PT ; /* 0x000000040300780c */
/* 0x000fe20003f04270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GT.OR P0, PT, R0, 0x4, P0 ; /* 0x000000040000780c */
/* 0x000fda0000704670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R0, R0, R3, RZ ; /* 0x0000000300007210 */
/* 0x000fe20007ffe0ff */
/*00b0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00d0*/ I2F R5, R0 ; /* 0x0000000000057306 */
/* 0x000e220000201400 */
/*00e0*/ LEA R3, R3, R0, 0x2 ; /* 0x0000000003037211 */
/* 0x000fcc00078e10ff */
/*00f0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fca00078e0202 */
/*0100*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#define MATRIX_ROWS 5
#define MATRIX_COLUMNS 5
#define SHARED_MEMORY_PADDING 1
__global__
void createMatrixStatic(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS];
int idx = blockIdx.y * blockDim.x + threadIdx.x;
int idy = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) {
matrix[idy][idx] = idx + idy;
out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx];
}
}
__global__
void createMatrixStaticPadding(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING];
int idx = threadIdx.y * blockDim.x + threadIdx.x;
matrix[threadIdx.y][threadIdx.x] = idx;
__syncthreads();
out[idx] = matrix[threadIdx.y][threadIdx.x];
}
void printMatrix(float* matrix) {
for (int y = 0; y < MATRIX_COLUMNS; y++) {
for (int x = 0; x < MATRIX_ROWS; x++) {
printf("%-3d ", int(matrix[y*MATRIX_ROWS+x]));
}
printf("\n");
}
}
int main(void) {
printf("\n");
dim3 block(MATRIX_ROWS, MATRIX_COLUMNS);
dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y);
float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
float* device_matrix;
cudaMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
createMatrixStatic<<<grid,block>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStatic\n");
printMatrix(host_matrix);
printf("\n");
createMatrixStaticPadding<<<grid.x,block.x>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStaticPadding\n");
printMatrix(host_matrix);
printf("\n");
free(host_matrix);
cudaFree(device_matrix);
cudaDeviceReset();
printf("\n");
return 0;
} | .file "tmpxft_001bda43_00000000-6_shared_memory_2D.cudafe1.cpp"
.text
#APP
#NO_APP
# Static cleanup helper, registered via atexit() by __sti____cudaRegisterAll:
# unregisters the fat binary whose handle was cached at registration time.
# SysV AMD64; clobbers rdi and caller-saved regs via the PLT call.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp # realign rsp to 16 before the call (entry has rsp%16==8)
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%-3d "
.LC1:
.string "\n"
.text
.globl _Z11printMatrixPf
.type _Z11printMatrixPf, @function
_Z11printMatrixPf:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %rbp
leaq 100(%rdi), %r14
leaq .LC0(%rip), %r12
leaq .LC1(%rip), %r13
.L4:
movl $0, %ebx
.L5:
cvttss2sil 0(%rbp,%rbx,4), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $5, %rbx
jne .L5
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $20, %rbp
cmpq %r14, %rbp
jne .L4
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11printMatrixPf, .-_Z11printMatrixPf
.globl _Z38__device_stub__Z18createMatrixStaticPfPf
.type _Z38__device_stub__Z18createMatrixStaticPfPf, @function
_Z38__device_stub__Z18createMatrixStaticPfPf:
.LFB2083:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z18createMatrixStaticPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z38__device_stub__Z18createMatrixStaticPfPf, .-_Z38__device_stub__Z18createMatrixStaticPfPf
.globl _Z18createMatrixStaticPf
.type _Z18createMatrixStaticPf, @function
_Z18createMatrixStaticPf:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z18createMatrixStaticPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z18createMatrixStaticPf, .-_Z18createMatrixStaticPf
.globl _Z45__device_stub__Z25createMatrixStaticPaddingPfPf
.type _Z45__device_stub__Z25createMatrixStaticPaddingPfPf, @function
_Z45__device_stub__Z25createMatrixStaticPaddingPfPf:
.LFB2085:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z25createMatrixStaticPaddingPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z45__device_stub__Z25createMatrixStaticPaddingPfPf, .-_Z45__device_stub__Z25createMatrixStaticPaddingPfPf
.globl _Z25createMatrixStaticPaddingPf
.type _Z25createMatrixStaticPaddingPf, @function
_Z25createMatrixStaticPaddingPf:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z25createMatrixStaticPaddingPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z25createMatrixStaticPaddingPf, .-_Z25createMatrixStaticPaddingPf
.section .rodata.str1.1
.LC2:
.string "createMatrixStatic\n"
.LC3:
.string "createMatrixStaticPadding\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $72, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC1(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, 16(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $100, %edi
call malloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $100, %esi
call cudaMalloc@PLT
movl $1, 20(%rsp)
movl $5, 8(%rsp)
movl $5, 12(%rsp)
movl 16(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 8(%rsp), %rdx
movq 20(%rsp), %rdi
movl 28(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L26:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $100, %edx
movq (%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call _Z11printMatrixPf
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $5, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $100, %edx
movq (%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call _Z11printMatrixPf
leaq .LC1(%rip), %rbp
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
call cudaDeviceReset@PLT
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L32
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
movq (%rsp), %rdi
call _Z38__device_stub__Z18createMatrixStaticPfPf
jmp .L26
.L31:
movq (%rsp), %rdi
call _Z45__device_stub__Z25createMatrixStaticPaddingPfPf
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "_Z25createMatrixStaticPaddingPf"
.section .rodata.str1.1
.LC5:
.string "_Z18createMatrixStaticPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z25createMatrixStaticPaddingPf(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z18createMatrixStaticPf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#define MATRIX_ROWS 5
#define MATRIX_COLUMNS 5
#define SHARED_MEMORY_PADDING 1
// Kernel: each in-range thread writes one element (value = col + row) into a
// statically sized shared-memory tile, then copies it out to global memory.
__global__
void createMatrixStatic(float* out) {
// Unpadded 5x5 shared tile (MATRIX_ROWS x MATRIX_COLUMNS floats).
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS];
// NOTE(review): column index mixes blockIdx.y with blockDim.x, and row index
// mixes blockIdx.x with blockDim.y. With the 1x1 grid launched by main() this
// is harmless, but confirm the intent before launching a larger grid.
int idx = blockIdx.y * blockDim.x + threadIdx.x;
int idy = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) {
// Each thread reads back only its own write, so no barrier is needed here.
matrix[idy][idx] = idx + idy;
out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx];
}
}
// Kernel: same fill as createMatrixStatic, but each shared-memory row is
// padded by SHARED_MEMORY_PADDING floats (a bank-conflict-avoidance layout).
__global__
void createMatrixStaticPadding(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING];
// Linear thread id within the block; doubles as the output index.
int idx = threadIdx.y * blockDim.x + threadIdx.x;
matrix[threadIdx.y][threadIdx.x] = idx;
// Barrier before read-back; each thread reads only its own slot, so this is
// illustrative rather than strictly required.
__syncthreads();
out[idx] = matrix[threadIdx.y][threadIdx.x];
}
// Print a 5x5 row-major float matrix as left-aligned truncated integers,
// one matrix row per output line.
void printMatrix(float* matrix) {
    for (int row = 0; row < MATRIX_COLUMNS; ++row) {
        const float* line = matrix + row * MATRIX_ROWS;
        for (int col = 0; col < MATRIX_ROWS; ++col) {
            printf("%-3d ", int(line[col]));
        }
        printf("\n");
    }
}
int main(void) {
printf("\n");
dim3 block(MATRIX_ROWS, MATRIX_COLUMNS);
dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y);
float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
float* device_matrix;
cudaMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
createMatrixStatic<<<grid,block>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStatic\n");
printMatrix(host_matrix);
printf("\n");
createMatrixStaticPadding<<<grid.x,block.x>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStaticPadding\n");
printMatrix(host_matrix);
printf("\n");
free(host_matrix);
cudaFree(device_matrix);
cudaDeviceReset();
printf("\n");
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#define MATRIX_ROWS 5
#define MATRIX_COLUMNS 5
#define SHARED_MEMORY_PADDING 1
// Kernel: each in-range thread writes one element (value = col + row) into a
// statically sized shared-memory (LDS) tile, then copies it to global memory.
__global__
void createMatrixStatic(float* out) {
// Unpadded 5x5 shared tile (MATRIX_ROWS x MATRIX_COLUMNS floats).
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS];
// NOTE(review): column index mixes blockIdx.y with blockDim.x, and row index
// mixes blockIdx.x with blockDim.y. With the 1x1 grid launched by main() this
// is harmless, but confirm the intent before launching a larger grid.
int idx = blockIdx.y * blockDim.x + threadIdx.x;
int idy = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) {
// Each thread reads back only its own write, so no barrier is needed here.
matrix[idy][idx] = idx + idy;
out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx];
}
}
// Kernel: same fill as createMatrixStatic, but each shared-memory row is
// padded by SHARED_MEMORY_PADDING floats (a bank-conflict-avoidance layout).
__global__
void createMatrixStaticPadding(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING];
// Linear thread id within the block; doubles as the output index.
int idx = threadIdx.y * blockDim.x + threadIdx.x;
matrix[threadIdx.y][threadIdx.x] = idx;
// Barrier before read-back; each thread reads only its own slot, so this is
// illustrative rather than strictly required.
__syncthreads();
out[idx] = matrix[threadIdx.y][threadIdx.x];
}
// Print a 5x5 row-major float matrix as left-aligned truncated integers,
// one matrix row per output line.
void printMatrix(float* matrix) {
    for (int row = 0; row < MATRIX_COLUMNS; ++row) {
        const float* line = matrix + row * MATRIX_ROWS;
        for (int col = 0; col < MATRIX_ROWS; ++col) {
            printf("%-3d ", int(line[col]));
        }
        printf("\n");
    }
}
int main(void) {
printf("\n");
dim3 block(MATRIX_ROWS, MATRIX_COLUMNS);
dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y);
float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
float* device_matrix;
hipMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
createMatrixStatic<<<grid,block>>>(device_matrix);
hipDeviceSynchronize();
hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost);
printf("createMatrixStatic\n");
printMatrix(host_matrix);
printf("\n");
createMatrixStaticPadding<<<grid.x,block.x>>>(device_matrix);
hipDeviceSynchronize();
hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost);
printf("createMatrixStaticPadding\n");
printMatrix(host_matrix);
printf("\n");
free(host_matrix);
hipFree(device_matrix);
hipDeviceReset();
printf("\n");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define MATRIX_ROWS 5
#define MATRIX_COLUMNS 5
#define SHARED_MEMORY_PADDING 1
// Kernel: each in-range thread writes one element (value = col + row) into a
// statically sized shared-memory (LDS) tile, then copies it to global memory.
__global__
void createMatrixStatic(float* out) {
// Unpadded 5x5 shared tile (MATRIX_ROWS x MATRIX_COLUMNS floats).
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS];
// NOTE(review): column index mixes blockIdx.y with blockDim.x, and row index
// mixes blockIdx.x with blockDim.y. With the 1x1 grid launched by main() this
// is harmless, but confirm the intent before launching a larger grid.
int idx = blockIdx.y * blockDim.x + threadIdx.x;
int idy = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) {
// Each thread reads back only its own write, so no barrier is needed here.
matrix[idy][idx] = idx + idy;
out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx];
}
}
// Kernel: same fill as createMatrixStatic, but each shared-memory row is
// padded by SHARED_MEMORY_PADDING floats (a bank-conflict-avoidance layout).
__global__
void createMatrixStaticPadding(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING];
// Linear thread id within the block; doubles as the output index.
int idx = threadIdx.y * blockDim.x + threadIdx.x;
matrix[threadIdx.y][threadIdx.x] = idx;
// Barrier before read-back; each thread reads only its own slot, so this is
// illustrative rather than strictly required.
__syncthreads();
out[idx] = matrix[threadIdx.y][threadIdx.x];
}
// Print a 5x5 row-major float matrix as left-aligned truncated integers,
// one matrix row per output line.
void printMatrix(float* matrix) {
    for (int row = 0; row < MATRIX_COLUMNS; ++row) {
        const float* line = matrix + row * MATRIX_ROWS;
        for (int col = 0; col < MATRIX_ROWS; ++col) {
            printf("%-3d ", int(line[col]));
        }
        printf("\n");
    }
}
int main(void) {
printf("\n");
dim3 block(MATRIX_ROWS, MATRIX_COLUMNS);
dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y);
float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
float* device_matrix;
hipMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
createMatrixStatic<<<grid,block>>>(device_matrix);
hipDeviceSynchronize();
hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost);
printf("createMatrixStatic\n");
printMatrix(host_matrix);
printf("\n");
createMatrixStaticPadding<<<grid.x,block.x>>>(device_matrix);
hipDeviceSynchronize();
hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost);
printf("createMatrixStaticPadding\n");
printMatrix(host_matrix);
printf("\n");
free(host_matrix);
hipFree(device_matrix);
hipDeviceReset();
printf("\n");
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18createMatrixStaticPf
.globl _Z18createMatrixStaticPf
.p2align 8
.type _Z18createMatrixStaticPf,@function
_Z18createMatrixStaticPf:
s_load_b32 s2, s[0:1], 0x14
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
s_mov_b32 s2, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 5, v2
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, 5, v[0:1]
v_add_nc_u32_e32 v4, v1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v2, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18createMatrixStaticPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18createMatrixStaticPf, .Lfunc_end0-_Z18createMatrixStaticPf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z25createMatrixStaticPaddingPf
.globl _Z25createMatrixStaticPaddingPf
.p2align 8
.type _Z25createMatrixStaticPaddingPf,@function
_Z25createMatrixStaticPaddingPf:
s_load_b32 s2, s[0:1], 0x14
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_load_b64 s[0:1], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b32_e32 v2, 2, v1
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
v_mad_u32_u24 v1, v0, s2, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u32_u24 v0, v0, 24, v2
v_cvt_f32_i32_e32 v2, v1
v_lshlrev_b32_e32 v1, 2, v1
ds_store_b32 v0, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z25createMatrixStaticPaddingPf
.amdhsa_group_segment_fixed_size 120
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z25createMatrixStaticPaddingPf, .Lfunc_end1-_Z25createMatrixStaticPaddingPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18createMatrixStaticPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18createMatrixStaticPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 120
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z25createMatrixStaticPaddingPf
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z25createMatrixStaticPaddingPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define MATRIX_ROWS 5
#define MATRIX_COLUMNS 5
#define SHARED_MEMORY_PADDING 1
// Kernel: each in-range thread writes one element (value = col + row) into a
// statically sized shared-memory (LDS) tile, then copies it to global memory.
__global__
void createMatrixStatic(float* out) {
// Unpadded 5x5 shared tile (MATRIX_ROWS x MATRIX_COLUMNS floats).
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS];
// NOTE(review): column index mixes blockIdx.y with blockDim.x, and row index
// mixes blockIdx.x with blockDim.y. With the 1x1 grid launched by main() this
// is harmless, but confirm the intent before launching a larger grid.
int idx = blockIdx.y * blockDim.x + threadIdx.x;
int idy = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) {
// Each thread reads back only its own write, so no barrier is needed here.
matrix[idy][idx] = idx + idy;
out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx];
}
}
// Kernel: same fill as createMatrixStatic, but each shared-memory row is
// padded by SHARED_MEMORY_PADDING floats (a bank-conflict-avoidance layout).
__global__
void createMatrixStaticPadding(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING];
// Linear thread id within the block; doubles as the output index.
int idx = threadIdx.y * blockDim.x + threadIdx.x;
matrix[threadIdx.y][threadIdx.x] = idx;
// Barrier before read-back; each thread reads only its own slot, so this is
// illustrative rather than strictly required.
__syncthreads();
out[idx] = matrix[threadIdx.y][threadIdx.x];
}
// Print a 5x5 row-major float matrix as left-aligned truncated integers,
// one matrix row per output line.
void printMatrix(float* matrix) {
    for (int row = 0; row < MATRIX_COLUMNS; ++row) {
        const float* line = matrix + row * MATRIX_ROWS;
        for (int col = 0; col < MATRIX_ROWS; ++col) {
            printf("%-3d ", int(line[col]));
        }
        printf("\n");
    }
}
int main(void) {
printf("\n");
dim3 block(MATRIX_ROWS, MATRIX_COLUMNS);
dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y);
float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
float* device_matrix;
hipMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
createMatrixStatic<<<grid,block>>>(device_matrix);
hipDeviceSynchronize();
hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost);
printf("createMatrixStatic\n");
printMatrix(host_matrix);
printf("\n");
createMatrixStaticPadding<<<grid.x,block.x>>>(device_matrix);
hipDeviceSynchronize();
hipMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), hipMemcpyDeviceToHost);
printf("createMatrixStaticPadding\n");
printMatrix(host_matrix);
printf("\n");
free(host_matrix);
hipFree(device_matrix);
hipDeviceReset();
printf("\n");
return 0;
} | .text
.file "shared_memory_2D.hip"
.globl _Z33__device_stub__createMatrixStaticPf # -- Begin function _Z33__device_stub__createMatrixStaticPf
.p2align 4, 0x90
.type _Z33__device_stub__createMatrixStaticPf,@function
_Z33__device_stub__createMatrixStaticPf: # @_Z33__device_stub__createMatrixStaticPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z18createMatrixStaticPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z33__device_stub__createMatrixStaticPf, .Lfunc_end0-_Z33__device_stub__createMatrixStaticPf
.cfi_endproc
# -- End function
.globl _Z40__device_stub__createMatrixStaticPaddingPf # -- Begin function _Z40__device_stub__createMatrixStaticPaddingPf
.p2align 4, 0x90
.type _Z40__device_stub__createMatrixStaticPaddingPf,@function
_Z40__device_stub__createMatrixStaticPaddingPf: # @_Z40__device_stub__createMatrixStaticPaddingPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z25createMatrixStaticPaddingPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z40__device_stub__createMatrixStaticPaddingPf, .Lfunc_end1-_Z40__device_stub__createMatrixStaticPaddingPf
.cfi_endproc
# -- End function
.globl _Z11printMatrixPf # -- Begin function _Z11printMatrixPf
.p2align 4, 0x90
.type _Z11printMatrixPf,@function
_Z11printMatrixPf: # @_Z11printMatrixPf
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
cvttss2si (%rbx,%r15,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r15
cmpq $5, %r15
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $20, %rbx
cmpq $5, %r14
jne .LBB2_1
# %bb.4:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z11printMatrixPf, .Lfunc_end2-_Z11printMatrixPf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $80, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movabsq $4294967297, %r14 # imm = 0x100000001
movl $10, %edi
callq putchar@PLT
movl $100, %edi
callq malloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $100, %esi
callq hipMalloc
movabsq $21474836485, %rdx # imm = 0x500000005
movq %r14, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z18createMatrixStaticPf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $100, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_3: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_4: # Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
cvttss2si (%r15,%r13,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq $5, %r13
jne .LBB3_4
# %bb.5: # in Loop: Header=BB3_3 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
addq $20, %r15
cmpq $5, %r12
jne .LBB3_3
# %bb.6: # %_Z11printMatrixPf.exit
movl $10, %edi
callq putchar@PLT
leaq 4(%r14), %rdx
movq %r14, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z25createMatrixStaticPaddingPf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $100, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.1, %edi
callq puts@PLT
movq %rbx, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_9: # %.preheader.i26
# =>This Loop Header: Depth=1
# Child Loop BB3_10 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_10: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
cvttss2si (%r14,%r12,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $5, %r12
jne .LBB3_10
# %bb.11: # in Loop: Header=BB3_9 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r15
addq $20, %r14
cmpq $5, %r15
jne .LBB3_9
# %bb.12: # %_Z11printMatrixPf.exit34
movl $10, %edi
callq putchar@PLT
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
callq hipDeviceReset
movl $10, %edi
callq putchar@PLT
xorl %eax, %eax
addq $80, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18createMatrixStaticPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z25createMatrixStaticPaddingPf, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18createMatrixStaticPf,@object # @_Z18createMatrixStaticPf
.section .rodata,"a",@progbits
.globl _Z18createMatrixStaticPf
.p2align 3, 0x0
_Z18createMatrixStaticPf:
.quad _Z33__device_stub__createMatrixStaticPf
.size _Z18createMatrixStaticPf, 8
.type _Z25createMatrixStaticPaddingPf,@object # @_Z25createMatrixStaticPaddingPf
.globl _Z25createMatrixStaticPaddingPf
.p2align 3, 0x0
_Z25createMatrixStaticPaddingPf:
.quad _Z40__device_stub__createMatrixStaticPaddingPf
.size _Z25createMatrixStaticPaddingPf, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%-3d "
.size .L.str, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z18createMatrixStaticPf"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z25createMatrixStaticPaddingPf"
.size .L__unnamed_2, 32
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "createMatrixStatic"
.size .Lstr, 19
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "createMatrixStaticPadding"
.size .Lstr.1, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__createMatrixStaticPf
.addrsig_sym _Z40__device_stub__createMatrixStaticPaddingPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18createMatrixStaticPf
.addrsig_sym _Z25createMatrixStaticPaddingPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z25createMatrixStaticPaddingPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e220000002200 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R0, R2.reuse, c[0x0][0x0], R3.reuse ; /* 0x0000000002007a24 */
/* 0x141fe400078e0203 */
/*0050*/ IMAD R7, R2, 0x6, R3 ; /* 0x0000000602077824 */
/* 0x000fe200078e0203 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ I2F R4, R0 ; /* 0x0000000000047306 */
/* 0x000e320000201400 */
/*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0203 */
/*0090*/ STS [R7.X4], R4 ; /* 0x0000000407007388 */
/* 0x001fe80000004800 */
/*00a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00b0*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */
/* 0x000e280000004800 */
/*00c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z18createMatrixStaticPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GT.AND P0, PT, R3, 0x4, PT ; /* 0x000000040300780c */
/* 0x000fe20003f04270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GT.OR P0, PT, R0, 0x4, P0 ; /* 0x000000040000780c */
/* 0x000fda0000704670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R0, R0, R3, RZ ; /* 0x0000000300007210 */
/* 0x000fe20007ffe0ff */
/*00b0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00d0*/ I2F R5, R0 ; /* 0x0000000000057306 */
/* 0x000e220000201400 */
/*00e0*/ LEA R3, R3, R0, 0x2 ; /* 0x0000000003037211 */
/* 0x000fcc00078e10ff */
/*00f0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fca00078e0202 */
/*0100*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18createMatrixStaticPf
.globl _Z18createMatrixStaticPf
.p2align 8
.type _Z18createMatrixStaticPf,@function
_Z18createMatrixStaticPf:
s_load_b32 s2, s[0:1], 0x14
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
s_mov_b32 s2, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 5, v2
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, 5, v[0:1]
v_add_nc_u32_e32 v4, v1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v2, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18createMatrixStaticPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18createMatrixStaticPf, .Lfunc_end0-_Z18createMatrixStaticPf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z25createMatrixStaticPaddingPf
.globl _Z25createMatrixStaticPaddingPf
.p2align 8
.type _Z25createMatrixStaticPaddingPf,@function
_Z25createMatrixStaticPaddingPf:
s_load_b32 s2, s[0:1], 0x14
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_load_b64 s[0:1], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b32_e32 v2, 2, v1
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
v_mad_u32_u24 v1, v0, s2, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u32_u24 v0, v0, 24, v2
v_cvt_f32_i32_e32 v2, v1
v_lshlrev_b32_e32 v1, 2, v1
ds_store_b32 v0, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z25createMatrixStaticPaddingPf
.amdhsa_group_segment_fixed_size 120
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z25createMatrixStaticPaddingPf, .Lfunc_end1-_Z25createMatrixStaticPaddingPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18createMatrixStaticPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18createMatrixStaticPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 120
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z25createMatrixStaticPaddingPf
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z25createMatrixStaticPaddingPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001bda43_00000000-6_shared_memory_2D.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%-3d "
.LC1:
.string "\n"
.text
.globl _Z11printMatrixPf
.type _Z11printMatrixPf, @function
_Z11printMatrixPf:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %rbp
leaq 100(%rdi), %r14
leaq .LC0(%rip), %r12
leaq .LC1(%rip), %r13
.L4:
movl $0, %ebx
.L5:
cvttss2sil 0(%rbp,%rbx,4), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $5, %rbx
jne .L5
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $20, %rbp
cmpq %r14, %rbp
jne .L4
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11printMatrixPf, .-_Z11printMatrixPf
.globl _Z38__device_stub__Z18createMatrixStaticPfPf
.type _Z38__device_stub__Z18createMatrixStaticPfPf, @function
_Z38__device_stub__Z18createMatrixStaticPfPf:
.LFB2083:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z18createMatrixStaticPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z38__device_stub__Z18createMatrixStaticPfPf, .-_Z38__device_stub__Z18createMatrixStaticPfPf
.globl _Z18createMatrixStaticPf
.type _Z18createMatrixStaticPf, @function
_Z18createMatrixStaticPf:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z18createMatrixStaticPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z18createMatrixStaticPf, .-_Z18createMatrixStaticPf
.globl _Z45__device_stub__Z25createMatrixStaticPaddingPfPf
.type _Z45__device_stub__Z25createMatrixStaticPaddingPfPf, @function
_Z45__device_stub__Z25createMatrixStaticPaddingPfPf:
.LFB2085:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z25createMatrixStaticPaddingPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z45__device_stub__Z25createMatrixStaticPaddingPfPf, .-_Z45__device_stub__Z25createMatrixStaticPaddingPfPf
.globl _Z25createMatrixStaticPaddingPf
.type _Z25createMatrixStaticPaddingPf, @function
_Z25createMatrixStaticPaddingPf:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z25createMatrixStaticPaddingPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z25createMatrixStaticPaddingPf, .-_Z25createMatrixStaticPaddingPf
.section .rodata.str1.1
.LC2:
.string "createMatrixStatic\n"
.LC3:
.string "createMatrixStaticPadding\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $72, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC1(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, 16(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $100, %edi
call malloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $100, %esi
call cudaMalloc@PLT
movl $1, 20(%rsp)
movl $5, 8(%rsp)
movl $5, 12(%rsp)
movl 16(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 8(%rsp), %rdx
movq 20(%rsp), %rdi
movl 28(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L26:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $100, %edx
movq (%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call _Z11printMatrixPf
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $5, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $100, %edx
movq (%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call _Z11printMatrixPf
leaq .LC1(%rip), %rbp
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
call cudaDeviceReset@PLT
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L32
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
movq (%rsp), %rdi
call _Z38__device_stub__Z18createMatrixStaticPfPf
jmp .L26
.L31:
movq (%rsp), %rdi
call _Z45__device_stub__Z25createMatrixStaticPaddingPfPf
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "_Z25createMatrixStaticPaddingPf"
.section .rodata.str1.1
.LC5:
.string "_Z18createMatrixStaticPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z25createMatrixStaticPaddingPf(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z18createMatrixStaticPf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "shared_memory_2D.hip"
.globl _Z33__device_stub__createMatrixStaticPf # -- Begin function _Z33__device_stub__createMatrixStaticPf
.p2align 4, 0x90
.type _Z33__device_stub__createMatrixStaticPf,@function
_Z33__device_stub__createMatrixStaticPf: # @_Z33__device_stub__createMatrixStaticPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z18createMatrixStaticPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z33__device_stub__createMatrixStaticPf, .Lfunc_end0-_Z33__device_stub__createMatrixStaticPf
.cfi_endproc
# -- End function
.globl _Z40__device_stub__createMatrixStaticPaddingPf # -- Begin function _Z40__device_stub__createMatrixStaticPaddingPf
.p2align 4, 0x90
.type _Z40__device_stub__createMatrixStaticPaddingPf,@function
_Z40__device_stub__createMatrixStaticPaddingPf: # @_Z40__device_stub__createMatrixStaticPaddingPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z25createMatrixStaticPaddingPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z40__device_stub__createMatrixStaticPaddingPf, .Lfunc_end1-_Z40__device_stub__createMatrixStaticPaddingPf
.cfi_endproc
# -- End function
.globl _Z11printMatrixPf # -- Begin function _Z11printMatrixPf
.p2align 4, 0x90
.type _Z11printMatrixPf,@function
_Z11printMatrixPf: # @_Z11printMatrixPf
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
cvttss2si (%rbx,%r15,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r15
cmpq $5, %r15
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $20, %rbx
cmpq $5, %r14
jne .LBB2_1
# %bb.4:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z11printMatrixPf, .Lfunc_end2-_Z11printMatrixPf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $80, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movabsq $4294967297, %r14 # imm = 0x100000001
movl $10, %edi
callq putchar@PLT
movl $100, %edi
callq malloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $100, %esi
callq hipMalloc
movabsq $21474836485, %rdx # imm = 0x500000005
movq %r14, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z18createMatrixStaticPf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $100, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_3: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_4: # Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
cvttss2si (%r15,%r13,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq $5, %r13
jne .LBB3_4
# %bb.5: # in Loop: Header=BB3_3 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
addq $20, %r15
cmpq $5, %r12
jne .LBB3_3
# %bb.6: # %_Z11printMatrixPf.exit
movl $10, %edi
callq putchar@PLT
leaq 4(%r14), %rdx
movq %r14, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z25createMatrixStaticPaddingPf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $100, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.1, %edi
callq puts@PLT
movq %rbx, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_9: # %.preheader.i26
# =>This Loop Header: Depth=1
# Child Loop BB3_10 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_10: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
cvttss2si (%r14,%r12,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $5, %r12
jne .LBB3_10
# %bb.11: # in Loop: Header=BB3_9 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r15
addq $20, %r14
cmpq $5, %r15
jne .LBB3_9
# %bb.12: # %_Z11printMatrixPf.exit34
movl $10, %edi
callq putchar@PLT
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
callq hipDeviceReset
movl $10, %edi
callq putchar@PLT
xorl %eax, %eax
addq $80, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18createMatrixStaticPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z25createMatrixStaticPaddingPf, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18createMatrixStaticPf,@object # @_Z18createMatrixStaticPf
.section .rodata,"a",@progbits
.globl _Z18createMatrixStaticPf
.p2align 3, 0x0
_Z18createMatrixStaticPf:
.quad _Z33__device_stub__createMatrixStaticPf
.size _Z18createMatrixStaticPf, 8
.type _Z25createMatrixStaticPaddingPf,@object # @_Z25createMatrixStaticPaddingPf
.globl _Z25createMatrixStaticPaddingPf
.p2align 3, 0x0
_Z25createMatrixStaticPaddingPf:
.quad _Z40__device_stub__createMatrixStaticPaddingPf
.size _Z25createMatrixStaticPaddingPf, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%-3d "
.size .L.str, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z18createMatrixStaticPf"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z25createMatrixStaticPaddingPf"
.size .L__unnamed_2, 32
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "createMatrixStatic"
.size .Lstr, 19
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "createMatrixStaticPadding"
.size .Lstr.1, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__createMatrixStaticPf
.addrsig_sym _Z40__device_stub__createMatrixStaticPaddingPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18createMatrixStaticPf
.addrsig_sym _Z25createMatrixStaticPaddingPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // gpu (device) based matrix/matrix gpu code
//-------------------------------------------------------------------------
// Included CUDA libraries
//-------------------------------------------------------------------------
#include <stdio.h>
// iceil macro
// returns an integer ceil value where integer numerator is first parameter
// and integer denominator is the second parameter. iceil is the rounded
// up value of numerator/denominator when there is a remainder
// equivalent to ((num%den!=0) ? num/den+1 : num/den)
#define iceil(num,den) (num+den-1)/den
#define TILE_WIDTH 16 // block x and y dimensions
void check_error(cudaError_t error_id){
if (error_id != cudaSuccess) {
printf("Error is %d", error_id);
exit(EXIT_FAILURE);
}
}
// GPU device MatrixMulKernel kernel code
__global__ void MatrixMulKernel(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
if(Row <Mh && Col< Nw){
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
}
// End of solution part 4 ===========================================
}
__global__ void MatrixMulKernelSingleBlock(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
Row = threadIdx.y;
Col = threadIdx.x;
if(Row > Mh || Col > Nw) return;
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
// End of solution part 4 ===========================================
}
void compute_GPU(float *P, float *M, float *N, int Mh,
int Mw, int Nw) {
float *Md, *Nd, *Pd;
cudaError_t error_id;
// ===================================================================
// Solution part 1: Copy Input Data from Host to Device
// Create Device Buffers for the two input matrices
// Copy memory from the host memory to the device buffer (device memory)
// Check for error generated while using each OpenCL API call
// ===================================================================
// Allocate device memory and Transfer host arrays M and N
//**** ENTER YOUR CODE HERE ****
size_t size_M = Mh * Mw * sizeof(float);
size_t size_N = Mw*Nw * sizeof(float);
error_id = cudaMalloc((void**)&Md, size_M);
check_error(error_id);
error_id = cudaMemcpy(Md, M, size_M, cudaMemcpyHostToDevice);
check_error(error_id);
error_id = cudaMalloc((void**)&Nd,size_N);
check_error(error_id);
error_id = cudaMemcpy(Nd, N, size_N, cudaMemcpyHostToDevice);
check_error(error_id);
// Allocate device memory of P array for results
//**** ENTER YOUR CODE HERE ****
size_t size_P = Mh*Nw*sizeof(float);
error_id = cudaMalloc((void**)&Pd, size_P );
check_error(error_id);
// End of solution Part 1 ============================================
// ===================================================================
// Solution part 2
// A. Initialize the block and grid dimensions of the kernel about
// to be launched.
// [You may assume that each matrix dimension is a multiple of the
// defined constant block_size.]
// B. Launch the kernel with appropriate kernel arguments
// Do not forget to check for success at each stage before proceeding.
// ===================================================================
// Setup the kernel execution configuration parameters/launch kernel
// Stage A: Setup the kernel execution configuration parameters
// (in second part of homework take into account the case where
// the dimmensions are not an even multiple of block size)
//**** ENTER YOUR CODE HERE ****
// Stage B: Launch the kernel!! -- using the appropriate function arguments
// (remember to check for kernel launch failure!)
//**** ENTER YOUR CODE HERE ****
if(Mh == 16 && Mw == 16 && Nw == 16){
// Single Block 16*16 testing
dim3 grid(1,1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernelSingleBlock<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
else
{
int a1 = iceil(Mh, TILE_WIDTH);
int a2 = iceil(Nw, TILE_WIDTH);
dim3 grid(a2,a1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernel<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
// End of solution Part 2 ============================================
// ===================================================================
// Solution part 3
// Copy Results Device back to Host
// ===================================================================
// Transfer P from device to host
//**** ENTER YOUR CODE HERE ****
error_id = cudaMemcpy(P,Pd,size_P,cudaMemcpyDeviceToHost);
check_error(error_id);
// End of solution Part 3 ============================================
// CLEAN UP -- Free device memory when finished
//**** ENTER YOUR CODE HERE ****
error_id = cudaFree(Md);
check_error(error_id);
error_id = cudaFree(Nd);
check_error(error_id);
error_id = cudaFree(Pd);
check_error(error_id);
} | .file "tmpxft_00111ecf_00000000-6_gpu_matrixmul.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error is %d"
.text
.globl _Z11check_error9cudaError
.type _Z11check_error9cudaError, @function
_Z11check_error9cudaError:
.LFB2057:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
subq $8, %rsp
.cfi_def_cfa_offset 16
movl %edi, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z11check_error9cudaError, .-_Z11check_error9cudaError
.globl _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
.type _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii, @function
_Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15MatrixMulKernelPfS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii, .-_Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
.globl _Z15MatrixMulKernelPfS_S_iii
.type _Z15MatrixMulKernelPfS_S_iii, @function
_Z15MatrixMulKernelPfS_S_iii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z15MatrixMulKernelPfS_S_iii, .-_Z15MatrixMulKernelPfS_S_iii
.globl _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
.type _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii, @function
_Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii:
.LFB2085:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z26MatrixMulKernelSingleBlockPfS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii, .-_Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
.globl _Z26MatrixMulKernelSingleBlockPfS_S_iii
.type _Z26MatrixMulKernelSingleBlockPfS_S_iii, @function
_Z26MatrixMulKernelSingleBlockPfS_S_iii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z26MatrixMulKernelSingleBlockPfS_S_iii, .-_Z26MatrixMulKernelSingleBlockPfS_S_iii
.globl _Z11compute_GPUPfS_S_iii
.type _Z11compute_GPUPfS_S_iii, @function
_Z11compute_GPUPfS_S_iii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %rdi, (%rsp)
movq %rsi, %r15
movq %rdx, 8(%rsp)
movl %ecx, %ebp
movl %r8d, %r14d
movl %r9d, %ebx
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl %ecx, %r13d
imull %r8d, %r13d
movslq %r13d, %r13
salq $2, %r13
movl %r8d, %r12d
imull %r9d, %r12d
movslq %r12d, %r12
salq $2, %r12
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movl $1, %ecx
movq %r13, %rdx
movq %r15, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z11check_error9cudaError
leaq 32(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movl $1, %ecx
movq %r12, %rdx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movl %ebp, %r12d
imull %ebx, %r12d
movslq %r12d, %r12
salq $2, %r12
leaq 40(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z11check_error9cudaError
cmpl $16, %r14d
sete %dl
cmpl $16, %ebx
sete %al
testb %al, %dl
je .L26
cmpl $16, %ebp
je .L32
.L26:
leal 30(%rbx), %eax
movl %ebx, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 48(%rsp)
leal 30(%rbp), %eax
movl %ebp, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 52(%rsp)
movl $1, 56(%rsp)
movl $16, 60(%rsp)
movl $16, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L33
.L28:
movl $2, %ecx
movq %r12, %rdx
movq 40(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movq 32(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movq 40(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $16, 60(%rsp)
movl $16, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L28
movl $16, %r9d
movl $16, %r8d
movl $16, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
jmp .L28
.L33:
movl %ebx, %r9d
movl %r14d, %r8d
movl %ebp, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
jmp .L28
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z11compute_GPUPfS_S_iii, .-_Z11compute_GPUPfS_S_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "_Z26MatrixMulKernelSingleBlockPfS_S_iii"
.section .rodata.str1.1
.LC2:
.string "_Z15MatrixMulKernelPfS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z26MatrixMulKernelSingleBlockPfS_S_iii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulKernelPfS_S_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // gpu (device) based matrix/matrix gpu code
//-------------------------------------------------------------------------
// Included CUDA libraries
//-------------------------------------------------------------------------
#include <stdio.h>
// iceil macro
// returns an integer ceil value where integer numerator is first parameter
// and integer denominator is the second parameter. iceil is the rounded
// up value of numerator/denominator when there is a remainder
// equivalent to ((num%den!=0) ? num/den+1 : num/den)
#define iceil(num,den) (num+den-1)/den
#define TILE_WIDTH 16 // block x and y dimensions
void check_error(cudaError_t error_id){
if (error_id != cudaSuccess) {
printf("Error is %d", error_id);
exit(EXIT_FAILURE);
}
}
// GPU device MatrixMulKernel kernel code
__global__ void MatrixMulKernel(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
if(Row <Mh && Col< Nw){
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
}
// End of solution part 4 ===========================================
}
__global__ void MatrixMulKernelSingleBlock(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
Row = threadIdx.y;
Col = threadIdx.x;
if(Row > Mh || Col > Nw) return;
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
// End of solution part 4 ===========================================
}
void compute_GPU(float *P, float *M, float *N, int Mh,
int Mw, int Nw) {
float *Md, *Nd, *Pd;
cudaError_t error_id;
// ===================================================================
// Solution part 1: Copy Input Data from Host to Device
// Create Device Buffers for the two input matrices
// Copy memory from the host memory to the device buffer (device memory)
// Check for error generated while using each OpenCL API call
// ===================================================================
// Allocate device memory and Transfer host arrays M and N
//**** ENTER YOUR CODE HERE ****
size_t size_M = Mh * Mw * sizeof(float);
size_t size_N = Mw*Nw * sizeof(float);
error_id = cudaMalloc((void**)&Md, size_M);
check_error(error_id);
error_id = cudaMemcpy(Md, M, size_M, cudaMemcpyHostToDevice);
check_error(error_id);
error_id = cudaMalloc((void**)&Nd,size_N);
check_error(error_id);
error_id = cudaMemcpy(Nd, N, size_N, cudaMemcpyHostToDevice);
check_error(error_id);
// Allocate device memory of P array for results
//**** ENTER YOUR CODE HERE ****
size_t size_P = Mh*Nw*sizeof(float);
error_id = cudaMalloc((void**)&Pd, size_P );
check_error(error_id);
// End of solution Part 1 ============================================
// ===================================================================
// Solution part 2
// A. Initialize the block and grid dimensions of the kernel about
// to be launched.
// [You may assume that each matrix dimension is a multiple of the
// defined constant block_size.]
// B. Launch the kernel with appropriate kernel arguments
// Do not forget to check for success at each stage before proceeding.
// ===================================================================
// Setup the kernel execution configuration parameters/launch kernel
// Stage A: Setup the kernel execution configuration parameters
// (in second part of homework take into account the case where
// the dimmensions are not an even multiple of block size)
//**** ENTER YOUR CODE HERE ****
// Stage B: Launch the kernel!! -- using the appropriate function arguments
// (remember to check for kernel launch failure!)
//**** ENTER YOUR CODE HERE ****
if(Mh == 16 && Mw == 16 && Nw == 16){
// Single Block 16*16 testing
dim3 grid(1,1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernelSingleBlock<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
else
{
int a1 = iceil(Mh, TILE_WIDTH);
int a2 = iceil(Nw, TILE_WIDTH);
dim3 grid(a2,a1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernel<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
// End of solution Part 2 ============================================
// ===================================================================
// Solution part 3
// Copy Results Device back to Host
// ===================================================================
// Transfer P from device to host
//**** ENTER YOUR CODE HERE ****
error_id = cudaMemcpy(P,Pd,size_P,cudaMemcpyDeviceToHost);
check_error(error_id);
// End of solution Part 3 ============================================
// CLEAN UP -- Free device memory when finished
//**** ENTER YOUR CODE HERE ****
error_id = cudaFree(Md);
check_error(error_id);
error_id = cudaFree(Nd);
check_error(error_id);
error_id = cudaFree(Pd);
check_error(error_id);
} | // gpu (device) based matrix/matrix gpu code
//-------------------------------------------------------------------------
// Included CUDA libraries
//-------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <stdio.h>
// iceil macro
// returns an integer ceil value where integer numerator is first parameter
// and integer denominator is the second parameter. iceil is the rounded
// up value of numerator/denominator when there is a remainder
// equivalent to ((num%den!=0) ? num/den+1 : num/den)
#define iceil(num,den) (num+den-1)/den
#define TILE_WIDTH 16 // block x and y dimensions
void check_error(hipError_t error_id){
if (error_id != hipSuccess) {
printf("Error is %d", error_id);
exit(EXIT_FAILURE);
}
}
// GPU device MatrixMulKernel kernel code
__global__ void MatrixMulKernel(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
if(Row <Mh && Col< Nw){
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
}
// End of solution part 4 ===========================================
}
__global__ void MatrixMulKernelSingleBlock(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
Row = threadIdx.y;
Col = threadIdx.x;
if(Row > Mh || Col > Nw) return;
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
// End of solution part 4 ===========================================
}
// Host driver for the GPU matrix multiply P = M * N (row-major):
// allocates device buffers, copies M and N to the device, launches the
// single-block kernel for the exact 16x16x16 case or the tiled-grid kernel
// otherwise, copies the result back into P, and frees device memory.
// Exits via check_error() on any failing HIP API call.
// NOTE(review): the kernel launch itself is not checked (no hipGetLastError
// or hipDeviceSynchronize after <<<>>>); launch failures go unnoticed here.
void compute_GPU(float *P, float *M, float *N, int Mh,
int Mw, int Nw) {
float *Md, *Nd, *Pd;
hipError_t error_id;
// ===================================================================
// Solution part 1: Copy Input Data from Host to Device
// Create Device Buffers for the two input matrices
// Copy memory from the host memory to the device buffer (device memory)
// Check for error generated while using each OpenCL API call
// ===================================================================
// Allocate device memory and Transfer host arrays M and N
//**** ENTER YOUR CODE HERE ****
size_t size_M = Mh * Mw * sizeof(float);
size_t size_N = Mw*Nw * sizeof(float);
error_id = hipMalloc((void**)&Md, size_M);
check_error(error_id);
error_id = hipMemcpy(Md, M, size_M, hipMemcpyHostToDevice);
check_error(error_id);
error_id = hipMalloc((void**)&Nd,size_N);
check_error(error_id);
error_id = hipMemcpy(Nd, N, size_N, hipMemcpyHostToDevice);
check_error(error_id);
// Allocate device memory of P array for results
//**** ENTER YOUR CODE HERE ****
size_t size_P = Mh*Nw*sizeof(float);
error_id = hipMalloc((void**)&Pd, size_P );
check_error(error_id);
// End of solution Part 1 ============================================
// ===================================================================
// Solution part 2
// A. Initialize the block and grid dimensions of the kernel about
// to be launched.
// [You may assume that each matrix dimension is a multiple of the
// defined constant block_size.]
// B. Launch the kernel with appropriate kernel arguments
// Do not forget to check for success at each stage before proceeding.
// ===================================================================
// Setup the kernel execution configuration parameters/launch kernel
// Stage A: Setup the kernel execution configuration parameters
// (in second part of homework take into account the case where
// the dimmensions are not an even multiple of block size)
//**** ENTER YOUR CODE HERE ****
// Stage B: Launch the kernel!! -- using the appropriate function arguments
// (remember to check for kernel launch failure!)
//**** ENTER YOUR CODE HERE ****
if(Mh == 16 && Mw == 16 && Nw == 16){
// Single Block 16*16 testing
dim3 grid(1,1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernelSingleBlock<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
else
{
// General case: one TILE_WIDTH x TILE_WIDTH block per output tile,
// rounding the grid up so partial edge tiles are covered (iceil).
int a1 = iceil(Mh, TILE_WIDTH);
int a2 = iceil(Nw, TILE_WIDTH);
dim3 grid(a2,a1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernel<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
// End of solution Part 2 ============================================
// ===================================================================
// Solution part 3
// Copy Results Device back to Host
// ===================================================================
// Transfer P from device to host
//**** ENTER YOUR CODE HERE ****
// hipMemcpy here also synchronizes with the preceding kernel launch.
error_id = hipMemcpy(P,Pd,size_P,hipMemcpyDeviceToHost);
check_error(error_id);
// End of solution Part 3 ============================================
// CLEAN UP -- Free device memory when finished
//**** ENTER YOUR CODE HERE ****
error_id = hipFree(Md);
check_error(error_id);
error_id = hipFree(Nd);
check_error(error_id);
error_id = hipFree(Pd);
check_error(error_id);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // gpu (device) based matrix/matrix gpu code
//-------------------------------------------------------------------------
// Included CUDA libraries
//-------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <stdio.h>
// iceil macro
// returns an integer ceil value where integer numerator is first parameter
// and integer denominator is the second parameter. iceil is the rounded
// up value of numerator/denominator when there is a remainder
// equivalent to ((num%den!=0) ? num/den+1 : num/den)
#define iceil(num,den) (num+den-1)/den
#define TILE_WIDTH 16 // block x and y dimensions
// Abort the program with EXIT_FAILURE if a HIP API call failed; no-op on
// hipSuccess. NOTE(review): the message goes to stdout without a trailing
// newline; stderr + '\n' (and hipGetErrorString) would be friendlier, but the
// string is kept byte-identical to match the compiled output in this file.
void check_error(hipError_t error_id){
if (error_id != hipSuccess) {
printf("Error is %d", error_id);
exit(EXIT_FAILURE);
}
}
// GPU device MatrixMulKernel kernel code
// Tiled-grid matrix-multiply kernel: one thread per element of
// Pd = Md * Nd (row-major), Md is Mh x Mw, Nd is Mw x Nw, Pd is Mh x Nw.
// Launched with TILE_WIDTH x TILE_WIDTH blocks over the whole Pd matrix;
// the Row/Col guard makes partial edge tiles safe.
__global__ void MatrixMulKernel(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
// Global output coordinates: block offset (in tiles) plus thread offset.
Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Exclusive bounds: threads of partial edge tiles that fall outside Pd do nothing.
if(Row <Mh && Col< Nw){
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
// Dot product of row 'Row' of Md with column 'Col' of Nd.
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
}
// End of solution part 4 ===========================================
}
// Single-block matrix-multiply kernel: one TILE_WIDTH x TILE_WIDTH block,
// each thread computes one element of Pd = Md * Nd (row-major), where Md is
// Mh x Mw, Nd is Mw x Nw and Pd is Mh x Nw.
__global__ void MatrixMulKernelSingleBlock(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
// Single block launch: thread indices are the global output indices.
Row = threadIdx.y;
Col = threadIdx.x;
// NOTE(review): this guard uses '>' so Row == Mh / Col == Nw still pass;
// an exclusive upper bound would use '>='. Harmless for the 16x16 launch
// this kernel is used with, but off-by-one in general. Left unchanged to
// match the compiled output paired with this source elsewhere in the file.
if(Row > Mh || Col > Nw) return;
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
// Dot product of row 'Row' of Md with column 'Col' of Nd.
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
// End of solution part 4 ===========================================
}
// Host driver for the GPU matrix multiply P = M * N (row-major):
// allocates device buffers, copies M and N to the device, launches the
// single-block kernel for the exact 16x16x16 case or the tiled-grid kernel
// otherwise, copies the result back into P, and frees device memory.
// Exits via check_error() on any failing HIP API call.
// NOTE(review): the kernel launch itself is not checked (no hipGetLastError
// or hipDeviceSynchronize after <<<>>>); launch failures go unnoticed here.
void compute_GPU(float *P, float *M, float *N, int Mh,
int Mw, int Nw) {
float *Md, *Nd, *Pd;
hipError_t error_id;
// ===================================================================
// Solution part 1: Copy Input Data from Host to Device
// Create Device Buffers for the two input matrices
// Copy memory from the host memory to the device buffer (device memory)
// Check for error generated while using each OpenCL API call
// ===================================================================
// Allocate device memory and Transfer host arrays M and N
//**** ENTER YOUR CODE HERE ****
size_t size_M = Mh * Mw * sizeof(float);
size_t size_N = Mw*Nw * sizeof(float);
error_id = hipMalloc((void**)&Md, size_M);
check_error(error_id);
error_id = hipMemcpy(Md, M, size_M, hipMemcpyHostToDevice);
check_error(error_id);
error_id = hipMalloc((void**)&Nd,size_N);
check_error(error_id);
error_id = hipMemcpy(Nd, N, size_N, hipMemcpyHostToDevice);
check_error(error_id);
// Allocate device memory of P array for results
//**** ENTER YOUR CODE HERE ****
size_t size_P = Mh*Nw*sizeof(float);
error_id = hipMalloc((void**)&Pd, size_P );
check_error(error_id);
// End of solution Part 1 ============================================
// ===================================================================
// Solution part 2
// A. Initialize the block and grid dimensions of the kernel about
// to be launched.
// [You may assume that each matrix dimension is a multiple of the
// defined constant block_size.]
// B. Launch the kernel with appropriate kernel arguments
// Do not forget to check for success at each stage before proceeding.
// ===================================================================
// Setup the kernel execution configuration parameters/launch kernel
// Stage A: Setup the kernel execution configuration parameters
// (in second part of homework take into account the case where
// the dimmensions are not an even multiple of block size)
//**** ENTER YOUR CODE HERE ****
// Stage B: Launch the kernel!! -- using the appropriate function arguments
// (remember to check for kernel launch failure!)
//**** ENTER YOUR CODE HERE ****
if(Mh == 16 && Mw == 16 && Nw == 16){
// Single Block 16*16 testing
dim3 grid(1,1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernelSingleBlock<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
else
{
// General case: one TILE_WIDTH x TILE_WIDTH block per output tile,
// rounding the grid up so partial edge tiles are covered (iceil).
int a1 = iceil(Mh, TILE_WIDTH);
int a2 = iceil(Nw, TILE_WIDTH);
dim3 grid(a2,a1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernel<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
// End of solution Part 2 ============================================
// ===================================================================
// Solution part 3
// Copy Results Device back to Host
// ===================================================================
// Transfer P from device to host
//**** ENTER YOUR CODE HERE ****
// hipMemcpy here also synchronizes with the preceding kernel launch.
error_id = hipMemcpy(P,Pd,size_P,hipMemcpyDeviceToHost);
check_error(error_id);
// End of solution Part 3 ============================================
// CLEAN UP -- Free device memory when finished
//**** ENTER YOUR CODE HERE ****
error_id = hipFree(Md);
check_error(error_id);
error_id = hipFree(Nd);
check_error(error_id);
error_id = hipFree(Pd);
check_error(error_id);
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15MatrixMulKernelPfS_S_iii
.globl _Z15MatrixMulKernelPfS_S_iii
.p2align 8
.type _Z15MatrixMulKernelPfS_S_iii,@function
; ---------------------------------------------------------------------------
; _Z15MatrixMulKernelPfS_S_iii -- gfx1100 device code for MatrixMulKernel()
; (tiled-grid matmul). Compiler output (clang/LLVM); do not hand-edit.
; s[0:1] = kernarg segment base (per .amdhsa_user_sgpr_kernarg_segment_ptr);
; kernarg offsets per the metadata below: 0x0 Pd, 0x8 Md, 0x10 Nd,
; 0x18 Mh, 0x1c Mw, 0x20 Nw. s14/s15 = workgroup ids x/y; v0 packs the
; workitem id (x in bits 0-9, y in bits 10-19).
; ---------------------------------------------------------------------------
_Z15MatrixMulKernelPfS_S_iii:
; Compute Row = wg_y*16 + tid_y (v5), Col = wg_x*16 + tid_x (v0) and mask
; off threads with Row >= Mh or Col >= Nw via EXEC.
s_clause 0x1
s_load_b32 s2, s[0:1], 0x18
s_load_b32 s3, s[0:1], 0x20
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v5, s15, 4, v1
v_lshl_add_u32 v0, s14, 4, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s2, v5
v_cmp_gt_i32_e64 s2, s3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_6
; Loop setup: skip the loop entirely when Mw < 1.
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
; v[1:2] = &Md[Row*Mw] (byte address); v3 walks k*Nw+Col for the Nd column.
s_load_b128 s[4:7], s[0:1], 0x8
v_mul_lo_u32 v1, v5, s2
v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
.p2align 6
; k-loop: v6 (Pvalue accumulator) += Md element * Nd element, Mw iterations.
.LBB0_3:
v_ashrrev_i32_e32 v4, 31, v3
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s2, 0
v_lshlrev_b64 v[7:8], 2, v[3:4]
v_add_nc_u32_e32 v3, s3, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
global_load_b32 v4, v[1:2], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v1, vcc_lo, v1, 4
v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v4, v7
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
; Mw < 1: Pvalue stays 0.
.LBB0_4:
v_mov_b32_e32 v6, 0
; Store Pvalue to Pd[Row*Nw + Col].
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[1:2], null, v5, s3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v6, off
; Out-of-bounds threads rejoin here; end of program.
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixMulKernelPfS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 36
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15MatrixMulKernelPfS_S_iii, .Lfunc_end0-_Z15MatrixMulKernelPfS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z26MatrixMulKernelSingleBlockPfS_S_iii
.globl _Z26MatrixMulKernelSingleBlockPfS_S_iii
.p2align 8
.type _Z26MatrixMulKernelSingleBlockPfS_S_iii,@function
; ---------------------------------------------------------------------------
; _Z26MatrixMulKernelSingleBlockPfS_S_iii -- gfx1100 device code for
; MatrixMulKernelSingleBlock(). Compiler output (clang/LLVM); do not hand-edit.
; s[0:1] = kernarg segment base; kernarg offsets per the metadata below:
; 0x0 Pd, 0x8 Md, 0x10 Nd, 0x18 Mh, 0x1c Mw, 0x20 Nw. v0 packs the workitem
; id (x in bits 0-9, y in bits 10-19). Note the v_cmp_ge (>=) tests mirror
; the source's inclusive 'Row > Mh || Col > Nw' early return.
; ---------------------------------------------------------------------------
_Z26MatrixMulKernelSingleBlockPfS_S_iii:
; Row = tid_y (v5), Col = tid_x (v0); mask threads with Row > Mh or Col > Nw.
s_clause 0x1
s_load_b32 s2, s[0:1], 0x18
s_load_b32 s4, s[0:1], 0x20
v_bfe_u32 v5, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ge_i32_e32 vcc_lo, s2, v5
v_cmp_ge_i32_e64 s2, s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_6
; Loop setup: skip the loop entirely when Mw < 1.
s_load_b32 s6, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s6, 1
s_cbranch_scc1 .LBB1_4
; v[3:4] = &Md[Row*Mw]; v[1:2] = &Nd[Col]; s[2:3] = Nw*4 (Nd row stride).
s_load_b128 s[8:11], s[0:1], 0x8
v_mul_lo_u32 v1, v5, s6
v_mov_b32_e32 v2, 0
v_lshlrev_b32_e32 v6, 2, v0
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, s2, s10, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s8, v3
v_add_co_ci_u32_e64 v2, null, s11, 0, s2
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v4, vcc_lo
v_mov_b32_e32 v6, 0
s_lshl_b64 s[2:3], s[4:5], 2
; k-loop: v6 (Pvalue accumulator) += Md element * Nd element, Mw iterations.
.LBB1_3:
global_load_b32 v7, v[3:4], off
global_load_b32 v8, v[1:2], off
v_add_co_u32 v1, vcc_lo, v1, s2
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_add_i32 s6, s6, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s6, 0
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v7, v8
s_cbranch_scc0 .LBB1_3
s_branch .LBB1_5
; Mw < 1: Pvalue stays 0.
.LBB1_4:
v_mov_b32_e32 v6, 0
; Store Pvalue to Pd[Row*Nw + Col].
.LBB1_5:
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[1:2], null, v5, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v6, off
; Early-returning threads rejoin here; end of program.
.LBB1_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26MatrixMulKernelSingleBlockPfS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 36
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 12
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z26MatrixMulKernelSingleBlockPfS_S_iii, .Lfunc_end1-_Z26MatrixMulKernelSingleBlockPfS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 36
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixMulKernelPfS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixMulKernelPfS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 36
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26MatrixMulKernelSingleBlockPfS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 14
.sgpr_spill_count: 0
.symbol: _Z26MatrixMulKernelSingleBlockPfS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // gpu (device) based matrix/matrix gpu code
//-------------------------------------------------------------------------
// Included CUDA libraries
//-------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <stdio.h>
// iceil macro
// returns an integer ceil value where integer numerator is first parameter
// and integer denominator is the second parameter. iceil is the rounded
// up value of numerator/denominator when there is a remainder
// equivalent to ((num%den!=0) ? num/den+1 : num/den)
#define iceil(num,den) (num+den-1)/den
#define TILE_WIDTH 16 // block x and y dimensions
// Abort the program with EXIT_FAILURE if a HIP API call failed; no-op on
// hipSuccess. NOTE(review): the message goes to stdout without a trailing
// newline; stderr + '\n' (and hipGetErrorString) would be friendlier, but the
// string is kept byte-identical to match the compiled output in this file.
void check_error(hipError_t error_id){
if (error_id != hipSuccess) {
printf("Error is %d", error_id);
exit(EXIT_FAILURE);
}
}
// GPU device MatrixMulKernel kernel code
// Tiled-grid matrix-multiply kernel: one thread per element of
// Pd = Md * Nd (row-major), Md is Mh x Mw, Nd is Mw x Nw, Pd is Mh x Nw.
// Launched with TILE_WIDTH x TILE_WIDTH blocks over the whole Pd matrix;
// the Row/Col guard makes partial edge tiles safe.
__global__ void MatrixMulKernel(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
// Global output coordinates: block offset (in tiles) plus thread offset.
Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Exclusive bounds: threads of partial edge tiles that fall outside Pd do nothing.
if(Row <Mh && Col< Nw){
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
// Dot product of row 'Row' of Md with column 'Col' of Nd.
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
}
// End of solution part 4 ===========================================
}
// Single-block matrix-multiply kernel: one TILE_WIDTH x TILE_WIDTH block,
// each thread computes one element of Pd = Md * Nd (row-major), where Md is
// Mh x Mw, Nd is Mw x Nw and Pd is Mh x Nw.
__global__ void MatrixMulKernelSingleBlock(float *Pd, float *Md, float *Nd, int Mh,
int Mw, int Nw) {
// ==================================================================
// Solution part 4
// Determine the output index of each thread.
// Compute the dot product of one row of Md and one column of Nd
// for each thread.
// Write the computed value to matrix P at the correct output index
// ==================================================================
// Calculate the global row and column indices of the Pd matrix
int Row;
int Col;
//**** ENTER YOUR CODE HERE ****
// Single block launch: thread indices are the global output indices.
Row = threadIdx.y;
Col = threadIdx.x;
// NOTE(review): this guard uses '>' so Row == Mh / Col == Nw still pass;
// an exclusive upper bound would use '>='. Harmless for the 16x16 launch
// this kernel is used with, but off-by-one in general. Left unchanged to
// match the compiled output paired with this source elsewhere in the file.
if(Row > Mh || Col > Nw) return;
// Each thread computes one dot product element of the block sub-matrix
// access correct row of Md and Column of Nd assuming row-major allocations
// (Note: in second part of hw1 you will want to make sure that only
// the threads that are assigned valid regions of the computation are
// active
//**** ENTER YOUR CODE HERE ****
float Pvalue = 0;
//**** ENTER YOUR CODE HERE ****
// Dot product of row 'Row' of Md with column 'Col' of Nd.
for(int k=0; k < Mw ;++k){
Pvalue += Md[Row*Mw+k] * Nd[k*Nw + Col];
}
// place final result in specified location of global Pd memory
//**** ENTER YOUR CODE HERE ****
Pd[Row * Nw + Col] = Pvalue;
// End of solution part 4 ===========================================
}
// Host driver for the GPU matrix multiply P = M * N (row-major):
// allocates device buffers, copies M and N to the device, launches the
// single-block kernel for the exact 16x16x16 case or the tiled-grid kernel
// otherwise, copies the result back into P, and frees device memory.
// Exits via check_error() on any failing HIP API call.
// NOTE(review): the kernel launch itself is not checked (no hipGetLastError
// or hipDeviceSynchronize after <<<>>>); launch failures go unnoticed here.
void compute_GPU(float *P, float *M, float *N, int Mh,
int Mw, int Nw) {
float *Md, *Nd, *Pd;
hipError_t error_id;
// ===================================================================
// Solution part 1: Copy Input Data from Host to Device
// Create Device Buffers for the two input matrices
// Copy memory from the host memory to the device buffer (device memory)
// Check for error generated while using each OpenCL API call
// ===================================================================
// Allocate device memory and Transfer host arrays M and N
//**** ENTER YOUR CODE HERE ****
size_t size_M = Mh * Mw * sizeof(float);
size_t size_N = Mw*Nw * sizeof(float);
error_id = hipMalloc((void**)&Md, size_M);
check_error(error_id);
error_id = hipMemcpy(Md, M, size_M, hipMemcpyHostToDevice);
check_error(error_id);
error_id = hipMalloc((void**)&Nd,size_N);
check_error(error_id);
error_id = hipMemcpy(Nd, N, size_N, hipMemcpyHostToDevice);
check_error(error_id);
// Allocate device memory of P array for results
//**** ENTER YOUR CODE HERE ****
size_t size_P = Mh*Nw*sizeof(float);
error_id = hipMalloc((void**)&Pd, size_P );
check_error(error_id);
// End of solution Part 1 ============================================
// ===================================================================
// Solution part 2
// A. Initialize the block and grid dimensions of the kernel about
// to be launched.
// [You may assume that each matrix dimension is a multiple of the
// defined constant block_size.]
// B. Launch the kernel with appropriate kernel arguments
// Do not forget to check for success at each stage before proceeding.
// ===================================================================
// Setup the kernel execution configuration parameters/launch kernel
// Stage A: Setup the kernel execution configuration parameters
// (in second part of homework take into account the case where
// the dimmensions are not an even multiple of block size)
//**** ENTER YOUR CODE HERE ****
// Stage B: Launch the kernel!! -- using the appropriate function arguments
// (remember to check for kernel launch failure!)
//**** ENTER YOUR CODE HERE ****
if(Mh == 16 && Mw == 16 && Nw == 16){
// Single Block 16*16 testing
dim3 grid(1,1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernelSingleBlock<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
else
{
// General case: one TILE_WIDTH x TILE_WIDTH block per output tile,
// rounding the grid up so partial edge tiles are covered (iceil).
int a1 = iceil(Mh, TILE_WIDTH);
int a2 = iceil(Nw, TILE_WIDTH);
dim3 grid(a2,a1);
dim3 block(TILE_WIDTH, TILE_WIDTH);
MatrixMulKernel<<<grid,block>>>(Pd, Md, Nd,Mh, Mw, Nw);
}
// End of solution Part 2 ============================================
// ===================================================================
// Solution part 3
// Copy Results Device back to Host
// ===================================================================
// Transfer P from device to host
//**** ENTER YOUR CODE HERE ****
// hipMemcpy here also synchronizes with the preceding kernel launch.
error_id = hipMemcpy(P,Pd,size_P,hipMemcpyDeviceToHost);
check_error(error_id);
// End of solution Part 3 ============================================
// CLEAN UP -- Free device memory when finished
//**** ENTER YOUR CODE HERE ****
error_id = hipFree(Md);
check_error(error_id);
error_id = hipFree(Nd);
check_error(error_id);
error_id = hipFree(Pd);
check_error(error_id);
}
.file "gpu_matrixmul.hip"
.globl _Z11check_error10hipError_t # -- Begin function _Z11check_error10hipError_t
.p2align 4, 0x90
.type _Z11check_error10hipError_t,@function
# ---------------------------------------------------------------------------
# check_error(hipError_t) -- x86-64 SysV host code (compiler output; do not
# hand-edit). edi = error_id; returns immediately when zero (hipSuccess),
# otherwise printf("Error is %d", error_id) and exit(1).
# ---------------------------------------------------------------------------
_Z11check_error10hipError_t: # @_Z11check_error10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB0_2
# %bb.1:
# Success path: plain return, no frame needed.
retq
.LBB0_2:
# Error path: push realigns rsp to 16 before the call (SysV requirement).
pushq %rax
.cfi_def_cfa_offset 16
movl %edi, %esi
movl $.L.str, %edi
# al = 0: no vector registers used for the variadic printf call.
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z11check_error10hipError_t, .Lfunc_end0-_Z11check_error10hipError_t
.cfi_endproc
# -- End function
.globl _Z30__device_stub__MatrixMulKernelPfS_S_iii # -- Begin function _Z30__device_stub__MatrixMulKernelPfS_S_iii
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulKernelPfS_S_iii,@function
# ---------------------------------------------------------------------------
# HIP device stub for MatrixMulKernel (compiler output; do not hand-edit).
# Spills the six kernel args (rdi,rsi,rdx = Pd,Md,Nd; ecx,r8d,r9d = Mh,Mw,Nw)
# to the stack, builds the pointer array expected by hipLaunchKernel, pops the
# launch configuration pushed by the <<<>>> expansion, then tail-calls into
# the HIP runtime.
# ---------------------------------------------------------------------------
_Z30__device_stub__MatrixMulKernelPfS_S_iii: # @_Z30__device_stub__MatrixMulKernelPfS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill the raw argument values.
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
# Build the void* kernel-argument array at 96(%rsp).
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
# Retrieve grid/block dims, shared-mem size and stream pushed by <<<>>>.
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_iii, %edi
# Stack args 7/8 (sharedMem, stream) for hipLaunchKernel.
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z30__device_stub__MatrixMulKernelPfS_S_iii, .Lfunc_end1-_Z30__device_stub__MatrixMulKernelPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii # -- Begin function _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.p2align 4, 0x90
.type _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii,@function
# ---------------------------------------------------------------------------
# HIP device stub for MatrixMulKernelSingleBlock (compiler output; do not
# hand-edit). Identical shape to the MatrixMulKernel stub above: spill args,
# build the void* argument array, pop the pushed launch configuration, call
# hipLaunchKernel with the single-block kernel's handle.
# ---------------------------------------------------------------------------
_Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii: # @_Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill the raw argument values (Pd, Md, Nd, Mh, Mw, Nw).
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
# Build the void* kernel-argument array at 96(%rsp).
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
# Retrieve grid/block dims, shared-mem size and stream pushed by <<<>>>.
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z26MatrixMulKernelSingleBlockPfS_S_iii, %edi
# Stack args 7/8 (sharedMem, stream) for hipLaunchKernel.
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii, .Lfunc_end2-_Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z11compute_GPUPfS_S_iii # -- Begin function _Z11compute_GPUPfS_S_iii
.p2align 4, 0x90
.type _Z11compute_GPUPfS_S_iii,@function
# ---------------------------------------------------------------------------
# compute_GPU(float*,float*,float*,int,int,int) -- x86-64 SysV host code
# (compiler output with check_error() inlined; do not hand-edit).
# Incoming: rdi=P rsi=M rdx=N ecx=Mh r8d=Mw r9d=Nw. Saved into callee-saved
# regs / a spill slot: r13=M r14=N r12d=Mh ebp=Mw r15d=Nw, P at 160(%rsp).
# Any non-zero HIP status jumps to .LBB3_18 (printf + exit(1)).
# ---------------------------------------------------------------------------
_Z11compute_GPUPfS_S_iii: # @_Z11compute_GPUPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r15d
movl %r8d, %ebp
movl %ecx, %r12d
movq %rdx, %r14
movq %rsi, %r13
movq %rdi, 160(%rsp) # 8-byte Spill
# size_M = Mh*Mw*4; hipMalloc(&Md, size_M); Md slot = 32(%rsp).
movl %r8d, %eax
imull %ecx, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 32(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB3_18
# %bb.1: # %_Z11check_error10hipError_t.exit
# hipMemcpy(Md, M, size_M, hipMemcpyHostToDevice=1).
movq 32(%rsp), %rdi
movq %r13, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_18
# %bb.2: # %_Z11check_error10hipError_t.exit57
# size_N = Mw*Nw*4; hipMalloc(&Nd, size_N); Nd slot = 24(%rsp).
movl %r15d, %eax
imull %ebp, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB3_18
# %bb.3: # %_Z11check_error10hipError_t.exit59
# hipMemcpy(Nd, N, size_N, hipMemcpyHostToDevice=1).
movq 24(%rsp), %rdi
movq %r14, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_18
# %bb.4: # %_Z11check_error10hipError_t.exit61
# size_P = Mh*Nw*4 (kept in r14 for the later copy-back); hipMalloc(&Pd);
# Pd slot = 16(%rsp).
movl %r15d, %eax
imull %r12d, %eax
movslq %eax, %r14
shlq $2, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB3_18
# %bb.5: # %_Z11check_error10hipError_t.exit63
# Branch: exact 16x16x16 case uses the single-block kernel.
cmpl $16, %r12d
jne .LBB3_10
# %bb.6: # %_Z11check_error10hipError_t.exit63
cmpl $16, %ebp
jne .LBB3_10
# %bb.7: # %_Z11check_error10hipError_t.exit63
cmpl $16, %r15d
jne .LBB3_10
# %bb.8:
# grid = (1,1) packed in rdi; block = (16,16) packed in rdx.
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_13
# %bb.9:
# Inlined device stub: argument array for MatrixMulKernelSingleBlock.
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $16, 12(%rsp)
movl $16, 8(%rsp)
movl $16, 4(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z26MatrixMulKernelSingleBlockPfS_S_iii, %edi
jmp .LBB3_12
.LBB3_10:
# General case: a1 = iceil(Mh,16), a2 = iceil(Nw,16) via sign-correct
# (x+15)/16; grid packed as a1<<32 | a2, block = (16,16).
leal 15(%r12), %eax
leal 30(%r12), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $4, %edi
leal 15(%r15), %eax
leal 30(%r15), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $4, %ecx
shlq $32, %rdi
orq %rcx, %rdi
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_13
# %bb.11:
# Inlined device stub: argument array for MatrixMulKernel.
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %r12d, 12(%rsp)
movl %ebp, 8(%rsp)
movl %r15d, 4(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_iii, %edi
.LBB3_12:
# Shared launch tail: push sharedMem/stream and call the runtime.
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_13:
# hipMemcpy(P, Pd, size_P, hipMemcpyDeviceToHost=2).
movq 16(%rsp), %rsi
movq 160(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_18
# %bb.14: # %_Z11check_error10hipError_t.exit71
# hipFree(Md) / hipFree(Nd) / hipFree(Pd), each checked.
movq 32(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB3_18
# %bb.15: # %_Z11check_error10hipError_t.exit73
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB3_18
# %bb.16: # %_Z11check_error10hipError_t.exit75
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB3_18
# %bb.17: # %_Z11check_error10hipError_t.exit77
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_18:
.cfi_def_cfa_offset 224
# Inlined check_error failure path: printf("Error is %d", status); exit(1).
movl $.L.str, %edi
movl %eax, %esi
# al = 0: no vector registers used for the variadic printf call.
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end3:
.size _Z11compute_GPUPfS_S_iii, .Lfunc_end3-_Z11compute_GPUPfS_S_iii
.cfi_endproc
# -- End function
# __hip_module_ctor — HIP module constructor (invoked via .init_array).
# Registers the embedded fat binary exactly once (guarded by the
# __hip_gpubin_handle cache) and registers both kernel host stubs with the
# HIP runtime, then tail-calls atexit(__hip_module_dtor) for teardown.
# Clobbers: caller-saved regs; preserves rbx via push/pop.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Register the fat binary only on the first call.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
# Zero the 32-byte on-stack argument block (thread-limit/size fields),
# then register the first kernel: name string doubles as device name.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixMulKernelPfS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Same dance for the single-block kernel.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26MatrixMulKernelSingleBlockPfS_S_iii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Schedule the destructor and return through atexit (tail call).
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
# __hip_module_dtor — unregisters the fat binary at process exit.
# No-op if the ctor never registered (handle still NULL); clears the
# handle afterwards so a second invocation is also a no-op.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
# push/pop of rax only realigns rsp to 16 for the call (value unused).
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error is %d"
.size .L.str, 12
.type _Z15MatrixMulKernelPfS_S_iii,@object # @_Z15MatrixMulKernelPfS_S_iii
.section .rodata,"a",@progbits
.globl _Z15MatrixMulKernelPfS_S_iii
.p2align 3, 0x0
_Z15MatrixMulKernelPfS_S_iii:
.quad _Z30__device_stub__MatrixMulKernelPfS_S_iii
.size _Z15MatrixMulKernelPfS_S_iii, 8
.type _Z26MatrixMulKernelSingleBlockPfS_S_iii,@object # @_Z26MatrixMulKernelSingleBlockPfS_S_iii
.globl _Z26MatrixMulKernelSingleBlockPfS_S_iii
.p2align 3, 0x0
_Z26MatrixMulKernelSingleBlockPfS_S_iii:
.quad _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.size _Z26MatrixMulKernelSingleBlockPfS_S_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15MatrixMulKernelPfS_S_iii"
.size .L__unnamed_1, 29
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z26MatrixMulKernelSingleBlockPfS_S_iii"
.size .L__unnamed_2, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulKernelPfS_S_iii
.addrsig_sym _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulKernelPfS_S_iii
.addrsig_sym _Z26MatrixMulKernelSingleBlockPfS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00111ecf_00000000-6_gpu_matrixmul.cudafe1.cpp"
.text
#APP
#NO_APP
# _ZL26__cudaUnregisterBinaryUtilv — atexit hook: unregisters the CUDA
# fat cubin handle stashed by the registration constructor.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
# sub keeps rsp 16-aligned at the call site (SysV requirement).
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error is %d"
.text
# void check_error(cudaError err)
# Fast path: err == cudaSuccess (0) -> plain ret.
# Error path: printf("Error is %d", err) via __printf_chk, then exit(1)
# (noreturn, so no epilogue needed after the calls).
.globl _Z11check_error9cudaError
.type _Z11check_error9cudaError, @function
_Z11check_error9cudaError:
.LFB2057:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
subq $8, %rsp
.cfi_def_cfa_offset 16
# __printf_chk(2 /*_FORTIFY flag*/, ".LC0", err); eax=0: no vector args.
movl %edi, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z11check_error9cudaError, .-_Z11check_error9cudaError
# Host-side launch stub for MatrixMulKernel(float*,float*,float*,int,int,int).
# Spills the six ABI arguments to the stack, builds the kernel-argument
# pointer array at 112(%rsp), pops the launch configuration pushed by the
# <<<...>>> caller, and forwards everything to cudaLaunchKernel.
# Guarded by a stack-protector canary (%fs:40).
.globl _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
.type _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii, @function
_Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# Spill the kernel arguments so their addresses can be taken.
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# args[] = { &p0, &p1, &p2, &i3, &i4, &i5 } at 112(%rsp).
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
# Default grid/block dims (all 1s) before popping the real config.
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
# Canary check and return.
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
# Pass sharedMem and stream on the stack (args 7 and 8).
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15MatrixMulKernelPfS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii, .-_Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
# Host trampoline bearing the kernel's mangled name; simply forwards to
# the device stub above. The sub/add of 8 keeps rsp 16-aligned at the call.
.globl _Z15MatrixMulKernelPfS_S_iii
.type _Z15MatrixMulKernelPfS_S_iii, @function
_Z15MatrixMulKernelPfS_S_iii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z15MatrixMulKernelPfS_S_iii, .-_Z15MatrixMulKernelPfS_S_iii
# Host-side launch stub for MatrixMulKernelSingleBlock(...). Structure is
# identical to the MatrixMulKernel stub: spill args, build args[] at
# 112(%rsp), pop the <<<...>>> config, call cudaLaunchKernel.
.globl _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
.type _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii, @function
_Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 192
# Spill kernel arguments (3 pointers + 3 ints).
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# Kernel-argument pointer array.
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
# sharedMem and stream go on the stack for cudaLaunchKernel.
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z26MatrixMulKernelSingleBlockPfS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii, .-_Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
# Host trampoline bearing the single-block kernel's mangled name;
# forwards to its device stub (sub/add keeps rsp 16-aligned at the call).
.globl _Z26MatrixMulKernelSingleBlockPfS_S_iii
.type _Z26MatrixMulKernelSingleBlockPfS_S_iii, @function
_Z26MatrixMulKernelSingleBlockPfS_S_iii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z26MatrixMulKernelSingleBlockPfS_S_iii, .-_Z26MatrixMulKernelSingleBlockPfS_S_iii
# void compute_GPU(float* out, float* a, float* b, int c, int m, int n)
# (parameter roles inferred from register flow — TODO confirm against the
# C++ source). Allocates three device buffers, copies the two inputs
# host->device (memcpy kind 1), launches either the 16x16 single-block
# kernel (when all three dims == 16) or the tiled kernel with a
# ceil(dim/16) grid, copies the result back (kind 2 = device->host),
# frees, and routes every runtime status through check_error.
# Register roles: r15=a, rbp=c, r14=m, rbx=n; out and b spilled at
# (%rsp)/8(%rsp); device pointers at 24/32/40(%rsp); dim3s at 48..68(%rsp).
.globl _Z11compute_GPUPfS_S_iii
.type _Z11compute_GPUPfS_S_iii, @function
_Z11compute_GPUPfS_S_iii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %rdi, (%rsp)
movq %rsi, %r15
movq %rdx, 8(%rsp)
movl %ecx, %ebp
movl %r8d, %r14d
movl %r9d, %ebx
# Stack-protector canary.
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
# r13 = 4 * (arg4 * arg5) bytes for the first input buffer.
movl %ecx, %r13d
imull %r8d, %r13d
movslq %r13d, %r13
salq $2, %r13
# r12 = 4 * (arg5 * arg6) bytes for the second input buffer.
movl %r8d, %r12d
imull %r9d, %r12d
movslq %r12d, %r12
salq $2, %r12
# cudaMalloc(&dev_a, r13); copy a in (kind 1 = host->device).
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movl $1, %ecx
movq %r13, %rdx
movq %r15, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z11check_error9cudaError
# cudaMalloc(&dev_b, r12); copy b in.
leaq 32(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movl $1, %ecx
movq %r12, %rdx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z11check_error9cudaError
# r12 = 4 * (arg4 * arg6) bytes for the output buffer.
movl %ebp, %r12d
imull %ebx, %r12d
movslq %r12d, %r12
salq $2, %r12
leaq 40(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z11check_error9cudaError
# If all three dimensions equal 16, take the single-block path.
cmpl $16, %r14d
sete %dl
cmpl $16, %ebx
sete %al
testb %al, %dl
je .L26
cmpl $16, %ebp
je .L32
.L26:
# General path: grid = (ceil(n/16), ceil(p/16), 1), block = (16,16,1).
# The cmov pair implements a round-toward-zero-safe (x+15)/16.
leal 30(%rbx), %eax
movl %ebx, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 48(%rsp)
leal 30(%rbp), %eax
movl %ebp, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 52(%rsp)
movl $1, 56(%rsp)
movl $16, 60(%rsp)
movl $16, 64(%rsp)
movl $1, 68(%rsp)
# __cudaPushCallConfiguration(gridDim, blockDim, 0 shared, NULL stream);
# 64-bit moves carry two packed dim3 components at once.
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L33
.L28:
# Common tail: result device->host (kind 2), free all three buffers.
movl $2, %ecx
movq %r12, %rdx
movq 40(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movq 32(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call _Z11check_error9cudaError
movq 40(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
call _Z11check_error9cudaError
# Canary check, restore callee-saved regs, return.
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
# Single-block path: grid (1,1,1), block (16,16,1).
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $16, 60(%rsp)
movl $16, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L28
movl $16, %r9d
movl $16, %r8d
movl $16, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z53__device_stub__Z26MatrixMulKernelSingleBlockPfS_S_iiiPfS_S_iii
jmp .L28
.L33:
# Config accepted: launch the tiled kernel with original dimensions.
movl %ebx, %r9d
movl %r14d, %r8d
movl %ebp, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z42__device_stub__Z15MatrixMulKernelPfS_S_iiiPfS_S_iii
jmp .L28
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z11compute_GPUPfS_S_iii, .-_Z11compute_GPUPfS_S_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "_Z26MatrixMulKernelSingleBlockPfS_S_iii"
.section .rodata.str1.1
.LC2:
.string "_Z15MatrixMulKernelPfS_S_iii"
.text
# _ZL24__sti____cudaRegisterAllv — CUDA registration constructor
# (referenced from .init_array). Registers the fat binary, both kernels
# (device/host name strings .LC1/.LC2), finalizes registration, and
# installs the unregister hook via atexit. Each __cudaRegisterFunction
# call pushes four NULL stack arguments (dim/size out-params).
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# Register MatrixMulKernelSingleBlock.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z26MatrixMulKernelSingleBlockPfS_S_iii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# Register MatrixMulKernel.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulKernelPfS_S_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpu_matrixmul.hip"
# void check_error(hipError_t err) — HIP build of the status checker.
# err == hipSuccess (0): plain ret. Otherwise printf("Error is %d", err)
# then exit(1) (noreturn). push %rax only realigns rsp for the calls.
.globl _Z11check_error10hipError_t # -- Begin function _Z11check_error10hipError_t
.p2align 4, 0x90
.type _Z11check_error10hipError_t,@function
_Z11check_error10hipError_t: # @_Z11check_error10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB0_2
# %bb.1:
retq
.LBB0_2:
pushq %rax
.cfi_def_cfa_offset 16
movl %edi, %esi
movl $.L.str, %edi
# eax = 0: no vector args for the variadic printf (SysV requirement).
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z11check_error10hipError_t, .Lfunc_end0-_Z11check_error10hipError_t
.cfi_endproc
# -- End function
# HIP launch stub for MatrixMulKernel(float*,float*,float*,int,int,int).
# Spills arguments, builds the kernel-args pointer array at 96(%rsp),
# pops the launch configuration, and calls hipLaunchKernel with
# sharedMem/stream passed on the stack.
.globl _Z30__device_stub__MatrixMulKernelPfS_S_iii # -- Begin function _Z30__device_stub__MatrixMulKernelPfS_S_iii
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulKernelPfS_S_iii,@function
_Z30__device_stub__MatrixMulKernelPfS_S_iii: # @_Z30__device_stub__MatrixMulKernelPfS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill the six kernel arguments so their addresses can be taken.
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
# args[] = addresses of the spilled values.
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
# hipLaunchKernel(func, grid, block, args, sharedMem, stream).
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z30__device_stub__MatrixMulKernelPfS_S_iii, .Lfunc_end1-_Z30__device_stub__MatrixMulKernelPfS_S_iii
.cfi_endproc
# -- End function
# HIP launch stub for MatrixMulKernelSingleBlock(...). Identical layout
# to the MatrixMulKernel stub: spill args, build args[] at 96(%rsp),
# pop the launch config, call hipLaunchKernel.
.globl _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii # -- Begin function _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.p2align 4, 0x90
.type _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii,@function
_Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii: # @_Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z26MatrixMulKernelSingleBlockPfS_S_iii, %edi
# sharedMem and stream are stack arguments 7 and 8.
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii, .Lfunc_end2-_Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.cfi_endproc
# -- End function
# HIP build of compute_GPU(float*,float*,float*,int,int,int).
# Unlike the GCC/CUDA version this inlines both check_error and the
# launch stubs: any nonzero runtime status jumps straight to .LBB3_18
# (printf + exit(1)). Flow: 3x hipMalloc / 2x host->device hipMemcpy,
# pick single-block (all dims == 16) or tiled launch, device->host copy,
# 3x hipFree. Register roles: r15/rbp/r12 = the three int dims,
# r13/r14 = the two input host pointers, out pointer spilled at 160(%rsp).
.globl _Z11compute_GPUPfS_S_iii # -- Begin function _Z11compute_GPUPfS_S_iii
.p2align 4, 0x90
.type _Z11compute_GPUPfS_S_iii,@function
_Z11compute_GPUPfS_S_iii: # @_Z11compute_GPUPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r15d
movl %r8d, %ebp
movl %ecx, %r12d
movq %rdx, %r14
movq %rsi, %r13
movq %rdi, 160(%rsp) # 8-byte Spill
# rbx = 4 * (arg5 * arg4) bytes; allocate and copy first input.
movl %r8d, %eax
imull %ecx, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 32(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB3_18
# %bb.1: # %_Z11check_error10hipError_t.exit
# hipMemcpy kind 1 = host->device.
movq 32(%rsp), %rdi
movq %r13, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_18
# %bb.2: # %_Z11check_error10hipError_t.exit57
# rbx = 4 * (arg6 * arg5) bytes; allocate and copy second input.
movl %r15d, %eax
imull %ebp, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB3_18
# %bb.3: # %_Z11check_error10hipError_t.exit59
movq 24(%rsp), %rdi
movq %r14, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_18
# %bb.4: # %_Z11check_error10hipError_t.exit61
# r14 = 4 * (arg6 * arg4) bytes for the output buffer.
movl %r15d, %eax
imull %r12d, %eax
movslq %eax, %r14
shlq $2, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB3_18
# %bb.5: # %_Z11check_error10hipError_t.exit63
# All three dims == 16 -> single-block launch path.
cmpl $16, %r12d
jne .LBB3_10
# %bb.6: # %_Z11check_error10hipError_t.exit63
cmpl $16, %ebp
jne .LBB3_10
# %bb.7: # %_Z11check_error10hipError_t.exit63
cmpl $16, %r15d
jne .LBB3_10
# %bb.8:
# grid = (1,1,1) packed in rdi/esi, block = (16,16,1) packed in rdx/ecx.
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_13
# %bb.9:
# Inlined single-block stub: args[] at 112(%rsp), constants 16/16/16.
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $16, 12(%rsp)
movl $16, 8(%rsp)
movl $16, 4(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z26MatrixMulKernelSingleBlockPfS_S_iii, %edi
jmp .LBB3_12
.LBB3_10:
# Tiled path: grid = (ceil(r15/16), ceil(r12/16), 1), block = (16,16,1).
# The cmovns pair computes a sign-safe (x+15)>>4.
leal 15(%r12), %eax
leal 30(%r12), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $4, %edi
leal 15(%r15), %eax
leal 30(%r15), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $4, %ecx
shlq $32, %rdi
orq %rcx, %rdi
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_13
# %bb.11:
# Inlined tiled-kernel stub with the original runtime dimensions.
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %r12d, 12(%rsp)
movl %ebp, 8(%rsp)
movl %r15d, 4(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_iii, %edi
.LBB3_12:
# Shared launch tail: sharedMem/stream on the stack, then hipLaunchKernel.
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_13:
# Result back to host (kind 2 = device->host), then free everything.
movq 16(%rsp), %rsi
movq 160(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_18
# %bb.14: # %_Z11check_error10hipError_t.exit71
movq 32(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB3_18
# %bb.15: # %_Z11check_error10hipError_t.exit73
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB3_18
# %bb.16: # %_Z11check_error10hipError_t.exit75
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB3_18
# %bb.17: # %_Z11check_error10hipError_t.exit77
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_18:
# Inlined check_error failure path: printf("Error is %d", err); exit(1).
.cfi_def_cfa_offset 224
movl $.L.str, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end3:
.size _Z11compute_GPUPfS_S_iii, .Lfunc_end3-_Z11compute_GPUPfS_S_iii
.cfi_endproc
# -- End function
# __hip_module_ctor — module constructor (via .init_array): registers the
# fat binary once (guarded by __hip_gpubin_handle), registers both kernel
# stubs, then tail-calls atexit(__hip_module_dtor).
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
# Zero the 32-byte out-parameter block, register kernel #1.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixMulKernelPfS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register kernel #2.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26MatrixMulKernelSingleBlockPfS_S_iii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
# __hip_module_dtor — exit hook: unregisters the fat binary if it was
# ever registered, then NULLs the handle (idempotent).
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
# push %rax only realigns rsp for the call.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error is %d"
.size .L.str, 12
.type _Z15MatrixMulKernelPfS_S_iii,@object # @_Z15MatrixMulKernelPfS_S_iii
.section .rodata,"a",@progbits
.globl _Z15MatrixMulKernelPfS_S_iii
.p2align 3, 0x0
_Z15MatrixMulKernelPfS_S_iii:
.quad _Z30__device_stub__MatrixMulKernelPfS_S_iii
.size _Z15MatrixMulKernelPfS_S_iii, 8
.type _Z26MatrixMulKernelSingleBlockPfS_S_iii,@object # @_Z26MatrixMulKernelSingleBlockPfS_S_iii
.globl _Z26MatrixMulKernelSingleBlockPfS_S_iii
.p2align 3, 0x0
_Z26MatrixMulKernelSingleBlockPfS_S_iii:
.quad _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.size _Z26MatrixMulKernelSingleBlockPfS_S_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15MatrixMulKernelPfS_S_iii"
.size .L__unnamed_1, 29
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z26MatrixMulKernelSingleBlockPfS_S_iii"
.size .L__unnamed_2, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulKernelPfS_S_iii
.addrsig_sym _Z41__device_stub__MatrixMulKernelSingleBlockPfS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulKernelPfS_S_iii
.addrsig_sym _Z26MatrixMulKernelSingleBlockPfS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h"
// a[i][j] += b[i][j] over an m x n row-major matrix pair.
// Each thread covers rows (x dimension) and columns (y dimension) in
// grid-sized strides, so any launch geometry processes the full matrix.
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x; // global row index
int j = threadIdx.y + blockIdx.y*blockDim.y; // initial value is dead: recomputed per outer iteration
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y; // restart column sweep for this row
while(j<n){
a[i*n+j]+=b[i*n+j]; // element-wise accumulate
j+= blockDim.y*gridDim.y; // advance by total y-threads in the grid
}
i+=blockDim.x*gridDim.x; // advance by total x-threads in the grid
}
}
code for sm_80
Function : _Z18kernelSumaMatricesPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000e220000002200 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e240000002600 */
/*0090*/ IMAD R6, R3, c[0x0][0x4], R6 ; /* 0x0000010003067a24 */
/* 0x001fca00078e0206 */
/*00a0*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x174], PT ; /* 0x00005d0006007a0c */
/* 0x000fe20003f06270 */
/*00b0*/ BSSY B0, 0x1c0 ; /* 0x0000010000007945 */
/* 0x000fe20003800000 */
/*00c0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fd60000000f00 */
/*00d0*/ @P0 BRA 0x1b0 ; /* 0x000000d000000947 */
/* 0x000fea0003800000 */
/*00e0*/ MOV R7, R6 ; /* 0x0000000600077202 */
/* 0x000fc80000000f00 */
/*00f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0100*/ IMAD R4, R0, c[0x0][0x174], R7 ; /* 0x00005d0000047a24 */
/* 0x000fd200078e0207 */
/*0110*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0120*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0140*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea4000c1e1900 */
/*0150*/ FADD R9, R8, R3 ; /* 0x0000000308097221 */
/* 0x004fe20000000000 */
/*0160*/ MOV R8, c[0x0][0x4] ; /* 0x0000010000087a02 */
/* 0x000fc80000000f00 */
/*0170*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e2000c101904 */
/*0180*/ IMAD R7, R8, c[0x0][0x10], R7 ; /* 0x0000040008077a24 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x174], PT ; /* 0x00005d0007007a0c */
/* 0x000fda0003f06270 */
/*01a0*/ @!P0 BRA 0xf0 ; /* 0xffffff4000008947 */
/* 0x001fea000383ffff */
/*01b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01c0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*01d0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*01e0*/ @!P0 BRA 0xa0 ; /* 0xfffffeb000008947 */
/* 0x000fea000383ffff */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h"
// a[i][j] += b[i][j] over an m x n row-major matrix pair.
// Each thread covers rows (x dimension) and columns (y dimension) in
// grid-sized strides, so any launch geometry processes the full matrix.
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x; // global row index
int j = threadIdx.y + blockIdx.y*blockDim.y; // initial value is dead: recomputed per outer iteration
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y; // restart column sweep for this row
while(j<n){
a[i*n+j]+=b[i*n+j]; // element-wise accumulate
j+= blockDim.y*gridDim.y; // advance by total y-threads in the grid
}
i+=blockDim.x*gridDim.x; // advance by total x-threads in the grid
}
} | .file "tmpxft_000d95ac_00000000-6_kernelSumaMatrices.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii
.type _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii, @function
_Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z18kernelSumaMatricesPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii, .-_Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii
.globl _Z18kernelSumaMatricesPfS_ii
.type _Z18kernelSumaMatricesPfS_ii, @function
_Z18kernelSumaMatricesPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z18kernelSumaMatricesPfS_ii, .-_Z18kernelSumaMatricesPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z18kernelSumaMatricesPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z18kernelSumaMatricesPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y;
while(j<n){
a[i*n+j]+=b[i*n+j];
j+= blockDim.y*gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y;
while(j<n){
a[i*n+j]+=b[i*n+j];
j+= blockDim.y*gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y;
while(j<n){
a[i*n+j]+=b[i*n+j];
j+= blockDim.y*gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18kernelSumaMatricesPfS_ii
.globl _Z18kernelSumaMatricesPfS_ii
.p2align 8
.type _Z18kernelSumaMatricesPfS_ii,@function
_Z18kernelSumaMatricesPfS_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s8, s[0:1], 0x10
v_and_b32_e32 v3, 0x3ff, v0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s10, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s14, s10, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_6
s_load_b32 s11, s[2:3], 0xc
s_load_b32 s9, s[0:1], 0x14
s_load_b64 s[2:3], s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s11, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s15, s0, v[0:1]
v_mul_lo_u32 v0, s9, v1
s_mul_i32 s1, s2, s10
s_mul_i32 s2, s3, s0
s_mul_i32 s3, s1, s9
s_mov_b32 s10, 0
s_delay_alu instid0(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s9, v2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s11
v_add_nc_u32_e32 v1, s1, v1
v_add_nc_u32_e32 v0, s3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_i32_e64 s0, s8, v1
s_or_b32 s10, s0, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execz .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s11, vcc_lo
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v3, v2
s_mov_b32 s12, 0
.p2align 6
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, v0, v3
v_add_nc_u32_e32 v3, s2, v3
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_u32 v6, s0, s6, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v7, s0, s7, v5, s0
v_add_co_u32 v4, s0, s4, v4
v_add_co_ci_u32_e64 v5, s0, s5, v5, s0
v_cmp_le_i32_e64 s0, s9, v3
global_load_b32 v6, v[6:7], off
global_load_b32 v7, v[4:5], off
s_or_b32 s12, s0, s12
s_waitcnt vmcnt(0)
v_add_f32_e32 v6, v6, v7
global_store_b32 v[4:5], v6, off
s_and_not1_b32 exec_lo, exec_lo, s12
s_cbranch_execnz .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18kernelSumaMatricesPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18kernelSumaMatricesPfS_ii, .Lfunc_end0-_Z18kernelSumaMatricesPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18kernelSumaMatricesPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18kernelSumaMatricesPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y;
while(j<n){
a[i*n+j]+=b[i*n+j];
j+= blockDim.y*gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
} | .text
.file "kernelSumaMatrices.hip"
.globl _Z33__device_stub__kernelSumaMatricesPfS_ii # -- Begin function _Z33__device_stub__kernelSumaMatricesPfS_ii
.p2align 4, 0x90
.type _Z33__device_stub__kernelSumaMatricesPfS_ii,@function
_Z33__device_stub__kernelSumaMatricesPfS_ii: # @_Z33__device_stub__kernelSumaMatricesPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z18kernelSumaMatricesPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z33__device_stub__kernelSumaMatricesPfS_ii, .Lfunc_end0-_Z33__device_stub__kernelSumaMatricesPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18kernelSumaMatricesPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18kernelSumaMatricesPfS_ii,@object # @_Z18kernelSumaMatricesPfS_ii
.section .rodata,"a",@progbits
.globl _Z18kernelSumaMatricesPfS_ii
.p2align 3, 0x0
_Z18kernelSumaMatricesPfS_ii:
.quad _Z33__device_stub__kernelSumaMatricesPfS_ii
.size _Z18kernelSumaMatricesPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z18kernelSumaMatricesPfS_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__kernelSumaMatricesPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18kernelSumaMatricesPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z18kernelSumaMatricesPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000e220000002200 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e240000002600 */
/*0090*/ IMAD R6, R3, c[0x0][0x4], R6 ; /* 0x0000010003067a24 */
/* 0x001fca00078e0206 */
/*00a0*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x174], PT ; /* 0x00005d0006007a0c */
/* 0x000fe20003f06270 */
/*00b0*/ BSSY B0, 0x1c0 ; /* 0x0000010000007945 */
/* 0x000fe20003800000 */
/*00c0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fd60000000f00 */
/*00d0*/ @P0 BRA 0x1b0 ; /* 0x000000d000000947 */
/* 0x000fea0003800000 */
/*00e0*/ MOV R7, R6 ; /* 0x0000000600077202 */
/* 0x000fc80000000f00 */
/*00f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0100*/ IMAD R4, R0, c[0x0][0x174], R7 ; /* 0x00005d0000047a24 */
/* 0x000fd200078e0207 */
/*0110*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0120*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0140*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea4000c1e1900 */
/*0150*/ FADD R9, R8, R3 ; /* 0x0000000308097221 */
/* 0x004fe20000000000 */
/*0160*/ MOV R8, c[0x0][0x4] ; /* 0x0000010000087a02 */
/* 0x000fc80000000f00 */
/*0170*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e2000c101904 */
/*0180*/ IMAD R7, R8, c[0x0][0x10], R7 ; /* 0x0000040008077a24 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x174], PT ; /* 0x00005d0007007a0c */
/* 0x000fda0003f06270 */
/*01a0*/ @!P0 BRA 0xf0 ; /* 0xffffff4000008947 */
/* 0x001fea000383ffff */
/*01b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01c0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*01d0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*01e0*/ @!P0 BRA 0xa0 ; /* 0xfffffeb000008947 */
/* 0x000fea000383ffff */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18kernelSumaMatricesPfS_ii
.globl _Z18kernelSumaMatricesPfS_ii
.p2align 8
.type _Z18kernelSumaMatricesPfS_ii,@function
_Z18kernelSumaMatricesPfS_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s8, s[0:1], 0x10
v_and_b32_e32 v3, 0x3ff, v0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s10, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s14, s10, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_6
s_load_b32 s11, s[2:3], 0xc
s_load_b32 s9, s[0:1], 0x14
s_load_b64 s[2:3], s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s11, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s15, s0, v[0:1]
v_mul_lo_u32 v0, s9, v1
s_mul_i32 s1, s2, s10
s_mul_i32 s2, s3, s0
s_mul_i32 s3, s1, s9
s_mov_b32 s10, 0
s_delay_alu instid0(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s9, v2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s11
v_add_nc_u32_e32 v1, s1, v1
v_add_nc_u32_e32 v0, s3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_i32_e64 s0, s8, v1
s_or_b32 s10, s0, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execz .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s11, vcc_lo
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v3, v2
s_mov_b32 s12, 0
.p2align 6
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, v0, v3
v_add_nc_u32_e32 v3, s2, v3
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_u32 v6, s0, s6, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v7, s0, s7, v5, s0
v_add_co_u32 v4, s0, s4, v4
v_add_co_ci_u32_e64 v5, s0, s5, v5, s0
v_cmp_le_i32_e64 s0, s9, v3
global_load_b32 v6, v[6:7], off
global_load_b32 v7, v[4:5], off
s_or_b32 s12, s0, s12
s_waitcnt vmcnt(0)
v_add_f32_e32 v6, v6, v7
global_store_b32 v[4:5], v6, off
s_and_not1_b32 exec_lo, exec_lo, s12
s_cbranch_execnz .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18kernelSumaMatricesPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18kernelSumaMatricesPfS_ii, .Lfunc_end0-_Z18kernelSumaMatricesPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18kernelSumaMatricesPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18kernelSumaMatricesPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d95ac_00000000-6_kernelSumaMatrices.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii
.type _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii, @function
_Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z18kernelSumaMatricesPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii, .-_Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii
.globl _Z18kernelSumaMatricesPfS_ii
.type _Z18kernelSumaMatricesPfS_ii, @function
_Z18kernelSumaMatricesPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z18kernelSumaMatricesPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z18kernelSumaMatricesPfS_ii, .-_Z18kernelSumaMatricesPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z18kernelSumaMatricesPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z18kernelSumaMatricesPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernelSumaMatrices.hip"
.globl _Z33__device_stub__kernelSumaMatricesPfS_ii # -- Begin function _Z33__device_stub__kernelSumaMatricesPfS_ii
.p2align 4, 0x90
.type _Z33__device_stub__kernelSumaMatricesPfS_ii,@function
_Z33__device_stub__kernelSumaMatricesPfS_ii: # @_Z33__device_stub__kernelSumaMatricesPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z18kernelSumaMatricesPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z33__device_stub__kernelSumaMatricesPfS_ii, .Lfunc_end0-_Z33__device_stub__kernelSumaMatricesPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18kernelSumaMatricesPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18kernelSumaMatricesPfS_ii,@object # @_Z18kernelSumaMatricesPfS_ii
.section .rodata,"a",@progbits
.globl _Z18kernelSumaMatricesPfS_ii
.p2align 3, 0x0
_Z18kernelSumaMatricesPfS_ii:
.quad _Z33__device_stub__kernelSumaMatricesPfS_ii
.size _Z18kernelSumaMatricesPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z18kernelSumaMatricesPfS_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__kernelSumaMatricesPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18kernelSumaMatricesPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
// Per-element Gaussian negative-log-likelihood error terms over an N x M grid.
// A has length 2*N*M: the first N*M entries hold predictions, the second N*M
// entries hold log-variance-like values; Y (length N*M) holds targets.
// All arithmetic uses round-to-nearest device intrinsics so results are
// bit-reproducible; 0.9189385332 is 0.5*ln(2*pi), the Gaussian normalization
// constant. One thread handles one (i, j) element; out-of-range threads exit.
__global__ void normLogErr(int N, int M, float *A, float *Y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = j*N + i;
int L = N*M;                            // offset of the second half of A
if (i < N && j < M)
{
// A2 in this case is stored in the doubled rows of A, so the length of A is
// double that of Y
float a = __expf(__fmul_rn(2.0, A[index+L]));
// A[index] = a * (0.5*A[index]^2 - A[index]*Y[index] + 0.5*Y[index]^2), built
// from fused/round-to-nearest intrinsics to pin the evaluation order.
A[index] = __fmul_rn(a, __fmaf_rn(0.5, __fmul_rn(Y[index], Y[index]), __fsub_rn(__fmul_rn(0.5, __fmul_rn(A[index], A[index])), __fmul_rn(A[index], Y[index]))));
A[index+L] = __fsub_rn(0.9189385332, A[index+L]); // stick final sum factor in 2nd part of A so when it sums to total the cost will be correct
// A[index] = a*(A[index]*(0.5*A[index] - Y[index]) + 0.5*Y[index]*Y[index]);
// A[index+L] = __fsub_rn(0.9189385332, A[index+L]);
}
} | code for sm_80
Function : _Z10normLogErriiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x164], PT ; /* 0x0000590003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x160], P0 ; /* 0x0000580000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */
/* 0x000fe20000000f00 */
/*00c0*/ IMAD R0, R3, c[0x0][0x160], R0 ; /* 0x0000580003007a24 */
/* 0x000fe200078e0200 */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00e0*/ IMAD R2, R5, c[0x0][0x160], R0 ; /* 0x0000580005027a24 */
/* 0x000fc800078e0200 */
/*00f0*/ IMAD.WIDE R2, R2, R7, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0207 */
/*0100*/ LDG.E R8, [R2.64] ; /* 0x0000000402087981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*0120*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*0130*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000eea000c1e1900 */
/*0140*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000f22000c1e1900 */
/*0150*/ FMUL R8, R8, 2 ; /* 0x4000000008087820 */
/* 0x004fc80000400000 */
/*0160*/ FMUL R8, R8, 1.4426950216293334961 ; /* 0x3fb8aa3b08087820 */
/* 0x000fe40000400000 */
/*0170*/ FMUL R10, R9, R9 ; /* 0x00000009090a7220 */
/* 0x008fc60000400000 */
/*0180*/ FSETP.GEU.AND P0, PT, R8, -126, PT ; /* 0xc2fc00000800780b */
/* 0x000fe20003f0e000 */
/*0190*/ FMUL R10, R10, 0.5 ; /* 0x3f0000000a0a7820 */
/* 0x000fe40000400000 */
/*01a0*/ FMUL R11, R6.reuse, R9 ; /* 0x00000009060b7220 */
/* 0x050fe40000400000 */
/*01b0*/ FMUL R9, R6, R6 ; /* 0x0000000606097220 */
/* 0x000fe40000400000 */
/*01c0*/ FADD R10, R10, -R11 ; /* 0x8000000b0a0a7221 */
/* 0x000fc80000000000 */
/*01d0*/ FFMA R9, R9, 0.5, R10 ; /* 0x3f00000009097823 */
/* 0x000fe4000000000a */
/*01e0*/ @!P0 FMUL R8, R8, 0.5 ; /* 0x3f00000008088820 */
/* 0x000fc80000400000 */
/*01f0*/ MUFU.EX2 R0, R8 ; /* 0x0000000800007308 */
/* 0x000e240000000800 */
/*0200*/ @!P0 FMUL R0, R0, R0 ; /* 0x0000000000008220 */
/* 0x001fc80000400000 */
/*0210*/ FMUL R9, R0, R9 ; /* 0x0000000900097220 */
/* 0x000fca0000400000 */
/*0220*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x000fe8000c101904 */
/*0230*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0240*/ FADD R7, -R0, 0.91893851757049560547 ; /* 0x3f6b3f8e00077421 */
/* 0x004fca0000000100 */
/*0250*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*0260*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0270*/ BRA 0x270; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.