system
stringclasses 7
values | input
stringlengths 10
94.5k
| output
stringlengths 14
55.5k
|
|---|---|---|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : inputs_gradients
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */
/* 0x000fca0000000f00 */
/*0040*/ LDG.E R6, [R2.64+0x10] ; /* 0x0000100402067981 */
/* 0x000ea8000c1e1900 */
/*0050*/ LDG.E R9, [R2.64+0x18] ; /* 0x0000180402097981 */
/* 0x000ea8000c1e1900 */
/*0060*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080402087981 */
/* 0x000ee2000c1e1900 */
/*0070*/ S2UR UR6, SR_CTAID.X ; /* 0x00000000000679c3 */
/* 0x000e220000002500 */
/*0080*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */
/* 0x000fc40000000f00 */
/*0090*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */
/* 0x000ee20000002100 */
/*00a0*/ MOV R5, c[0x0][0x174] ; /* 0x00005d0000057a02 */
/* 0x000fc80000000f00 */
/*00b0*/ S2UR UR7, SR_CTAID.Y ; /* 0x00000000000779c3 */
/* 0x000e620000002600 */
/*00c0*/ LDG.E R0, [R4.64+0x10] ; /* 0x0000100404007981 */
/* 0x000f28000c1e1900 */
/*00d0*/ LDG.E R7, [R4.64+0x18] ; /* 0x0000180404077981 */
/* 0x000f26000c1e1900 */
/*00e0*/ S2UR UR8, SR_CTAID.Z ; /* 0x00000000000879c3 */
/* 0x000f620000002700 */
/*00f0*/ IMAD R12, R9, R6, RZ ; /* 0x00000006090c7224 */
/* 0x004fe400078e02ff */
/*0100*/ LDG.E R6, [R4.64+0x8] ; /* 0x0000080404067981 */
/* 0x000ea4000c1e1900 */
/*0110*/ IMAD R10, R12, UR6, RZ ; /* 0x000000060c0a7c24 */
/* 0x001fc4000f8e02ff */
/*0120*/ IMAD R3, R8, R11, RZ ; /* 0x0000000b08037224 */
/* 0x008fe400078e02ff */
/*0130*/ IMAD R10, R9, UR7, R10 ; /* 0x00000007090a7c24 */
/* 0x002fe2000f8e020a */
/*0140*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fc600000001ff */
/*0150*/ IMAD R2, R12, R3, R10 ; /* 0x000000030c027224 */
/* 0x000fca00078e020a */
/*0160*/ IADD3 R2, R2, UR8, RZ ; /* 0x0000000802027c10 */
/* 0x020fca000fffe0ff */
/*0170*/ IMAD.WIDE R2, R2, R9, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fcc00078e0209 */
/*0180*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ee2000c1e1900 */
/*0190*/ IMAD R8, R7, R0, RZ ; /* 0x0000000007087224 */
/* 0x010fc800078e02ff */
/*01a0*/ IMAD R0, R8, UR6, RZ ; /* 0x0000000608007c24 */
/* 0x000fc8000f8e02ff */
/*01b0*/ IMAD R0, R7, UR7, R0 ; /* 0x0000000707007c24 */
/* 0x000fe4000f8e0200 */
/*01c0*/ IMAD R11, R6, R11, RZ ; /* 0x0000000b060b7224 */
/* 0x004fc800078e02ff */
/*01d0*/ IMAD R0, R8, R11, R0 ; /* 0x0000000b08007224 */
/* 0x000fca00078e0200 */
/*01e0*/ IADD3 R4, R0, UR8, RZ ; /* 0x0000000800047c10 */
/* 0x000fca000fffe0ff */
/*01f0*/ IMAD.WIDE R4, R4, R9, c[0x0][0x178] ; /* 0x00005e0004047625 */
/* 0x000fca00078e0209 */
/*0200*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x008fe2000c101904 */
/*0210*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0220*/ BRA 0x220; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : activation
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ MOV R6, c[0x0][0x170] ; /* 0x00005c0000067a02 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff077624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0050*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fe400078e00ff */
/*0060*/ LDG.E R0, [R6.64+0x18] ; /* 0x0000180406007981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R9, [R6.64+0x10] ; /* 0x0000100406097981 */
/* 0x000ea8000c1e1900 */
/*0080*/ LDG.E.64 R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ee8000c1e1b00 */
/*0090*/ LDG.E R8, [R6.64+0x8] ; /* 0x0000080406087981 */
/* 0x000f22000c1e1900 */
/*00a0*/ S2UR UR6, SR_CTAID.X ; /* 0x00000000000679c3 */
/* 0x000e260000002500 */
/*00b0*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */
/* 0x000eea0000002100 */
/*00c0*/ S2UR UR7, SR_CTAID.Y ; /* 0x00000000000779c3 */
/* 0x000e700000002600 */
/*00d0*/ S2UR UR8, SR_CTAID.Z ; /* 0x00000000000879c3 */
/* 0x000f620000002700 */
/*00e0*/ HFMA2.MMA R13, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0d7435 */
/* 0x000fe200000001ff */
/*00f0*/ BSSY B0, 0x270 ; /* 0x0000017000007945 */
/* 0x000fe20003800000 */
/*0100*/ IMAD R10, R0, R9, RZ ; /* 0x00000009000a7224 */
/* 0x004fc800078e02ff */
/*0110*/ IMAD R9, R10, UR6, RZ ; /* 0x000000060a097c24 */
/* 0x001fe2000f8e02ff */
/*0120*/ ISETP.GT.U32.AND P0, PT, R4, R11, PT ; /* 0x0000000b0400720c */
/* 0x008fc60003f04070 */
/*0130*/ IMAD R0, R0, UR7, R9 ; /* 0x0000000700007c24 */
/* 0x002fe2000f8e0209 */
/*0140*/ ISETP.GT.AND.EX P0, PT, R5, RZ, PT, P0 ; /* 0x000000ff0500720c */
/* 0x000fe20003f04300 */
/*0150*/ IMAD R9, R8, R11, RZ ; /* 0x0000000b08097224 */
/* 0x010fc800078e02ff */
/*0160*/ IMAD R0, R10, R9, R0 ; /* 0x000000090a007224 */
/* 0x000fca00078e0200 */
/*0170*/ IADD3 R4, R0, UR8, RZ ; /* 0x0000000800047c10 */
/* 0x020fca000fffe0ff */
/*0180*/ IMAD.WIDE R4, R4, R13, c[0x0][0x178] ; /* 0x00005e0004047625 */
/* 0x000fe200078e020d */
/*0190*/ @!P0 BRA 0x260 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*01a0*/ LDG.E.64 R6, [R2.64+0x8] ; /* 0x0000080402067981 */
/* 0x000ea4000c1e1b00 */
/*01b0*/ ISETP.GT.U32.AND P0, PT, R6, UR6, PT ; /* 0x0000000606007c0c */
/* 0x004fc8000bf04070 */
/*01c0*/ ISETP.GT.AND.EX P0, PT, R7, RZ, PT, P0 ; /* 0x000000ff0700720c */
/* 0x000fda0003f04300 */
/*01d0*/ @!P0 BRA 0x260 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*01e0*/ LDG.E.64 R8, [R2.64+0x10] ; /* 0x0000100402087981 */
/* 0x000ea4000c1e1b00 */
/*01f0*/ ISETP.GT.U32.AND P0, PT, R8, UR7, PT ; /* 0x0000000708007c0c */
/* 0x004fc8000bf04070 */
/*0200*/ ISETP.GT.AND.EX P0, PT, R9, RZ, PT, P0 ; /* 0x000000ff0900720c */
/* 0x000fda0003f04300 */
/*0210*/ @!P0 BRA 0x260 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*0220*/ LDG.E.64 R2, [R2.64+0x18] ; /* 0x0000180402027981 */
/* 0x000ea4000c1e1b00 */
/*0230*/ ISETP.GT.U32.AND P0, PT, R2, UR8, PT ; /* 0x0000000802007c0c */
/* 0x004fc8000bf04070 */
/*0240*/ ISETP.GT.AND.EX P0, PT, R3, RZ, PT, P0 ; /* 0x000000ff0300720c */
/* 0x000fda0003f04300 */
/*0250*/ @P0 BRA 0x2a0 ; /* 0x0000004000000947 */
/* 0x000fea0003800000 */
/*0260*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0270*/ MOV R3, c[0x0][0x180] ; /* 0x0000600000037a02 */
/* 0x000fca0000000f00 */
/*0280*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x000fe2000c101904 */
/*0290*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02a0*/ IMAD R8, R2, R8, RZ ; /* 0x0000000802087224 */
/* 0x000fe400078e02ff */
/*02b0*/ IMAD R7, R6, R11, RZ ; /* 0x0000000b06077224 */
/* 0x000fe400078e02ff */
/*02c0*/ IMAD R3, R8, UR6, RZ ; /* 0x0000000608037c24 */
/* 0x000fc8000f8e02ff */
/*02d0*/ IMAD R3, R2, UR7, R3 ; /* 0x0000000702037c24 */
/* 0x000fc8000f8e0203 */
/*02e0*/ IMAD R2, R8, R7, R3 ; /* 0x0000000708027224 */
/* 0x000fca00078e0203 */
/*02f0*/ IADD3 R2, R2, UR8, RZ ; /* 0x0000000802027c10 */
/* 0x000fca000fffe0ff */
/*0300*/ IMAD.WIDE R2, R2, R13, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fcc00078e020d */
/*0310*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0320*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0340*/ BRA 0x340; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected activation
.globl activation
.p2align 8
.type activation,@function
activation:
; Kernel: activation(long*, float*, long*, float*, float) — gfx1100 device code.
; (Parameter types per the mangled CUDA/HIP stub name ...Z10activationPlPfS_S0_f;
;  register names below are descriptive — TODO confirm against the HIP source.)
; Kernarg block (36 bytes): in_ext ptr @0x0, in_data ptr @0x8, out_ext ptr @0x10,
; out_data ptr @0x18, 32-bit fill value @0x20.
; On entry (per the .amdhsa_* directives): s[0:1] = kernarg base,
; s13/s14/s15 = workgroup id x/y/z, v0 = workitem id x.
; Behavior: if (tid.x, wg.x, wg.y, wg.z) is within the four 64-bit input
; extents, copy one float from in_data to out_data, else store the fill value.
; Flat index = ((tid*e1 + wg.x)*e2 + wg.y)*e3 + wg.z, built separately from
; each tensor's own extents using 32-bit multiplies.
s_mov_b32 s8, s13                       ; s8 = wg id x (s13 is clobbered below)
s_clause 0x2
s_load_b64 s[12:13], s[0:1], 0x0        ; s[12:13] = in_ext pointer
s_load_b64 s[10:11], s[0:1], 0x10       ; s[10:11] = out_ext pointer
s_load_b32 s20, s[0:1], 0x20            ; s20 = fill value (raw 32-bit)
v_mov_b32_e32 v1, 0                     ; v[0:1] = tid.x zero-extended to 64 bits
s_mov_b32 s2, s15                       ; s2 = wg id z
s_mov_b32 s15, 0                        ; s[14:15] = wg id y zero-extended
s_waitcnt lgkmcnt(0)
s_load_b64 s[16:17], s[12:13], 0x0      ; in_ext[0]
s_clause 0x1
s_load_b128 s[4:7], s[10:11], 0x8       ; out_ext[1], out_ext[2]
s_load_b64 s[10:11], s[10:11], 0x18     ; out_ext[3]
s_waitcnt lgkmcnt(0)
v_cmp_gt_i64_e32 vcc_lo, s[16:17], v[0:1] ; guard: tid.x < in_ext[0] ?
v_mov_b32_e32 v1, s20                   ; preload fill value in case a guard fails
s_and_saveexec_b32 s5, vcc_lo
s_cbranch_execz .LBB0_5                 ; every lane out of range -> store fill
s_load_b64 s[16:17], s[12:13], 0x8      ; in_ext[1]
s_mov_b32 s9, s15                       ; s[8:9] = wg id x zero-extended (s15 == 0 here)
v_mov_b32_e32 v1, s20
s_waitcnt lgkmcnt(0)
v_cmp_le_i64_e64 s3, s[16:17], s[8:9]   ; out of range if in_ext[1] <= wg.x
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_5
s_load_b64 s[18:19], s[12:13], 0x10     ; in_ext[2]
v_mov_b32_e32 v1, s20
s_waitcnt lgkmcnt(0)
v_cmp_le_i64_e64 s3, s[18:19], s[14:15] ; out of range if in_ext[2] <= wg.y
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_5
s_load_b64 s[12:13], s[12:13], 0x18     ; in_ext[3]
s_mov_b32 s3, 0                         ; s[2:3] = wg id z zero-extended
v_mov_b32_e32 v1, s20
s_waitcnt lgkmcnt(0)
v_cmp_le_i64_e64 s3, s[12:13], s[2:3]   ; out of range if in_ext[3] <= wg.z
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_5
; In-bounds path: compute flat input index and load the element.
v_mul_lo_u32 v1, s16, v0                ; v1 = in_ext[1] * tid.x
s_mul_i32 s3, s12, s18                  ; s3 = in_ext[3] * in_ext[2]
s_load_b64 s[16:17], s[0:1], 0x8        ; s[16:17] = in_data pointer
s_mul_i32 s7, s12, s14                  ; s7 = in_ext[3] * wg.y
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v1, v1, s3                 ; v1 *= in_ext[2]*in_ext[3]
s_mul_i32 s3, s8, s3                    ; wg.x * in_ext[2]*in_ext[3]
s_add_i32 s3, s2, s3                    ; + wg.z
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add3_u32 v1, s3, s7, v1               ; flat input element index
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1            ; sign-extend index to 64 bits
v_lshlrev_b64 v[1:2], 2, v[1:2]         ; byte offset (4-byte elements)
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s16, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s17, v2, vcc_lo
global_load_b32 v1, v[1:2], off         ; v1 = in_data[index]
.LBB0_5:                                ; store path: v1 = loaded value or fill
s_or_b32 exec_lo, exec_lo, s5           ; restore full exec mask
v_mul_lo_u32 v0, s4, v0                 ; out_ext[1] * tid.x
s_mul_i32 s3, s10, s6                   ; out_ext[3] * out_ext[2]
s_load_b64 s[0:1], s[0:1], 0x18         ; s[0:1] = out_data pointer
s_mul_i32 s4, s10, s14                  ; out_ext[3] * wg.y
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v0, v0, s3
s_mul_i32 s3, s8, s3                    ; wg.x * out_ext[2]*out_ext[3]
s_add_i32 s2, s2, s3                    ; + wg.z
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add3_u32 v2, s2, s4, v0               ; flat output element index
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_waitcnt vmcnt(0)                      ; wait for the input load (if taken)
global_store_b32 v[2:3], v1, off        ; out_data[index] = v1
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel activation
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 36
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 21
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size activation, .Lfunc_end0-activation
.section .AMDGPU.csdata,"",@progbits
.text
.protected inputs_gradients
.globl inputs_gradients
.p2align 8
.type inputs_gradients,@function
inputs_gradients:
; Kernel: inputs_gradients(long*, float*, long*, float*) — gfx1100 device code.
; (Parameter types per the mangled stub name ...Z16inputs_gradientsPlPfS_S0_;
;  register names below are descriptive — TODO confirm against the HIP source.)
; Copies one float per lane from in_data to out_data using per-tensor flat
; indices ((tid*e1 + wg.x)*e2 + wg.y)*e3 + wg.z. Unlike `activation` above,
; there are NO bounds checks and only the low 32 bits of each extent are used.
; Entry state (per .amdhsa_* directives): s[0:1] = kernarg base,
; s13/s14/s15 = workgroup id x/y/z, v0 = workitem id x.
s_load_b256 s[0:7], s[0:1], 0x0         ; s[0:1]=in_ext, s[2:3]=in_data, s[4:5]=out_ext, s[6:7]=out_data
s_waitcnt lgkmcnt(0)
s_clause 0x2
s_load_b32 s8, s[0:1], 0x8              ; in_ext[1] (low 32 bits)
s_load_b32 s9, s[0:1], 0x10             ; in_ext[2] (low 32 bits)
s_load_b32 s0, s[0:1], 0x18             ; in_ext[3] (low 32 bits)
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v1, s8, v0                 ; v1 = in_ext[1] * tid.x
s_mul_i32 s1, s0, s9                    ; s1 = in_ext[3] * in_ext[2]
s_mul_i32 s0, s0, s14                   ; s0 = in_ext[3] * wg.y
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v1, v1, s1                 ; v1 *= in_ext[2]*in_ext[3]
s_mul_i32 s1, s13, s1                   ; wg.x * in_ext[2]*in_ext[3]
s_add_i32 s1, s15, s1                   ; + wg.z
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add3_u32 v1, s1, s0, v1               ; flat input element index
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1            ; sign-extend to 64 bits
v_lshlrev_b64 v[1:2], 2, v[1:2]         ; byte offset (4-byte elements)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
global_load_b32 v2, v[1:2], off         ; v2 = in_data[index]
s_clause 0x2
s_load_b32 s0, s[4:5], 0x8              ; out_ext[1] (low 32 bits)
s_load_b32 s1, s[4:5], 0x10             ; out_ext[2] (low 32 bits)
s_load_b32 s2, s[4:5], 0x18             ; out_ext[3] (low 32 bits)
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v0, s0, v0                 ; out_ext[1] * tid.x
s_mul_i32 s0, s2, s1                    ; out_ext[3] * out_ext[2]
s_mul_i32 s2, s2, s14                   ; out_ext[3] * wg.y
s_mul_i32 s13, s13, s0                  ; wg.x * out_ext[2]*out_ext[3]
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_add_i32 s15, s15, s13                 ; wg.z + wg.x*out_ext[2]*out_ext[3]
v_mul_lo_u32 v0, v0, s0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, s15, s2, v0              ; flat output element index
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_waitcnt vmcnt(0)                      ; wait for the input load
global_store_b32 v[0:1], v2, off        ; out_data[index] = v2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel inputs_gradients
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size inputs_gradients, .Lfunc_end1-inputs_gradients
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 36
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: activation
.private_segment_fixed_size: 0
.sgpr_count: 23
.sgpr_spill_count: 0
.symbol: activation.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: inputs_gradients
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: inputs_gradients.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0010e88e_00000000-6_pad_2d.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit hook (registered by __sti____cudaRegisterAll below): unregisters the
# fat binary handle created at program startup.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp                           # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Device stub for activation(long*, float*, long*, float*, float):
# spills the five kernel arguments to the stack, builds the pointer array
# expected by cudaLaunchKernel, pops the <<<grid, block, shmem, stream>>>
# launch configuration, and launches the kernel. Stack-protector guarded.
.globl _Z38__device_stub__Z10activationPlPfS_S0_fPlPfS_S0_f
.type _Z38__device_stub__Z10activationPlPfS_S0_fPlPfS_S0_f, @function
_Z38__device_stub__Z10activationPlPfS_S0_fPlPfS_S0_f:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)                     # spill arg0 (long*)
movq %rsi, 32(%rsp)                     # spill arg1 (float*)
movq %rdx, 24(%rsp)                     # spill arg2 (long*)
movq %rcx, 16(%rsp)                     # spill arg3 (float*)
movss %xmm0, 12(%rsp)                   # spill arg4 (float)
movq %fs:40, %rax                       # install stack canary
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax                     # args[] = addresses of the five spills
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)                       # default grid/block dims = (1,1,1)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx                     # &stream
leaq 48(%rsp), %rdx                     # &shared-mem size
leaq 76(%rsp), %rsi                     # &blockDim
leaq 64(%rsp), %rdi                     # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7                                  # 0 => configuration available, launch
.L3:
movq 152(%rsp), %rax                    # verify stack canary
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)                          # stream (stack-passed args to cudaLaunchKernel)
.cfi_def_cfa_offset 184
pushq 56(%rsp)                          # shared-mem size
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9                     # kernel args array
movq 92(%rsp), %rcx                     # blockDim (x,y packed)
movl 100(%rsp), %r8d                    # blockDim.z
movq 80(%rsp), %rsi                     # gridDim (x,y packed)
movl 88(%rsp), %edx                     # gridDim.z
leaq activation(%rip), %rdi             # host-side kernel symbol
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z10activationPlPfS_S0_fPlPfS_S0_f, .-_Z38__device_stub__Z10activationPlPfS_S0_fPlPfS_S0_f
# Host-side entry point `activation`: thin trampoline into the device stub
# (this symbol is what __cudaRegisterFunction associates with the device code).
.globl activation
.type activation, @function
activation:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp                           # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z10activationPlPfS_S0_fPlPfS_S0_f
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size activation, .-activation
# Device stub for inputs_gradients(long*, float*, long*, float*):
# same pattern as the activation stub above, with four pointer arguments
# and no scalar.
.globl _Z43__device_stub__Z16inputs_gradientsPlPfS_S0_PlPfS_S0_
.type _Z43__device_stub__Z16inputs_gradientsPlPfS_S0_PlPfS_S0_, @function
_Z43__device_stub__Z16inputs_gradientsPlPfS_S0_PlPfS_S0_:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)                     # spill arg0 (long*)
movq %rsi, 16(%rsp)                     # spill arg1 (float*)
movq %rdx, 8(%rsp)                      # spill arg2 (long*)
movq %rcx, (%rsp)                       # spill arg3 (float*)
movq %fs:40, %rax                       # install stack canary
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax                     # args[] = addresses of the four spills
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)                       # default grid/block dims = (1,1,1)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx                     # &stream
leaq 32(%rsp), %rdx                     # &shared-mem size
leaq 60(%rsp), %rsi                     # &blockDim
leaq 48(%rsp), %rdi                     # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15                                 # 0 => configuration available, launch
.L11:
movq 136(%rsp), %rax                    # verify stack canary
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)                          # stream
.cfi_def_cfa_offset 168
pushq 40(%rsp)                          # shared-mem size
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9                     # kernel args array
movq 76(%rsp), %rcx                     # blockDim (x,y packed)
movl 84(%rsp), %r8d                     # blockDim.z
movq 64(%rsp), %rsi                     # gridDim (x,y packed)
movl 72(%rsp), %edx                     # gridDim.z
leaq inputs_gradients(%rip), %rdi       # host-side kernel symbol
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z43__device_stub__Z16inputs_gradientsPlPfS_S0_PlPfS_S0_, .-_Z43__device_stub__Z16inputs_gradientsPlPfS_S0_PlPfS_S0_
# Host-side entry point `inputs_gradients`: thin trampoline into the device
# stub (the symbol registered with the CUDA runtime below).
.globl inputs_gradients
.type inputs_gradients, @function
inputs_gradients:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp                           # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z16inputs_gradientsPlPfS_S0_PlPfS_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size inputs_gradients, .-inputs_gradients
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "inputs_gradients"
.LC1:
.string "activation"
.text
# Static initializer (placed in .init_array below): registers the embedded
# fat binary with the CUDA runtime, binds both host kernel symbols to their
# device-code names, then arranges teardown via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx                              # rbx holds the fatbin handle across calls
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0                                # four NULL trailing args for __cudaRegisterFunction
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d                          # thread_limit = -1 (none)
leaq .LC0(%rip), %rdx                   # "inputs_gradients"
movq %rdx, %rcx
leaq inputs_gradients(%rip), %rsi       # host stub symbol
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx                   # "activation"
movq %rdx, %rcx
leaq activation(%rip), %rsi             # host stub symbol
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                         # unregister at program exit
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "pad_2d.hip"
# HIP device stub for activation(long*, float*, long*, float*, float):
# spills the five arguments, builds the kernel-args pointer array, pops the
# launch configuration pushed by the <<<...>>> call site, and forwards to
# hipLaunchKernel. (No stack-protector, unlike the GCC/CUDA stub variant.)
.globl __device_stub__activation # -- Begin function __device_stub__activation
.p2align 4, 0x90
.type __device_stub__activation,@function
__device_stub__activation: # @__device_stub__activation
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)                     # spill arg0 (long*)
movq %rsi, 80(%rsp)                     # spill arg1 (float*)
movq %rdx, 72(%rsp)                     # spill arg2 (long*)
movq %rcx, 64(%rsp)                     # spill arg3 (float*)
movss %xmm0, 12(%rsp)                   # spill arg4 (float)
leaq 88(%rsp), %rax                     # args[] = addresses of the five spills
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi                     # &gridDim
leaq 32(%rsp), %rsi                     # &blockDim
leaq 24(%rsp), %rdx                     # &shared-mem size
leaq 16(%rsp), %rcx                     # &stream
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi                     # gridDim (x,y packed)
movl 56(%rsp), %edx                     # gridDim.z
movq 32(%rsp), %rcx                     # blockDim (x,y packed)
movl 40(%rsp), %r8d                     # blockDim.z
leaq 96(%rsp), %r9                      # kernel args array
movl $activation, %edi                  # host-side kernel handle (see .rodata below)
pushq 16(%rsp)                          # stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)                          # shared-mem size
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size __device_stub__activation, .Lfunc_end0-__device_stub__activation
.cfi_endproc
                                        # -- End function
# HIP device stub for inputs_gradients(long*, float*, long*, float*):
# same pattern as __device_stub__activation with four pointer arguments.
.globl __device_stub__inputs_gradients # -- Begin function __device_stub__inputs_gradients
.p2align 4, 0x90
.type __device_stub__inputs_gradients,@function
__device_stub__inputs_gradients: # @__device_stub__inputs_gradients
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)                     # spill arg0 (long*)
movq %rsi, 64(%rsp)                     # spill arg1 (float*)
movq %rdx, 56(%rsp)                     # spill arg2 (long*)
movq %rcx, 48(%rsp)                     # spill arg3 (float*)
leaq 72(%rsp), %rax                     # args[] = addresses of the four spills
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi                     # &gridDim
leaq 16(%rsp), %rsi                     # &blockDim
leaq 8(%rsp), %rdx                      # &shared-mem size
movq %rsp, %rcx                         # &stream
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi                     # gridDim (x,y packed)
movl 40(%rsp), %edx                     # gridDim.z
movq 16(%rsp), %rcx                     # blockDim (x,y packed)
movl 24(%rsp), %r8d                     # blockDim.z
leaq 80(%rsp), %r9                      # kernel args array
movl $inputs_gradients, %edi            # host-side kernel handle
pushq (%rsp)                            # stream
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)                          # shared-mem size
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size __device_stub__inputs_gradients, .Lfunc_end1-__device_stub__inputs_gradients
.cfi_endproc
                                        # -- End function
# Module constructor (in .init_array below): registers the embedded HIP fat
# binary exactly once (guarded by __hip_gpubin_handle), binds both kernel
# handles to their device-code names, and installs __hip_module_dtor via atexit.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx                              # rbx holds the gpubin handle across calls
.cfi_def_cfa_offset 16
subq $32, %rsp                          # space for __hipRegisterFunction's stack args
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)      # already registered?
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0                      # zero the four trailing pointer args
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $activation, %esi                  # host-side kernel handle
movl $.L__unnamed_1, %edx               # device name "activation"
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d                          # thread_limit = -1 (none)
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $inputs_gradients, %esi            # host-side kernel handle
movl $.L__unnamed_2, %edx               # device name "inputs_gradients"
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
                                        # -- End function
# Module destructor (registered with atexit by the ctor): unregisters the fat
# binary if it was registered and clears the handle so teardown is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi                        # nothing to do if never registered
je .LBB3_2
# %bb.1:
pushq %rax                              # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)      # mark as unregistered
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
                                        # -- End function
.type activation,@object # @activation
.section .rodata,"a",@progbits
.globl activation
.p2align 3, 0x0
activation:
.quad __device_stub__activation
.size activation, 8
.type inputs_gradients,@object # @inputs_gradients
.globl inputs_gradients
.p2align 3, 0x0
inputs_gradients:
.quad __device_stub__inputs_gradients
.size inputs_gradients, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "activation"
.size .L__unnamed_1, 11
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "inputs_gradients"
.size .L__unnamed_2, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__activation
.addrsig_sym __device_stub__inputs_gradients
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym activation
.addrsig_sym inputs_gradients
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
//#include "opencv2/highgui/highgui.hpp"
//#include <cstdio>
//#include <time.h>
//#include <sstream>
//#include <iostream>
//
//using namespace cv;
//using namespace std;
//
//#define MAX_THREADS_BY_BLOCK 1024
//#define DIM_BLOCK_X 32
//#define DIM_BLOCK_Y 32
//
//__device__ int cuGPos(int y, int x, int cuCols) {
// return y * cuCols + x;
//}
//
//__global__ void cudaConvolutionImage(int *cuPoRows, int *cuPoCols, int *cuInImage, int *cuResImage, int* sizeKer, float* kernel){
// int threadIdGlob = threadIdx.x + blockIdx.x * blockDim.x;
//
// int cuRows = *cuPoRows;
// int cuCols = *cuPoCols;
//
// if (threadIdGlob < cuRows * cuCols) {
// int y = threadIdGlob / cuCols;
// int x = threadIdGlob % cuCols;
// float pixel = 0;
// for (int i = (*sizeKer) / -2; i <= (*sizeKer) / 2; i++) {
// for (int j = (*sizeKer) / -2; j <= (*sizeKer) / 2; j++) {
// if (y + j >= 0 && x + i >= 0) {
// pixel += cuInImage[cuGPos(y + j, x + i, cuCols)] * kernel[(j + (*sizeKer / 2)*(*sizeKer) + (i + (*sizeKer / 2)))];
// }
// }
// }
//
// pixel = pixel > 255 ? 255 : pixel;
// pixel = pixel < 0 ? 0 : pixel;
// cuResImage[cuGPos(y, x, cuCols)] = pixel;
// if (cuResImage[cuGPos(y, x, cuCols)] == 0 && x == 0 && y == 0)
// printf("es igual de cero\n");
// }
//}
//
//Mat generateConvolutionCUDAGrayImage(Mat inMatIn, float** kernel, int siKe, string nameFile) {
// Mat inMatImage = inMatIn.clone();
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int* inImage = new int[nRows * nCols];
// float* kerArray = new float[siKe * siKe];
//
// for (int x = 0; x < nCols; x++)
// for (int y = 0; y < nRows; y++)
// inImage[y*nCols + x] = 0;
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[y * nCols + x] = inMatImage.at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++){
// kerArray[i*siKe + j] = kernel[i][j];
// }
//
// int *cuPoRows, *cuPoCols, *cuN, *cuInImage, *cuResImage;
// float* cuKernel;
//
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImage, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImage, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImage, inImage, nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// dim3 blockDim(MAX_THREADS_BY_BLOCK, 1, 1);
// dim3 gridDim((N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImage, cuResImage, cuN, cuKernel);
//
// cudaMemcpy(inImage, cuResImage, nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImage);
// cudaFree(cuResImage);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++)
// inMatImage.at<uchar>(y, x) = inImage[y*nCols + x];
//
// printf("%f \t", time / 1000.0);
// return inMatImage;
//}
//
//Mat generateConvolutionCUDARGBImage(Mat inMatImage, float** kernel, int siKe, string nameFile){
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int** inImage = new int*[3];
// float* kerArray = new float[siKe * siKe];
// for (int i = 0; i < 3; i++)
// inImage[i] = new int[nRows * nCols];
//
// Mat bgr[3];
// split(inMatImage, bgr);
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[0][y * nCols + x] = bgr[0].at<uchar>(y, x);
// inImage[1][y * nCols + x] = bgr[1].at<uchar>(y, x);
// inImage[2][y * nCols + x] = bgr[2].at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++)
// kerArray[i*siKe + j] = kernel[i][j];
//
// int *cuPoRows, *cuPoCols, *cuN;
// int *cuInImageR, *cuInImageG, *cuInImageB;
// int *cuResImageR, *cuResImageG, *cuResImageB;
// float *cuKernel;
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageB, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageB, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageB, inImage[0], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageG, inImage[1], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageR, inImage[2], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// int nBloq = (N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK;
//
// dim3 blockDim(DIM_BLOCK_X, DIM_BLOCK_Y, 1);
// dim3 gridDim(nBloq, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageB, cuResImageB, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageG, cuResImageG, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageR, cuResImageR, cuN, cuKernel);
//
// cudaMemcpy(cuInImageB, inImage[0], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageG, inImage[1], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageR, inImage[2], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImageB);
// cudaFree(cuInImageG);
// cudaFree(cuInImageR);
// cudaFree(cuResImageB);
// cudaFree(cuResImageG);
// cudaFree(cuResImageR);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++){
// bgr[0].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[1].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[2].at<uchar>(y, x) = inImage[0][y*nCols + x];
// }
// merge(bgr, 3, inMatImage);
// printf("Convolución de CUDA RGB Image:%s Mascara: %d Tiempo %f\n", nameFile.c_str(), siKe, time / 1000.0);
// return inMatImage;
//}
//
//string intToString(int n){
// ostringstream ss;
// ss << n;
// string a = n <= 9 ? "0" : "";
// return a + ss.str();
//}
//
//
//int main111() {
// string path = "D://temp//convolution//";
// string imagens[15] = { "gordo.jpg"};
// string dirOut[15] = { "tekken"};
//
// // string imagens[2] = {"wallPaper08.jpeg"};
// // string dirOut[1] = {"wallPaper08"};
//
// int arrayKernel[13] = { 3, 5, 9, 13, 19, 25, 31, 39, 47, 57, 67, 75, 85 };
// int nImages = 15, nKernels = 13;
//
// for (int i = 0; i < nImages; i++) {
// Mat inMatGrayImage = imread(path + imagens[i], CV_LOAD_IMAGE_GRAYSCALE);
// printf("%s,%dx%d,%ld \n", imagens[i].c_str(), inMatGrayImage.rows, inMatGrayImage.cols, (long)inMatGrayImage.cols * (long)inMatGrayImage.rows);
// imwrite(path + dirOut[i] + "/grayScale" + imagens[i], inMatGrayImage);
// for (int j = 0; j < nKernels; j++) {
// float **kernel = new float*[arrayKernel[j]];
// for (int k = 0; k < arrayKernel[j]; k++) {
// kernel[k] = new float[arrayKernel[j]];
// for (int l = 0; l < arrayKernel[j]; l++)
// kernel[k][l] = 1.0 / (arrayKernel[j] * arrayKernel[j]);
// }
// Mat outMatImage = generateConvolutionCUDAGrayImage(inMatGrayImage, kernel, arrayKernel[j], imagens[i]);
// string fullpath = path + dirOut[i] + "//grayCuda_" + intToString(arrayKernel[j]) + "_" + imagens[i];
// cout << "kernel : " << arrayKernel[j] << " - fullpath : " << fullpath << endl;
// imwrite(fullpath, outMatImage);
// }
// printf("\n");
// }
//
// return 0;
//}
|
code for sm_80
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
//#include "opencv2/highgui/highgui.hpp"
//#include <cstdio>
//#include <time.h>
//#include <sstream>
//#include <iostream>
//
//using namespace cv;
//using namespace std;
//
//#define MAX_THREADS_BY_BLOCK 1024
//#define DIM_BLOCK_X 32
//#define DIM_BLOCK_Y 32
//
//__device__ int cuGPos(int y, int x, int cuCols) {
// return y * cuCols + x;
//}
//
//__global__ void cudaConvolutionImage(int *cuPoRows, int *cuPoCols, int *cuInImage, int *cuResImage, int* sizeKer, float* kernel){
// int threadIdGlob = threadIdx.x + blockIdx.x * blockDim.x;
//
// int cuRows = *cuPoRows;
// int cuCols = *cuPoCols;
//
// if (threadIdGlob < cuRows * cuCols) {
// int y = threadIdGlob / cuCols;
// int x = threadIdGlob % cuCols;
// float pixel = 0;
// for (int i = (*sizeKer) / -2; i <= (*sizeKer) / 2; i++) {
// for (int j = (*sizeKer) / -2; j <= (*sizeKer) / 2; j++) {
// if (y + j >= 0 && x + i >= 0) {
// pixel += cuInImage[cuGPos(y + j, x + i, cuCols)] * kernel[(j + (*sizeKer / 2)*(*sizeKer) + (i + (*sizeKer / 2)))];
// }
// }
// }
//
// pixel = pixel > 255 ? 255 : pixel;
// pixel = pixel < 0 ? 0 : pixel;
// cuResImage[cuGPos(y, x, cuCols)] = pixel;
// if (cuResImage[cuGPos(y, x, cuCols)] == 0 && x == 0 && y == 0)
// printf("es igual de cero\n");
// }
//}
//
//Mat generateConvolutionCUDAGrayImage(Mat inMatIn, float** kernel, int siKe, string nameFile) {
// Mat inMatImage = inMatIn.clone();
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int* inImage = new int[nRows * nCols];
// float* kerArray = new float[siKe * siKe];
//
// for (int x = 0; x < nCols; x++)
// for (int y = 0; y < nRows; y++)
// inImage[y*nCols + x] = 0;
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[y * nCols + x] = inMatImage.at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++){
// kerArray[i*siKe + j] = kernel[i][j];
// }
//
// int *cuPoRows, *cuPoCols, *cuN, *cuInImage, *cuResImage;
// float* cuKernel;
//
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImage, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImage, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImage, inImage, nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// dim3 blockDim(MAX_THREADS_BY_BLOCK, 1, 1);
// dim3 gridDim((N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImage, cuResImage, cuN, cuKernel);
//
// cudaMemcpy(inImage, cuResImage, nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImage);
// cudaFree(cuResImage);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++)
// inMatImage.at<uchar>(y, x) = inImage[y*nCols + x];
//
// printf("%f \t", time / 1000.0);
// return inMatImage;
//}
//
//Mat generateConvolutionCUDARGBImage(Mat inMatImage, float** kernel, int siKe, string nameFile){
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int** inImage = new int*[3];
// float* kerArray = new float[siKe * siKe];
// for (int i = 0; i < 3; i++)
// inImage[i] = new int[nRows * nCols];
//
// Mat bgr[3];
// split(inMatImage, bgr);
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[0][y * nCols + x] = bgr[0].at<uchar>(y, x);
// inImage[1][y * nCols + x] = bgr[1].at<uchar>(y, x);
// inImage[2][y * nCols + x] = bgr[2].at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++)
// kerArray[i*siKe + j] = kernel[i][j];
//
// int *cuPoRows, *cuPoCols, *cuN;
// int *cuInImageR, *cuInImageG, *cuInImageB;
// int *cuResImageR, *cuResImageG, *cuResImageB;
// float *cuKernel;
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageB, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageB, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageB, inImage[0], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageG, inImage[1], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageR, inImage[2], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// int nBloq = (N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK;
//
// dim3 blockDim(DIM_BLOCK_X, DIM_BLOCK_Y, 1);
// dim3 gridDim(nBloq, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageB, cuResImageB, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageG, cuResImageG, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageR, cuResImageR, cuN, cuKernel);
//
// cudaMemcpy(cuInImageB, inImage[0], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageG, inImage[1], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageR, inImage[2], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImageB);
// cudaFree(cuInImageG);
// cudaFree(cuInImageR);
// cudaFree(cuResImageB);
// cudaFree(cuResImageG);
// cudaFree(cuResImageR);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++){
// bgr[0].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[1].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[2].at<uchar>(y, x) = inImage[0][y*nCols + x];
// }
// merge(bgr, 3, inMatImage);
// printf("Convolución de CUDA RGB Image:%s Mascara: %d Tiempo %f\n", nameFile.c_str(), siKe, time / 1000.0);
// return inMatImage;
//}
//
//string intToString(int n){
// ostringstream ss;
// ss << n;
// string a = n <= 9 ? "0" : "";
// return a + ss.str();
//}
//
//
//int main111() {
// string path = "D://temp//convolution//";
// string imagens[15] = { "gordo.jpg"};
// string dirOut[15] = { "tekken"};
//
// // string imagens[2] = {"wallPaper08.jpeg"};
// // string dirOut[1] = {"wallPaper08"};
//
// int arrayKernel[13] = { 3, 5, 9, 13, 19, 25, 31, 39, 47, 57, 67, 75, 85 };
// int nImages = 15, nKernels = 13;
//
// for (int i = 0; i < nImages; i++) {
// Mat inMatGrayImage = imread(path + imagens[i], CV_LOAD_IMAGE_GRAYSCALE);
// printf("%s,%dx%d,%ld \n", imagens[i].c_str(), inMatGrayImage.rows, inMatGrayImage.cols, (long)inMatGrayImage.cols * (long)inMatGrayImage.rows);
// imwrite(path + dirOut[i] + "/grayScale" + imagens[i], inMatGrayImage);
// for (int j = 0; j < nKernels; j++) {
// float **kernel = new float*[arrayKernel[j]];
// for (int k = 0; k < arrayKernel[j]; k++) {
// kernel[k] = new float[arrayKernel[j]];
// for (int l = 0; l < arrayKernel[j]; l++)
// kernel[k][l] = 1.0 / (arrayKernel[j] * arrayKernel[j]);
// }
// Mat outMatImage = generateConvolutionCUDAGrayImage(inMatGrayImage, kernel, arrayKernel[j], imagens[i]);
// string fullpath = path + dirOut[i] + "//grayCuda_" + intToString(arrayKernel[j]) + "_" + imagens[i];
// cout << "kernel : " << arrayKernel[j] << " - fullpath : " << fullpath << endl;
// imwrite(fullpath, outMatImage);
// }
// printf("\n");
// }
//
// return 0;
//}
|
.file "tmpxft_000c36af_00000000-6_chelo.cudafe1.cpp"
.text
#APP
#NO_APP
#-----------------------------------------------------------------------
# static void __cudaUnregisterBinaryUtil(void)
# ABI:   System V AMD64
# Queued via atexit() by __sti____cudaRegisterAll below; at process exit
# it hands the saved fat-binary handle back to the CUDA runtime.
# In:    none
# Out:   none
# Clobb: caller-saved registers (whatever the PLT callee clobbers)
#-----------------------------------------------------------------------
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64                                 # CET/IBT landing pad (reachable indirectly via atexit)
subq $8, %rsp                           # re-align rsp to 16 bytes before the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = handle saved at registration time
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# static void __sti____cudaRegisterAll(void)
# ABI:   System V AMD64
# Module constructor (installed in .init_array below, so it runs before
# main). Registers the embedded device fat binary with the CUDA runtime,
# caches the returned handle for later unregistration, finalizes the
# registration, and queues the cleanup routine with atexit().
# In:    none
# Out:   none
# Clobb: caller-saved registers (via the three PLT calls)
#-----------------------------------------------------------------------
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64                                 # CET/IBT landing pad (called indirectly from .init_array)
subq $8, %rsp                           # re-align rsp to 16 bytes for the calls below
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi   # arg0 = &fatbin wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi                         # returned handle is also arg0 of the next call
movq %rax, _ZL20__cudaFatCubinHandle(%rip)   # cache handle for the atexit unregister hook
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                         # run unregister at process exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Constructor table entry: run __sti____cudaRegisterAll before main.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
# Fat-binary wrapper descriptor consumed by __cudaRegisterFatBinary.
# Layout (24 bytes): magic, version, pointer to the embedded fatbin
# payload, reserved slot. NOTE(review): field meanings inferred from the
# nvcc-emitted wrapper convention — confirm against the toolchain in use.
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977                        # magic 0x466243b1
.long 1                                 # wrapper version
.quad fatbinData                        # payload emitted in a sibling object/section
.quad 0                                 # reserved / unused
# Zero-initialized slot for the runtime handle returned by
# __cudaRegisterFatBinary (read back by the atexit unregister hook).
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
# Mark the stack non-executable (empty .note.GNU-stack).
.section .note.GNU-stack,"",@progbits
# GNU property note advertising CET support; kept byte-exact because the
# linker merges these notes structurally.
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f                           # name size
.long 4f - 1f                           # descriptor size
.long 5                                 # note type NT_GNU_PROPERTY_TYPE_0
0:
.string "GNU"
1:
.align 8
.long 0xc0000002                        # GNU_PROPERTY_X86_FEATURE_1_AND
.long 3f - 2f                           # property data size
2:
.long 0x3                               # IBT | SHSTK supported
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
//#include "opencv2/highgui/highgui.hpp"
//#include <cstdio>
//#include <time.h>
//#include <sstream>
//#include <iostream>
//
//using namespace cv;
//using namespace std;
//
//#define MAX_THREADS_BY_BLOCK 1024
//#define DIM_BLOCK_X 32
//#define DIM_BLOCK_Y 32
//
//__device__ int cuGPos(int y, int x, int cuCols) {
// return y * cuCols + x;
//}
//
//__global__ void cudaConvolutionImage(int *cuPoRows, int *cuPoCols, int *cuInImage, int *cuResImage, int* sizeKer, float* kernel){
// int threadIdGlob = threadIdx.x + blockIdx.x * blockDim.x;
//
// int cuRows = *cuPoRows;
// int cuCols = *cuPoCols;
//
// if (threadIdGlob < cuRows * cuCols) {
// int y = threadIdGlob / cuCols;
// int x = threadIdGlob % cuCols;
// float pixel = 0;
// for (int i = (*sizeKer) / -2; i <= (*sizeKer) / 2; i++) {
// for (int j = (*sizeKer) / -2; j <= (*sizeKer) / 2; j++) {
// if (y + j >= 0 && x + i >= 0) {
// pixel += cuInImage[cuGPos(y + j, x + i, cuCols)] * kernel[(j + (*sizeKer / 2)*(*sizeKer) + (i + (*sizeKer / 2)))];
// }
// }
// }
//
// pixel = pixel > 255 ? 255 : pixel;
// pixel = pixel < 0 ? 0 : pixel;
// cuResImage[cuGPos(y, x, cuCols)] = pixel;
// if (cuResImage[cuGPos(y, x, cuCols)] == 0 && x == 0 && y == 0)
// printf("es igual de cero\n");
// }
//}
//
//Mat generateConvolutionCUDAGrayImage(Mat inMatIn, float** kernel, int siKe, string nameFile) {
// Mat inMatImage = inMatIn.clone();
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int* inImage = new int[nRows * nCols];
// float* kerArray = new float[siKe * siKe];
//
// for (int x = 0; x < nCols; x++)
// for (int y = 0; y < nRows; y++)
// inImage[y*nCols + x] = 0;
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[y * nCols + x] = inMatImage.at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++){
// kerArray[i*siKe + j] = kernel[i][j];
// }
//
// int *cuPoRows, *cuPoCols, *cuN, *cuInImage, *cuResImage;
// float* cuKernel;
//
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImage, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImage, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImage, inImage, nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// dim3 blockDim(MAX_THREADS_BY_BLOCK, 1, 1);
// dim3 gridDim((N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImage, cuResImage, cuN, cuKernel);
//
// cudaMemcpy(inImage, cuResImage, nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImage);
// cudaFree(cuResImage);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++)
// inMatImage.at<uchar>(y, x) = inImage[y*nCols + x];
//
// printf("%f \t", time / 1000.0);
// return inMatImage;
//}
//
//Mat generateConvolutionCUDARGBImage(Mat inMatImage, float** kernel, int siKe, string nameFile){
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int** inImage = new int*[3];
// float* kerArray = new float[siKe * siKe];
// for (int i = 0; i < 3; i++)
// inImage[i] = new int[nRows * nCols];
//
// Mat bgr[3];
// split(inMatImage, bgr);
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[0][y * nCols + x] = bgr[0].at<uchar>(y, x);
// inImage[1][y * nCols + x] = bgr[1].at<uchar>(y, x);
// inImage[2][y * nCols + x] = bgr[2].at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++)
// kerArray[i*siKe + j] = kernel[i][j];
//
// int *cuPoRows, *cuPoCols, *cuN;
// int *cuInImageR, *cuInImageG, *cuInImageB;
// int *cuResImageR, *cuResImageG, *cuResImageB;
// float *cuKernel;
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageB, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageB, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageB, inImage[0], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageG, inImage[1], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageR, inImage[2], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// int nBloq = (N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK;
//
// dim3 blockDim(DIM_BLOCK_X, DIM_BLOCK_Y, 1);
// dim3 gridDim(nBloq, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageB, cuResImageB, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageG, cuResImageG, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageR, cuResImageR, cuN, cuKernel);
//
// cudaMemcpy(cuInImageB, inImage[0], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageG, inImage[1], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageR, inImage[2], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImageB);
// cudaFree(cuInImageG);
// cudaFree(cuInImageR);
// cudaFree(cuResImageB);
// cudaFree(cuResImageG);
// cudaFree(cuResImageR);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++){
// bgr[0].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[1].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[2].at<uchar>(y, x) = inImage[0][y*nCols + x];
// }
// merge(bgr, 3, inMatImage);
// printf("Convolución de CUDA RGB Image:%s Mascara: %d Tiempo %f\n", nameFile.c_str(), siKe, time / 1000.0);
// return inMatImage;
//}
//
//string intToString(int n){
// ostringstream ss;
// ss << n;
// string a = n <= 9 ? "0" : "";
// return a + ss.str();
//}
//
//
//int main111() {
// string path = "D://temp//convolution//";
// string imagens[15] = { "gordo.jpg"};
// string dirOut[15] = { "tekken"};
//
// // string imagens[2] = {"wallPaper08.jpeg"};
// // string dirOut[1] = {"wallPaper08"};
//
// int arrayKernel[13] = { 3, 5, 9, 13, 19, 25, 31, 39, 47, 57, 67, 75, 85 };
// int nImages = 15, nKernels = 13;
//
// for (int i = 0; i < nImages; i++) {
// Mat inMatGrayImage = imread(path + imagens[i], CV_LOAD_IMAGE_GRAYSCALE);
// printf("%s,%dx%d,%ld \n", imagens[i].c_str(), inMatGrayImage.rows, inMatGrayImage.cols, (long)inMatGrayImage.cols * (long)inMatGrayImage.rows);
// imwrite(path + dirOut[i] + "/grayScale" + imagens[i], inMatGrayImage);
// for (int j = 0; j < nKernels; j++) {
// float **kernel = new float*[arrayKernel[j]];
// for (int k = 0; k < arrayKernel[j]; k++) {
// kernel[k] = new float[arrayKernel[j]];
// for (int l = 0; l < arrayKernel[j]; l++)
// kernel[k][l] = 1.0 / (arrayKernel[j] * arrayKernel[j]);
// }
// Mat outMatImage = generateConvolutionCUDAGrayImage(inMatGrayImage, kernel, arrayKernel[j], imagens[i]);
// string fullpath = path + dirOut[i] + "//grayCuda_" + intToString(arrayKernel[j]) + "_" + imagens[i];
// cout << "kernel : " << arrayKernel[j] << " - fullpath : " << fullpath << endl;
// imwrite(fullpath, outMatImage);
// }
// printf("\n");
// }
//
// return 0;
//}
|
#include <hip/hip_runtime.h>
//#include "opencv2/highgui/highgui.hpp"
//#include <cstdio>
//#include <time.h>
//#include <sstream>
//#include <iostream>
//
//using namespace cv;
//using namespace std;
//
//#define MAX_THREADS_BY_BLOCK 1024
//#define DIM_BLOCK_X 32
//#define DIM_BLOCK_Y 32
//
//__device__ int cuGPos(int y, int x, int cuCols) {
// return y * cuCols + x;
//}
//
//__global__ void cudaConvolutionImage(int *cuPoRows, int *cuPoCols, int *cuInImage, int *cuResImage, int* sizeKer, float* kernel){
// int threadIdGlob = threadIdx.x + blockIdx.x * blockDim.x;
//
// int cuRows = *cuPoRows;
// int cuCols = *cuPoCols;
//
// if (threadIdGlob < cuRows * cuCols) {
// int y = threadIdGlob / cuCols;
// int x = threadIdGlob % cuCols;
// float pixel = 0;
// for (int i = (*sizeKer) / -2; i <= (*sizeKer) / 2; i++) {
// for (int j = (*sizeKer) / -2; j <= (*sizeKer) / 2; j++) {
// if (y + j >= 0 && x + i >= 0) {
// pixel += cuInImage[cuGPos(y + j, x + i, cuCols)] * kernel[(j + (*sizeKer / 2)*(*sizeKer) + (i + (*sizeKer / 2)))];
// }
// }
// }
//
// pixel = pixel > 255 ? 255 : pixel;
// pixel = pixel < 0 ? 0 : pixel;
// cuResImage[cuGPos(y, x, cuCols)] = pixel;
// if (cuResImage[cuGPos(y, x, cuCols)] == 0 && x == 0 && y == 0)
// printf("es igual de cero\n");
// }
//}
//
//Mat generateConvolutionCUDAGrayImage(Mat inMatIn, float** kernel, int siKe, string nameFile) {
// Mat inMatImage = inMatIn.clone();
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int* inImage = new int[nRows * nCols];
// float* kerArray = new float[siKe * siKe];
//
// for (int x = 0; x < nCols; x++)
// for (int y = 0; y < nRows; y++)
// inImage[y*nCols + x] = 0;
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[y * nCols + x] = inMatImage.at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++){
// kerArray[i*siKe + j] = kernel[i][j];
// }
//
// int *cuPoRows, *cuPoCols, *cuN, *cuInImage, *cuResImage;
// float* cuKernel;
//
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImage, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImage, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImage, inImage, nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// dim3 blockDim(MAX_THREADS_BY_BLOCK, 1, 1);
// dim3 gridDim((N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImage, cuResImage, cuN, cuKernel);
//
// cudaMemcpy(inImage, cuResImage, nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImage);
// cudaFree(cuResImage);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++)
// inMatImage.at<uchar>(y, x) = inImage[y*nCols + x];
//
// printf("%f \t", time / 1000.0);
// return inMatImage;
//}
//
//Mat generateConvolutionCUDARGBImage(Mat inMatImage, float** kernel, int siKe, string nameFile){
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int** inImage = new int*[3];
// float* kerArray = new float[siKe * siKe];
// for (int i = 0; i < 3; i++)
// inImage[i] = new int[nRows * nCols];
//
// Mat bgr[3];
// split(inMatImage, bgr);
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[0][y * nCols + x] = bgr[0].at<uchar>(y, x);
// inImage[1][y * nCols + x] = bgr[1].at<uchar>(y, x);
// inImage[2][y * nCols + x] = bgr[2].at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++)
// kerArray[i*siKe + j] = kernel[i][j];
//
// int *cuPoRows, *cuPoCols, *cuN;
// int *cuInImageR, *cuInImageG, *cuInImageB;
// int *cuResImageR, *cuResImageG, *cuResImageB;
// float *cuKernel;
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageB, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageB, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageB, inImage[0], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageG, inImage[1], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageR, inImage[2], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// int nBloq = (N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK;
//
// dim3 blockDim(DIM_BLOCK_X, DIM_BLOCK_Y, 1);
// dim3 gridDim(nBloq, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageB, cuResImageB, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageG, cuResImageG, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageR, cuResImageR, cuN, cuKernel);
//
// cudaMemcpy(cuInImageB, inImage[0], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageG, inImage[1], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageR, inImage[2], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImageB);
// cudaFree(cuInImageG);
// cudaFree(cuInImageR);
// cudaFree(cuResImageB);
// cudaFree(cuResImageG);
// cudaFree(cuResImageR);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++){
// bgr[0].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[1].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[2].at<uchar>(y, x) = inImage[0][y*nCols + x];
// }
// merge(bgr, 3, inMatImage);
// printf("Convolución de CUDA RGB Image:%s Mascara: %d Tiempo %f\n", nameFile.c_str(), siKe, time / 1000.0);
// return inMatImage;
//}
//
//string intToString(int n){
// ostringstream ss;
// ss << n;
// string a = n <= 9 ? "0" : "";
// return a + ss.str();
//}
//
//
//int main111() {
// string path = "D://temp//convolution//";
// string imagens[15] = { "gordo.jpg"};
// string dirOut[15] = { "tekken"};
//
// // string imagens[2] = {"wallPaper08.jpeg"};
// // string dirOut[1] = {"wallPaper08"};
//
// int arrayKernel[13] = { 3, 5, 9, 13, 19, 25, 31, 39, 47, 57, 67, 75, 85 };
// int nImages = 15, nKernels = 13;
//
// for (int i = 0; i < nImages; i++) {
// Mat inMatGrayImage = imread(path + imagens[i], CV_LOAD_IMAGE_GRAYSCALE);
// printf("%s,%dx%d,%ld \n", imagens[i].c_str(), inMatGrayImage.rows, inMatGrayImage.cols, (long)inMatGrayImage.cols * (long)inMatGrayImage.rows);
// imwrite(path + dirOut[i] + "/grayScale" + imagens[i], inMatGrayImage);
// for (int j = 0; j < nKernels; j++) {
// float **kernel = new float*[arrayKernel[j]];
// for (int k = 0; k < arrayKernel[j]; k++) {
// kernel[k] = new float[arrayKernel[j]];
// for (int l = 0; l < arrayKernel[j]; l++)
// kernel[k][l] = 1.0 / (arrayKernel[j] * arrayKernel[j]);
// }
// Mat outMatImage = generateConvolutionCUDAGrayImage(inMatGrayImage, kernel, arrayKernel[j], imagens[i]);
// string fullpath = path + dirOut[i] + "//grayCuda_" + intToString(arrayKernel[j]) + "_" + imagens[i];
// cout << "kernel : " << arrayKernel[j] << " - fullpath : " << fullpath << endl;
// imwrite(fullpath, outMatImage);
// }
// printf("\n");
// }
//
// return 0;
//}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
//#include "opencv2/highgui/highgui.hpp"
//#include <cstdio>
//#include <time.h>
//#include <sstream>
//#include <iostream>
//
//using namespace cv;
//using namespace std;
//
//#define MAX_THREADS_BY_BLOCK 1024
//#define DIM_BLOCK_X 32
//#define DIM_BLOCK_Y 32
//
//__device__ int cuGPos(int y, int x, int cuCols) {
// return y * cuCols + x;
//}
//
//__global__ void cudaConvolutionImage(int *cuPoRows, int *cuPoCols, int *cuInImage, int *cuResImage, int* sizeKer, float* kernel){
// int threadIdGlob = threadIdx.x + blockIdx.x * blockDim.x;
//
// int cuRows = *cuPoRows;
// int cuCols = *cuPoCols;
//
// if (threadIdGlob < cuRows * cuCols) {
// int y = threadIdGlob / cuCols;
// int x = threadIdGlob % cuCols;
// float pixel = 0;
// for (int i = (*sizeKer) / -2; i <= (*sizeKer) / 2; i++) {
// for (int j = (*sizeKer) / -2; j <= (*sizeKer) / 2; j++) {
// if (y + j >= 0 && x + i >= 0) {
// pixel += cuInImage[cuGPos(y + j, x + i, cuCols)] * kernel[(j + (*sizeKer / 2)*(*sizeKer) + (i + (*sizeKer / 2)))];
// }
// }
// }
//
// pixel = pixel > 255 ? 255 : pixel;
// pixel = pixel < 0 ? 0 : pixel;
// cuResImage[cuGPos(y, x, cuCols)] = pixel;
// if (cuResImage[cuGPos(y, x, cuCols)] == 0 && x == 0 && y == 0)
// printf("es igual de cero\n");
// }
//}
//
//Mat generateConvolutionCUDAGrayImage(Mat inMatIn, float** kernel, int siKe, string nameFile) {
// Mat inMatImage = inMatIn.clone();
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int* inImage = new int[nRows * nCols];
// float* kerArray = new float[siKe * siKe];
//
// for (int x = 0; x < nCols; x++)
// for (int y = 0; y < nRows; y++)
// inImage[y*nCols + x] = 0;
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[y * nCols + x] = inMatImage.at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++){
// kerArray[i*siKe + j] = kernel[i][j];
// }
//
// int *cuPoRows, *cuPoCols, *cuN, *cuInImage, *cuResImage;
// float* cuKernel;
//
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImage, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImage, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImage, inImage, nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// dim3 blockDim(MAX_THREADS_BY_BLOCK, 1, 1);
// dim3 gridDim((N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImage, cuResImage, cuN, cuKernel);
//
// cudaMemcpy(inImage, cuResImage, nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImage);
// cudaFree(cuResImage);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++)
// inMatImage.at<uchar>(y, x) = inImage[y*nCols + x];
//
// printf("%f \t", time / 1000.0);
// return inMatImage;
//}
//
//Mat generateConvolutionCUDARGBImage(Mat inMatImage, float** kernel, int siKe, string nameFile){
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int** inImage = new int*[3];
// float* kerArray = new float[siKe * siKe];
// for (int i = 0; i < 3; i++)
// inImage[i] = new int[nRows * nCols];
//
// Mat bgr[3];
// split(inMatImage, bgr);
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[0][y * nCols + x] = bgr[0].at<uchar>(y, x);
// inImage[1][y * nCols + x] = bgr[1].at<uchar>(y, x);
// inImage[2][y * nCols + x] = bgr[2].at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++)
// kerArray[i*siKe + j] = kernel[i][j];
//
// int *cuPoRows, *cuPoCols, *cuN;
// int *cuInImageR, *cuInImageG, *cuInImageB;
// int *cuResImageR, *cuResImageG, *cuResImageB;
// float *cuKernel;
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageB, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageB, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageB, inImage[0], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageG, inImage[1], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageR, inImage[2], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// int nBloq = (N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK;
//
// dim3 blockDim(DIM_BLOCK_X, DIM_BLOCK_Y, 1);
// dim3 gridDim(nBloq, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageB, cuResImageB, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageG, cuResImageG, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageR, cuResImageR, cuN, cuKernel);
//
// cudaMemcpy(cuInImageB, inImage[0], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageG, inImage[1], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageR, inImage[2], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImageB);
// cudaFree(cuInImageG);
// cudaFree(cuInImageR);
// cudaFree(cuResImageB);
// cudaFree(cuResImageG);
// cudaFree(cuResImageR);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++){
// bgr[0].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[1].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[2].at<uchar>(y, x) = inImage[0][y*nCols + x];
// }
// merge(bgr, 3, inMatImage);
// printf("Convolución de CUDA RGB Image:%s Mascara: %d Tiempo %f\n", nameFile.c_str(), siKe, time / 1000.0);
// return inMatImage;
//}
//
//string intToString(int n){
// ostringstream ss;
// ss << n;
// string a = n <= 9 ? "0" : "";
// return a + ss.str();
//}
//
//
//int main111() {
// string path = "D://temp//convolution//";
// string imagens[15] = { "gordo.jpg"};
// string dirOut[15] = { "tekken"};
//
// // string imagens[2] = {"wallPaper08.jpeg"};
// // string dirOut[1] = {"wallPaper08"};
//
// int arrayKernel[13] = { 3, 5, 9, 13, 19, 25, 31, 39, 47, 57, 67, 75, 85 };
// int nImages = 15, nKernels = 13;
//
// for (int i = 0; i < nImages; i++) {
// Mat inMatGrayImage = imread(path + imagens[i], CV_LOAD_IMAGE_GRAYSCALE);
// printf("%s,%dx%d,%ld \n", imagens[i].c_str(), inMatGrayImage.rows, inMatGrayImage.cols, (long)inMatGrayImage.cols * (long)inMatGrayImage.rows);
// imwrite(path + dirOut[i] + "/grayScale" + imagens[i], inMatGrayImage);
// for (int j = 0; j < nKernels; j++) {
// float **kernel = new float*[arrayKernel[j]];
// for (int k = 0; k < arrayKernel[j]; k++) {
// kernel[k] = new float[arrayKernel[j]];
// for (int l = 0; l < arrayKernel[j]; l++)
// kernel[k][l] = 1.0 / (arrayKernel[j] * arrayKernel[j]);
// }
// Mat outMatImage = generateConvolutionCUDAGrayImage(inMatGrayImage, kernel, arrayKernel[j], imagens[i]);
// string fullpath = path + dirOut[i] + "//grayCuda_" + intToString(arrayKernel[j]) + "_" + imagens[i];
// cout << "kernel : " << arrayKernel[j] << " - fullpath : " << fullpath << endl;
// imwrite(fullpath, outMatImage);
// }
// printf("\n");
// }
//
// return 0;
//}
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
//#include "opencv2/highgui/highgui.hpp"
//#include <cstdio>
//#include <time.h>
//#include <sstream>
//#include <iostream>
//
//using namespace cv;
//using namespace std;
//
//#define MAX_THREADS_BY_BLOCK 1024
//#define DIM_BLOCK_X 32
//#define DIM_BLOCK_Y 32
//
//__device__ int cuGPos(int y, int x, int cuCols) {
// return y * cuCols + x;
//}
//
//__global__ void cudaConvolutionImage(int *cuPoRows, int *cuPoCols, int *cuInImage, int *cuResImage, int* sizeKer, float* kernel){
// int threadIdGlob = threadIdx.x + blockIdx.x * blockDim.x;
//
// int cuRows = *cuPoRows;
// int cuCols = *cuPoCols;
//
// if (threadIdGlob < cuRows * cuCols) {
// int y = threadIdGlob / cuCols;
// int x = threadIdGlob % cuCols;
// float pixel = 0;
// for (int i = (*sizeKer) / -2; i <= (*sizeKer) / 2; i++) {
// for (int j = (*sizeKer) / -2; j <= (*sizeKer) / 2; j++) {
// if (y + j >= 0 && x + i >= 0) {
// pixel += cuInImage[cuGPos(y + j, x + i, cuCols)] * kernel[(j + (*sizeKer / 2)*(*sizeKer) + (i + (*sizeKer / 2)))];
// }
// }
// }
//
// pixel = pixel > 255 ? 255 : pixel;
// pixel = pixel < 0 ? 0 : pixel;
// cuResImage[cuGPos(y, x, cuCols)] = pixel;
// if (cuResImage[cuGPos(y, x, cuCols)] == 0 && x == 0 && y == 0)
// printf("es igual de cero\n");
// }
//}
//
//Mat generateConvolutionCUDAGrayImage(Mat inMatIn, float** kernel, int siKe, string nameFile) {
// Mat inMatImage = inMatIn.clone();
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int* inImage = new int[nRows * nCols];
// float* kerArray = new float[siKe * siKe];
//
// for (int x = 0; x < nCols; x++)
// for (int y = 0; y < nRows; y++)
// inImage[y*nCols + x] = 0;
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[y * nCols + x] = inMatImage.at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++){
// kerArray[i*siKe + j] = kernel[i][j];
// }
//
// int *cuPoRows, *cuPoCols, *cuN, *cuInImage, *cuResImage;
// float* cuKernel;
//
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImage, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImage, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImage, inImage, nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// dim3 blockDim(MAX_THREADS_BY_BLOCK, 1, 1);
// dim3 gridDim((N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImage, cuResImage, cuN, cuKernel);
//
// cudaMemcpy(inImage, cuResImage, nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImage);
// cudaFree(cuResImage);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++)
// inMatImage.at<uchar>(y, x) = inImage[y*nCols + x];
//
// printf("%f \t", time / 1000.0);
// return inMatImage;
//}
//
//Mat generateConvolutionCUDARGBImage(Mat inMatImage, float** kernel, int siKe, string nameFile){
// float time = 0.0;
// int nRows = inMatImage.rows;
// int nCols = inMatImage.cols;
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int** inImage = new int*[3];
// float* kerArray = new float[siKe * siKe];
// for (int i = 0; i < 3; i++)
// inImage[i] = new int[nRows * nCols];
//
// Mat bgr[3];
// split(inMatImage, bgr);
//
// // split in RedGreenBlue channels
// for (int y = 0; y < nRows; y++){
// for (int x = 0; x < nCols; x++){
// inImage[0][y * nCols + x] = bgr[0].at<uchar>(y, x);
// inImage[1][y * nCols + x] = bgr[1].at<uchar>(y, x);
// inImage[2][y * nCols + x] = bgr[2].at<uchar>(y, x);
// }
// }
//
// //gen array1d of kernel
// for (int i = 0; i < siKe; i++)
// for (int j = 0; j< siKe; j++)
// kerArray[i*siKe + j] = kernel[i][j];
//
// int *cuPoRows, *cuPoCols, *cuN;
// int *cuInImageR, *cuInImageG, *cuInImageB;
// int *cuResImageR, *cuResImageG, *cuResImageB;
// float *cuKernel;
//
// cudaEventRecord(start, 0);
// cudaMalloc((void**)&cuPoRows, sizeof(int));
// cudaMalloc((void**)&cuPoCols, sizeof(int));
// cudaMalloc((void**)&cuN, sizeof(int));
// cudaMalloc((void**)&cuKernel, siKe * siKe * sizeof(float));
// cudaMalloc((void**)&cuInImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuInImageB, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageR, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageG, nCols * nRows * sizeof(int));
// cudaMalloc((void**)&cuResImageB, nCols * nRows * sizeof(int));
//
// cudaMemcpy(cuPoRows, &nRows, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuPoCols, &nCols, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuN, &siKe, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuKernel, kerArray, siKe * siKe * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageB, inImage[0], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageG, inImage[1], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(cuInImageR, inImage[2], nCols * nRows * sizeof(int), cudaMemcpyHostToDevice);
//
// int N = nRows * nCols;
// int nBloq = (N + MAX_THREADS_BY_BLOCK - 1) / MAX_THREADS_BY_BLOCK;
//
// dim3 blockDim(DIM_BLOCK_X, DIM_BLOCK_Y, 1);
// dim3 gridDim(nBloq, 1, 1);
//
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageB, cuResImageB, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageG, cuResImageG, cuN, cuKernel);
// cudaConvolutionImage << <gridDim, blockDim >> >(cuPoRows, cuPoCols, cuInImageR, cuResImageR, cuN, cuKernel);
//
// cudaMemcpy(cuInImageB, inImage[0], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageG, inImage[1], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(cuInImageR, inImage[2], nRows * nCols * sizeof(int), cudaMemcpyDeviceToHost);
//
// cudaFree(cuPoRows);
// cudaFree(cuPoCols);
// cudaFree(cuN);
// cudaFree(cuKernel);
// cudaFree(cuInImageB);
// cudaFree(cuInImageG);
// cudaFree(cuInImageR);
// cudaFree(cuResImageB);
// cudaFree(cuResImageG);
// cudaFree(cuResImageR);
//
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&time, start, stop);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// for (int y = 0; y < nRows; y++)
// for (int x = 0; x < nCols; x++){
// bgr[0].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[1].at<uchar>(y, x) = inImage[0][y*nCols + x];
// bgr[2].at<uchar>(y, x) = inImage[0][y*nCols + x];
// }
// merge(bgr, 3, inMatImage);
// printf("Convolución de CUDA RGB Image:%s Mascara: %d Tiempo %f\n", nameFile.c_str(), siKe, time / 1000.0);
// return inMatImage;
//}
//
//string intToString(int n){
// ostringstream ss;
// ss << n;
// string a = n <= 9 ? "0" : "";
// return a + ss.str();
//}
//
//
//int main111() {
// string path = "D://temp//convolution//";
// string imagens[15] = { "gordo.jpg"};
// string dirOut[15] = { "tekken"};
//
// // string imagens[2] = {"wallPaper08.jpeg"};
// // string dirOut[1] = {"wallPaper08"};
//
// int arrayKernel[13] = { 3, 5, 9, 13, 19, 25, 31, 39, 47, 57, 67, 75, 85 };
// int nImages = 15, nKernels = 13;
//
// for (int i = 0; i < nImages; i++) {
// Mat inMatGrayImage = imread(path + imagens[i], CV_LOAD_IMAGE_GRAYSCALE);
// printf("%s,%dx%d,%ld \n", imagens[i].c_str(), inMatGrayImage.rows, inMatGrayImage.cols, (long)inMatGrayImage.cols * (long)inMatGrayImage.rows);
// imwrite(path + dirOut[i] + "/grayScale" + imagens[i], inMatGrayImage);
// for (int j = 0; j < nKernels; j++) {
// float **kernel = new float*[arrayKernel[j]];
// for (int k = 0; k < arrayKernel[j]; k++) {
// kernel[k] = new float[arrayKernel[j]];
// for (int l = 0; l < arrayKernel[j]; l++)
// kernel[k][l] = 1.0 / (arrayKernel[j] * arrayKernel[j]);
// }
// Mat outMatImage = generateConvolutionCUDAGrayImage(inMatGrayImage, kernel, arrayKernel[j], imagens[i]);
// string fullpath = path + dirOut[i] + "//grayCuda_" + intToString(arrayKernel[j]) + "_" + imagens[i];
// cout << "kernel : " << arrayKernel[j] << " - fullpath : " << fullpath << endl;
// imwrite(fullpath, outMatImage);
// }
// printf("\n");
// }
//
// return 0;
//}
|
.text
.file "chelo.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_000c36af_00000000-6_chelo.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "chelo.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdio.h>
#include <curand.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/system_error.h>
// Fill d_buffer with num random numbers
extern "C" void fill_rand(float *d_buffer, int num)
{
curandGenerator_t gen;
int status;
// Create generator
status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// Set seed
status |= curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
// Generate num random numbers
status |= curandGenerateUniform(gen, d_buffer, num);
// Cleanup generator
status |= curandDestroyGenerator(gen);
if (status != CURAND_STATUS_SUCCESS) {
printf ("curand failure!\n");
exit (EXIT_FAILURE);
}
}
// Sort key value pairs
extern "C" void sort(int *d_keys, int *d_values, int num)
{
try {
// Create THRUST usable device pointers
thrust::device_ptr<int> keys(d_keys);
thrust::device_ptr<int> values(d_values);
// Sort keys AND values array by key
thrust::sort_by_key(keys, keys + num, values);
}
catch(thrust::system_error &e) {
std::cerr << "Error sorting with Thrust: " << e.what() << std::endl;
exit (EXIT_FAILURE);
}
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/system_error.h>
// Fill d_buffer with num random numbers
extern "C" void fill_rand(float *d_buffer, int num)
{
hiprandGenerator_t gen;
int status;
// Create generator
status = hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
// Set seed
status |= hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
// Generate num random numbers
status |= hiprandGenerateUniform(gen, d_buffer, num);
// Cleanup generator
status |= hiprandDestroyGenerator(gen);
if (status != HIPRAND_STATUS_SUCCESS) {
printf ("hiprand failure!\n");
exit (EXIT_FAILURE);
}
}
// Sort key value pairs
extern "C" void sort(int *d_keys, int *d_values, int num)
{
try {
// Create THRUST usable device pointers
thrust::device_ptr<int> keys(d_keys);
thrust::device_ptr<int> values(d_values);
// Sort keys AND values array by key
thrust::sort_by_key(keys, keys + num, values);
}
catch(thrust::system_error &e) {
std::cerr << "Error sorting with Thrust: " << e.what() << std::endl;
exit (EXIT_FAILURE);
}
}
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include "includes.h"
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
|
code for sm_80
Function : _Z23copy_buffer_util_kernelPK6float4PS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R2, R3, c[0x0][0x160] ; /* 0x0000580002047625 */
/* 0x000fcc00078e0203 */
/*0090*/ LDG.E.128.CONSTANT R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e9d00 */
/*00a0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0203 */
/*00b0*/ STG.E.128 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x004fe2000c101d04 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include "includes.h"
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
|
.file "tmpxft_0017fcc8_00000000-6_copy_buffer_util_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
# Static teardown helper registered via atexit: unregisters this translation
# unit's CUDA fatbinary when the process exits.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp # realign rsp to 16 bytes before the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg0 = handle saved at registration
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side launch stub generated for
#   __global__ void copy_buffer_util_kernel(const float4*, float4*, int)
# Spills the three kernel arguments, builds the argument-pointer array,
# pops the execution configuration recorded by the <<<...>>> call, and
# forwards everything to cudaLaunchKernel.
.globl _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i
.type _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i, @function
_Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edx, 12(%rsp) # spill elem_count
movq %fs:40, %rax # stack-protector canary
movq %rax, 120(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp) # spill input_buf
leaq 16(%rsp), %rax
movq %rax, 96(%rsp) # args[0] = &input_buf
movq %rsi, 24(%rsp) # spill output_buf
leaq 24(%rsp), %rax
movq %rax, 104(%rsp) # args[1] = &output_buf
leaq 12(%rsp), %rax
movq %rax, 112(%rsp) # args[2] = &elem_count
movl $1, 48(%rsp) # default grid/block dims (1,1,1)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx # &stream
leaq 32(%rsp), %rdx # &sharedMem
leaq 60(%rsp), %rsi # &blockDim
leaq 48(%rsp), %rdi # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7 # 0 => configuration popped, perform the launch
.L3:
movq 120(%rsp), %rax # verify stack canary
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp) # stack arg: stream
.cfi_def_cfa_offset 152
pushq 40(%rsp) # stack arg: shared-memory size
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9 # r9 = kernel argument array
movq 76(%rsp), %rcx # blockDim (x,y)
movl 84(%rsp), %r8d # blockDim.z
movq 64(%rsp), %rsi # gridDim (x,y)
movl 72(%rsp), %edx # gridDim.z
leaq _Z23copy_buffer_util_kernelPK6float4PS_i(%rip), %rdi # kernel handle
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i, .-_Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i
# Public host symbol for the kernel; the CUDA runtime maps this address to
# the device function, so calling it simply forwards to the launch stub.
.globl _Z23copy_buffer_util_kernelPK6float4PS_i
.type _Z23copy_buffer_util_kernelPK6float4PS_i, @function
_Z23copy_buffer_util_kernelPK6float4PS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp # keep rsp 16-byte aligned at the call
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23copy_buffer_util_kernelPK6float4PS_i, .-_Z23copy_buffer_util_kernelPK6float4PS_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23copy_buffer_util_kernelPK6float4PS_i"
.text
# Static constructor (.init_array): registers the embedded fatbinary and the
# kernel entry point with the CUDA runtime, then queues teardown via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi # fatbin wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # cache handle for unregister
pushq $0 # four trailing NULL args of __cudaRegisterFunction
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d # thread limit: -1 = no limit
leaq .LC0(%rip), %rdx # device-side mangled kernel name
movq %rdx, %rcx
leaq _Z23copy_buffer_util_kernelPK6float4PS_i(%rip), %rsi # host stub address
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include "includes.h"
// One-thread-per-element float4 copy kernel.
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
// Global linear thread index: one element per thread.
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
// Guard against the ragged last block.
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
// One-thread-per-element float4 copy kernel (HIP build of the CUDA original).
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
// Global linear thread index: one element per thread.
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
// Guard against the ragged last block.
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// One-thread-per-element float4 copy kernel (HIP build of the CUDA original).
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
// Global linear thread index: one element per thread.
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
// Guard against the ragged last block.
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.globl _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.p2align 8
.type _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i,@function
_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i:
; gfx1100 body of copy_buffer_util_kernel: each lane copies one 16-byte
; float4 from input_buf to output_buf when its flat element id < elem_count.
; Kernarg layout (see metadata): [0]=input_buf, [8]=output_buf, [16]=elem_count,
; hidden group-size fields start at offset 36.
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24 ; hidden group-size words (x in low 16 bits)
s_load_b32 s3, s[0:1], 0x10 ; elem_count (kernarg offset 16)
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff ; blockDim.x = low 16 bits
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] ; elem_id = wg_id.x*blockDim.x + tid.x
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1 ; mask off lanes with elem_id >= elem_count
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0 ; s[0:1]=input_buf, s[2:3]=output_buf
v_ashrrev_i32_e32 v2, 31, v1 ; sign-extend elem_id to 64 bits
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 4, v[1:2] ; byte offset = elem_id * 16
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v4 ; src = input_buf + offset
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s2, v4 ; dst = output_buf + offset
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
global_load_b128 v[0:3], v[0:1], off
s_waitcnt vmcnt(0)
global_store_b128 v[4:5], v[0:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, .Lfunc_end0-_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .actual_access: read_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: write_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// One-thread-per-element float4 copy kernel (HIP build of the CUDA original).
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
// Global linear thread index: one element per thread.
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
// Guard against the ragged last block.
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
|
.text
.file "copy_buffer_util_kernel.hip"
.globl _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i # -- Begin function _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.p2align 4, 0x90
# HIP host-side launch stub: spills the three kernel arguments, builds the
# argument-pointer array, pops the launch configuration recorded by the
# <<<...>>> call, and forwards everything to hipLaunchKernel.
.type _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i,@function
_Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i: # @_Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp) # spill input_buf
movq %rsi, 64(%rsp) # spill output_buf
movl %edx, 12(%rsp) # spill elem_count
leaq 72(%rsp), %rax
movq %rax, 80(%rsp) # args[0] = &input_buf
leaq 64(%rsp), %rax
movq %rax, 88(%rsp) # args[1] = &output_buf
leaq 12(%rsp), %rax
movq %rax, 96(%rsp) # args[2] = &elem_count
leaq 48(%rsp), %rdi # &gridDim
leaq 32(%rsp), %rsi # &blockDim
leaq 24(%rsp), %rdx # &sharedMem
leaq 16(%rsp), %rcx # &stream
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi # gridDim (x,y)
movl 56(%rsp), %edx # gridDim.z
movq 32(%rsp), %rcx # blockDim (x,y)
movl 40(%rsp), %r8d # blockDim.z
leaq 80(%rsp), %r9 # kernel argument array
movl $_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, %edi # kernel handle
pushq 16(%rsp) # stack arg: stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # stack arg: shared-memory size
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, .Lfunc_end0-_Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
# Module constructor (.init_array): lazily registers the embedded HIP
# fatbinary (once, guarded by __hip_gpubin_handle), registers the kernel
# symbol, and queues __hip_module_dtor with atexit.
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip) # already registered?
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip) # cache the handle
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp) # zero the four trailing stack arguments
movups %xmm0, (%rsp)
movl $_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, %esi # host-side handle
movl $.L__unnamed_1, %edx # device-side kernel name
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
# Module destructor (run via atexit): unregisters the fatbinary if it was
# registered and clears the cached handle.
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2 # nothing to do if never registered
# %bb.1:
pushq %rax # realign stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i,@object # @_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.section .rodata,"a",@progbits
.globl _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.p2align 3, 0x0
_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i:
.quad _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.size _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i"
.size .L__unnamed_1, 59
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z23copy_buffer_util_kernelPK6float4PS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R2, R3, c[0x0][0x160] ; /* 0x0000580002047625 */
/* 0x000fcc00078e0203 */
/*0090*/ LDG.E.128.CONSTANT R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e9d00 */
/*00a0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0203 */
/*00b0*/ STG.E.128 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x004fe2000c101d04 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.globl _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.p2align 8
.type _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i,@function
_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i:
; gfx1100 body of copy_buffer_util_kernel: each lane copies one 16-byte
; float4 from input_buf to output_buf when its flat element id < elem_count.
; Kernarg layout (see metadata): [0]=input_buf, [8]=output_buf, [16]=elem_count,
; hidden group-size fields start at offset 36.
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24 ; hidden group-size words (x in low 16 bits)
s_load_b32 s3, s[0:1], 0x10 ; elem_count (kernarg offset 16)
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff ; blockDim.x = low 16 bits
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] ; elem_id = wg_id.x*blockDim.x + tid.x
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1 ; mask off lanes with elem_id >= elem_count
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0 ; s[0:1]=input_buf, s[2:3]=output_buf
v_ashrrev_i32_e32 v2, 31, v1 ; sign-extend elem_id to 64 bits
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 4, v[1:2] ; byte offset = elem_id * 16
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v4 ; src = input_buf + offset
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s2, v4 ; dst = output_buf + offset
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
global_load_b128 v[0:3], v[0:1], off
s_waitcnt vmcnt(0)
global_store_b128 v[4:5], v[0:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, .Lfunc_end0-_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .actual_access: read_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: write_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0017fcc8_00000000-6_copy_buffer_util_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
# Static teardown helper registered via atexit: unregisters this translation
# unit's CUDA fatbinary when the process exits.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp # realign rsp to 16 bytes before the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg0 = handle saved at registration
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side launch stub generated for
#   __global__ void copy_buffer_util_kernel(const float4*, float4*, int)
# Spills the three kernel arguments, builds the argument-pointer array,
# pops the execution configuration recorded by the <<<...>>> call, and
# forwards everything to cudaLaunchKernel.
.globl _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i
.type _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i, @function
_Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edx, 12(%rsp) # spill elem_count
movq %fs:40, %rax # stack-protector canary
movq %rax, 120(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp) # spill input_buf
leaq 16(%rsp), %rax
movq %rax, 96(%rsp) # args[0] = &input_buf
movq %rsi, 24(%rsp) # spill output_buf
leaq 24(%rsp), %rax
movq %rax, 104(%rsp) # args[1] = &output_buf
leaq 12(%rsp), %rax
movq %rax, 112(%rsp) # args[2] = &elem_count
movl $1, 48(%rsp) # default grid/block dims (1,1,1)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx # &stream
leaq 32(%rsp), %rdx # &sharedMem
leaq 60(%rsp), %rsi # &blockDim
leaq 48(%rsp), %rdi # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7 # 0 => configuration popped, perform the launch
.L3:
movq 120(%rsp), %rax # verify stack canary
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp) # stack arg: stream
.cfi_def_cfa_offset 152
pushq 40(%rsp) # stack arg: shared-memory size
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9 # r9 = kernel argument array
movq 76(%rsp), %rcx # blockDim (x,y)
movl 84(%rsp), %r8d # blockDim.z
movq 64(%rsp), %rsi # gridDim (x,y)
movl 72(%rsp), %edx # gridDim.z
leaq _Z23copy_buffer_util_kernelPK6float4PS_i(%rip), %rdi # kernel handle
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i, .-_Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i
# Public host symbol for the kernel; the CUDA runtime maps this address to
# the device function, so calling it simply forwards to the launch stub.
.globl _Z23copy_buffer_util_kernelPK6float4PS_i
.type _Z23copy_buffer_util_kernelPK6float4PS_i, @function
_Z23copy_buffer_util_kernelPK6float4PS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp # keep rsp 16-byte aligned at the call
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z23copy_buffer_util_kernelPK6float4PS_iPK6float4PS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23copy_buffer_util_kernelPK6float4PS_i, .-_Z23copy_buffer_util_kernelPK6float4PS_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23copy_buffer_util_kernelPK6float4PS_i"
.text
# Static constructor (.init_array): registers the embedded fatbinary and the
# kernel entry point with the CUDA runtime, then queues teardown via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi # fatbin wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # cache handle for unregister
pushq $0 # four trailing NULL args of __cudaRegisterFunction
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d # thread limit: -1 = no limit
leaq .LC0(%rip), %rdx # device-side mangled kernel name
movq %rdx, %rcx
leaq _Z23copy_buffer_util_kernelPK6float4PS_i(%rip), %rsi # host stub address
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "copy_buffer_util_kernel.hip"
.globl _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i # -- Begin function _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.p2align 4, 0x90
# HIP host-side launch stub: spills the three kernel arguments, builds the
# argument-pointer array, pops the launch configuration recorded by the
# <<<...>>> call, and forwards everything to hipLaunchKernel.
.type _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i,@function
_Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i: # @_Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp) # spill input_buf
movq %rsi, 64(%rsp) # spill output_buf
movl %edx, 12(%rsp) # spill elem_count
leaq 72(%rsp), %rax
movq %rax, 80(%rsp) # args[0] = &input_buf
leaq 64(%rsp), %rax
movq %rax, 88(%rsp) # args[1] = &output_buf
leaq 12(%rsp), %rax
movq %rax, 96(%rsp) # args[2] = &elem_count
leaq 48(%rsp), %rdi # &gridDim
leaq 32(%rsp), %rsi # &blockDim
leaq 24(%rsp), %rdx # &sharedMem
leaq 16(%rsp), %rcx # &stream
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi # gridDim (x,y)
movl 56(%rsp), %edx # gridDim.z
movq 32(%rsp), %rcx # blockDim (x,y)
movl 40(%rsp), %r8d # blockDim.z
leaq 80(%rsp), %r9 # kernel argument array
movl $_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, %edi # kernel handle
pushq 16(%rsp) # stack arg: stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # stack arg: shared-memory size
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, .Lfunc_end0-_Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
# Module constructor (.init_array): lazily registers the embedded HIP
# fatbinary (once, guarded by __hip_gpubin_handle), registers the kernel
# symbol, and queues __hip_module_dtor with atexit.
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip) # already registered?
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip) # cache the handle
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp) # zero the four trailing stack arguments
movups %xmm0, (%rsp)
movl $_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, %esi # host-side handle
movl $.L__unnamed_1, %edx # device-side kernel name
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
# Module destructor (run via atexit): unregisters the fatbinary if it was
# registered and clears the cached handle.
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2 # nothing to do if never registered
# %bb.1:
pushq %rax # realign stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i,@object # @_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.section .rodata,"a",@progbits
.globl _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.p2align 3, 0x0
_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i:
.quad _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.size _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i"
.size .L__unnamed_1, 59
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23copy_buffer_util_kernelPK15HIP_vector_typeIfLj4EEPS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
// Vector version
// Julia-set iteration kernel: one thread per point. Runs 100 iterations of
// z <- z^2 + c starting from z = (Xreal[i], Ximag[i]), then overwrites
// Xreal[i] with exp(-|z|) — the value the host retrieves. Ximag[i] ends up
// holding the final imaginary component, exactly as before.
//
// Fix: the original reloaded and rewrote Xreal[i]/Ximag[i] in global memory
// on every one of the 100 iterations even though the values are private to
// this thread. Iterating in registers performs the same floating-point
// operations in the same order, so results are bit-identical.
__global__ void Iteration(double *Xreal, double *Ximag,
const double creal, const double cimag, const double N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// int i = threadIdx.x; // if using just one block
int k;
double temp;
if (i < N) {
double zr = Xreal[i]; // real part, kept in a register
double zi = Ximag[i]; // imaginary part, kept in a register
for (k = 0; k < 100; k++) {
temp = 2 * zr * zi + cimag; // next imaginary part
zr = zr * zr - zi * zi + creal; // next real part
zi = temp;
}
Ximag[i] = zi; // same final value the original loop left behind
// Xreal is the only output that needs to be retrieved
Xreal[i] = exp(-sqrt(zr * zr + zi * zi));
}
}
|
.file "tmpxft_000b914a_00000000-6_juliaCuda.cudafe1.cpp"
.text
#APP
#NO_APP
# _ZL26__cudaUnregisterBinaryUtilv
# atexit() teardown hook installed by _ZL24__sti____cudaRegisterAllv:
# unregisters the embedded CUDA fat binary at process exit.
# ABI: SysV AMD64; no args, no return value.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64                                 # CET/IBT landing pad
subq $8, %rsp                           # 8-byte pad keeps rsp 16-aligned at the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi  # arg0 = handle saved at registration
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# _Z33__device_stub__Z9IterationPdS_dddPdS_ddd
# Host-side launch stub for kernel Iteration(double*, double*, double, double, double).
# Spills the five arguments (rdi, rsi, xmm0-xmm2 per SysV AMD64) to the stack,
# builds the kernel-argument pointer array, pops the <<<...>>> launch
# configuration, and forwards everything to cudaLaunchKernel.
# Stack frame (168 bytes + return addr keeps 16-byte alignment):
#   8..40(%rsp)   spilled argument values
#   64..84(%rsp)  gridDim/blockDim defaults (all 1, overwritten by pop)
#   112..144(%rsp) args[] = pointers to the spilled values
#   152(%rsp)     stack-protector canary
.globl _Z33__device_stub__Z9IterationPdS_dddPdS_ddd
.type _Z33__device_stub__Z9IterationPdS_dddPdS_ddd, @function
_Z33__device_stub__Z9IterationPdS_dddPdS_ddd:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)                     # Xreal
movq %rsi, 32(%rsp)                     # Ximag
movsd %xmm0, 24(%rsp)                   # creal
movsd %xmm1, 16(%rsp)                   # cimag
movsd %xmm2, 8(%rsp)                    # N
movq %fs:40, %rax                       # install stack-protector canary
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax                     # args[0..4] = &spilled values
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)                       # default dim3{1,1,1} x2
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx                     # &stream
leaq 48(%rsp), %rdx                     # &sharedMem
leaq 76(%rsp), %rsi                     # &blockDim
leaq 64(%rsp), %rdi                     # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7                                  # 0 => configuration available, launch
.L3:
movq 152(%rsp), %rax                    # verify canary before returning
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)                          # stream (stack arg 2)
.cfi_def_cfa_offset 184
pushq 56(%rsp)                          # sharedMem (stack arg 1; offset shifted by prior push)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9                     # args[] (offsets below account for the two pushes)
movq 92(%rsp), %rcx                     # blockDim (low 8 bytes)
movl 100(%rsp), %r8d                    # blockDim.z
movq 80(%rsp), %rsi                     # gridDim (low 8 bytes)
movl 88(%rsp), %edx                     # gridDim.z
leaq _Z9IterationPdS_ddd(%rip), %rdi    # kernel handle = host wrapper address
call cudaLaunchKernel@PLT
addq $16, %rsp                          # drop the two pushed qwords
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT               # canary mismatch: abort
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z9IterationPdS_dddPdS_ddd, .-_Z33__device_stub__Z9IterationPdS_dddPdS_ddd
# _Z9IterationPdS_ddd — host-visible symbol for Iteration<<<...>>>.
# Arguments arrive in the same registers the stub expects (SysV AMD64),
# so this wrapper simply forwards to the device stub unchanged.
.globl _Z9IterationPdS_ddd
.type _Z9IterationPdS_ddd, @function
_Z9IterationPdS_ddd:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp                           # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9IterationPdS_dddPdS_ddd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9IterationPdS_ddd, .-_Z9IterationPdS_ddd
# Kernel-name string used by __cudaRegisterFunction below.
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9IterationPdS_ddd"
.text
# _ZL24__sti____cudaRegisterAllv
# Static initializer (referenced from .init_array): registers the embedded
# fat binary, binds the Iteration kernel symbol to its device code, and
# installs the unregister hook via atexit().
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi   # fat-binary wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi                         # returned module handle
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0                                # four trailing NULL args of
.cfi_def_cfa_offset 24
pushq $0                                # __cudaRegisterFunction (tid/bid/
.cfi_def_cfa_offset 32
pushq $0                                # bDim/gDim pointers)
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d                           # wSize = NULL
movl $-1, %r8d                          # thread_limit = -1 (none)
leaq .LC0(%rip), %rdx                   # deviceFun name
movq %rdx, %rcx                         # deviceName (same string)
leaq _Z9IterationPdS_ddd(%rip), %rsi    # hostFun = host wrapper address
call __cudaRegisterFunction@PLT
addq $32, %rsp                          # drop the four pushed NULLs
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                         # schedule unregistration at exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
// Vector version
__global__ void Iteration(double *Xreal, double *Ximag,
const double creal, const double cimag, const double N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// int i = threadIdx.x; // if using just one block
int k;
double temp;
if (i < N) {
for (k = 0; k < 100; k++){
temp = 2 * Xreal[i] * Ximag[i] + cimag;
Xreal[i] = Xreal[i] * Xreal[i] - Ximag[i] * Ximag[i] + creal;
Ximag[i] = temp;
}
// Xreal is the only output that needs to be retrieved
Xreal[i] = exp(-sqrt(Xreal[i] * Xreal[i] + Ximag[i] * Ximag[i]));
}
}
|
#include <hip/hip_runtime.h>
// Vector version: one thread per grid point.
// Starting from z = (Xreal[i], Ximag[i]), iterates z <- z*z + c (with
// c = creal + cimag*I) 100 times, then stores exp(-|z|) into Xreal[i].
// N is the element count; it arrives as a double, so the int index i is
// promoted for the bounds comparison.
__global__ void Iteration(double *Xreal, double *Ximag,
	const double creal, const double cimag, const double N)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	// int i = threadIdx.x; // if using just one block
	int k;
	double temp;
	if (i < N) {
		for (k = 0; k < 100; k++){
			// temp holds Im(z^2 + c); computed before Xreal[i] is overwritten
			// so both updates use the pre-iteration values.
			temp = 2 * Xreal[i] * Ximag[i] + cimag;
			Xreal[i] = Xreal[i] * Xreal[i] - Ximag[i] * Ximag[i] + creal;
			Ximag[i] = temp;
		}
		// Xreal is the only output that needs to be retrieved
		Xreal[i] = exp(-sqrt(Xreal[i] * Xreal[i] + Ximag[i] * Ximag[i]));
	}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
// Vector version
__global__ void Iteration(double *Xreal, double *Ximag,
const double creal, const double cimag, const double N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// int i = threadIdx.x; // if using just one block
int k;
double temp;
if (i < N) {
for (k = 0; k < 100; k++){
temp = 2 * Xreal[i] * Ximag[i] + cimag;
Xreal[i] = Xreal[i] * Xreal[i] - Ximag[i] * Ximag[i] + creal;
Ximag[i] = temp;
}
// Xreal is the only output that needs to be retrieved
Xreal[i] = exp(-sqrt(Xreal[i] * Xreal[i] + Ximag[i] * Ximag[i]));
}
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9IterationPdS_ddd
.globl _Z9IterationPdS_ddd
.p2align 8
.type _Z9IterationPdS_ddd,@function
; gfx1100 (RDNA3, wave32) device code for Iteration(double*, double*, double, double, double).
; Kernarg layout: [0x00] Xreal ptr, [0x08] Ximag ptr, [0x10] creal, [0x18] cimag,
; [0x20] N, [0x34] hidden group_size_x. s[0:1] = kernarg base, s15 = workgroup id x,
; v0 = workitem id x. Scheduling directives (s_delay_alu / s_waitcnt) are
; compiler-placed hazard hints; do not reorder instructions around them.
_Z9IterationPdS_ddd:
s_load_b32 s2, s[0:1], 0x34
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff                                        ; blockDim.x (low 16 bits of packed group size)
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]                     ; i = blockIdx.x*blockDim.x + threadIdx.x
s_load_b64 s[2:3], s[0:1], 0x20                                 ; N
v_cvt_f64_i32_e32 v[2:3], v1                                    ; (double)i for the i < N guard
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_f64_e32 vcc_lo, s[2:3], v[2:3]                         ; per-lane: N > (double)i
s_and_saveexec_b32 s2, vcc_lo                                   ; mask off out-of-range lanes
s_cbranch_execz .LBB0_4                                         ; whole wave out of range -> exit
s_load_b256 s[0:7], s[0:1], 0x0                                 ; s[0:1]=Xreal s[2:3]=Ximag s[4:5]=creal s[6:7]=cimag
v_ashrrev_i32_e32 v2, 31, v1                                    ; sign-extend i to 64 bits
s_movk_i32 s8, 0x64                                             ; loop counter = 100
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 3, v[1:2]                                 ; byte offset = i * 8
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s0, v5                                 ; v[3:4] = &Xreal[i]
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v6, vcc_lo
v_add_co_u32 v5, vcc_lo, s2, v5                                 ; v[5:6] = &Ximag[i]
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
.p2align 6
; Main loop (100 iterations): z <- z^2 + c, loading and storing both
; components through global memory each pass, as in the source.
.LBB0_2:
global_load_b64 v[7:8], v[5:6], off                             ; xi = Ximag[i]
global_load_b64 v[9:10], v[3:4], off                            ; xr = Xreal[i]
s_add_i32 s8, s8, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s8, 0
s_waitcnt vmcnt(1)
v_mul_f64 v[11:12], v[7:8], v[7:8]                              ; xi*xi
s_waitcnt vmcnt(0)
v_fma_f64 v[11:12], v[9:10], v[9:10], -v[11:12]                 ; xr*xr - xi*xi
v_add_f64 v[9:10], v[9:10], v[9:10]                             ; 2*xr
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[11:12], v[11:12], s[4:5]                            ; + creal
v_fma_f64 v[7:8], v[9:10], v[7:8], s[6:7]                       ; 2*xr*xi + cimag
global_store_b64 v[3:4], v[11:12], off                          ; Xreal[i] = new real part
global_store_b64 v[5:6], v[7:8], off                            ; Ximag[i] = temp
s_cbranch_scc1 .LBB0_2
; Epilogue: Xreal[i] = exp(-sqrt(xr*xr + xi*xi)), expanded inline below
; (Newton-refined rsq for sqrt, then range reduction + polynomial for exp).
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b64 v[2:3], v[2:3], off
s_mov_b32 s1, 0xbff71547
s_mov_b32 s0, 0x652b82fe                                        ; s[0:1] = -1/ln(2) (for exp range reduction)
global_load_b64 v[4:5], v[0:1], off
s_mov_b32 s3, 0x3e5ade15
s_mov_b32 s2, 0x6a5dcb37
s_waitcnt vmcnt(1)
v_mul_f64 v[2:3], v[2:3], v[2:3]                                ; xi*xi
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[2:3], v[4:5], v[4:5], v[2:3]                        ; r = xr*xr + xi*xi
v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[2:3]                     ; tiny input? pre-scale for rsq
v_cndmask_b32_e64 v4, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v4, 8, v4
v_ldexp_f64 v[2:3], v[2:3], v4                                  ; scale by 2^256 when denormal-range
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rsq_f64_e32 v[4:5], v[2:3]                                    ; initial 1/sqrt estimate
s_waitcnt_depctr 0xfff
v_mul_f64 v[6:7], v[2:3], v[4:5]                                ; y ~ sqrt(r)
v_mul_f64 v[4:5], v[4:5], 0.5
v_fma_f64 v[8:9], -v[4:5], v[6:7], 0.5                          ; Newton-Raphson refinement steps
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_fma_f64 v[4:5], v[4:5], v[8:9], v[4:5]
v_fma_f64 v[8:9], -v[6:7], v[6:7], v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[8:9], v[4:5], v[6:7]
v_fma_f64 v[8:9], -v[6:7], v[6:7], v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
v_cndmask_b32_e64 v6, 0, 0xffffff80, vcc_lo                     ; undo the 2^256 pre-scale (2^-128)
v_cmp_class_f64_e64 vcc_lo, v[2:3], 0x260                       ; 0/inf/nan: keep input unchanged
v_ldexp_f64 v[4:5], v[4:5], v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v3, v5, v3 :: v_dual_cndmask_b32 v2, v4, v2  ; v[2:3] = sqrt(r)
; exp(-s): n = rndne(s * -1/ln2); reduced argument via extended-precision ln2;
; then a degree-11 Horner polynomial for e^f, finally scaled by 2^n.
v_mul_f64 v[4:5], v[2:3], s[0:1]
s_mov_b32 s1, 0xbfe62e42
s_mov_b32 s0, 0xfefa39ef                                        ; -ln(2) head
v_cmp_nlt_f64_e32 vcc_lo, 0x4090cc00, v[2:3]                    ; s >= ~1075*ln2 -> result underflows to 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rndne_f64_e32 v[4:5], v[4:5]
v_fma_f64 v[6:7], v[4:5], s[0:1], -v[2:3]
s_mov_b32 s1, 0xbc7abc9e
s_mov_b32 s0, 0x3b39803f                                        ; -ln(2) tail
v_cvt_i32_f64_e32 v10, v[4:5]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[6:7], v[4:5], s[0:1], v[6:7]                        ; f = n*(-ln2) - s, extended precision
s_mov_b32 s1, 0x3e928af3
s_mov_b32 s0, 0xfca7ab0c
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], s[2:3], s[0:1]                        ; Horner chain: 1/11! ... coefficients
s_mov_b32 s1, 0x3ec71dee
s_mov_b32 s0, 0x623fde64
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3efa0199
s_mov_b32 s0, 0x7c89e6b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f2a01a0
s_mov_b32 s0, 0x14761f6e
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f56c16c
s_mov_b32 s0, 0x1852b7b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3f811111
s_mov_b32 s0, 0x11122322
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fa55555
s_mov_b32 s0, 0x555502a1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fc55555
s_mov_b32 s0, 0x55555511
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_mov_b32 s1, 0x3fe00000
s_mov_b32 s0, 11                                                ; 0.5 coefficient
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], s[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[6:7], v[8:9], 1.0
v_fma_f64 v[4:5], v[6:7], v[8:9], 1.0                           ; e^f
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ldexp_f64 v[4:5], v[4:5], v10                                 ; * 2^n
v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4    ; underflow lanes -> 0.0
global_store_b64 v[0:1], v[2:3], off                            ; Xreal[i] = exp(-sqrt(r))
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9IterationPdS_ddd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9IterationPdS_ddd, .Lfunc_end0-_Z9IterationPdS_ddd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9IterationPdS_ddd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9IterationPdS_ddd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
// Vector version
__global__ void Iteration(double *Xreal, double *Ximag,
const double creal, const double cimag, const double N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// int i = threadIdx.x; // if using just one block
int k;
double temp;
if (i < N) {
for (k = 0; k < 100; k++){
temp = 2 * Xreal[i] * Ximag[i] + cimag;
Xreal[i] = Xreal[i] * Xreal[i] - Ximag[i] * Ximag[i] + creal;
Ximag[i] = temp;
}
// Xreal is the only output that needs to be retrieved
Xreal[i] = exp(-sqrt(Xreal[i] * Xreal[i] + Ximag[i] * Ximag[i]));
}
}
|
.text
.file "juliaCuda.hip"
# _Z24__device_stub__IterationPdS_ddd
# HIP host-side launch stub for Iteration. Spills the five arguments to the
# stack (SysV AMD64: rdi, rsi, xmm0-xmm2), builds the kernel-args pointer
# array, pops the triple-chevron launch configuration, and calls
# hipLaunchKernel with _Z9IterationPdS_ddd (the host-side handle object,
# declared in .rodata below) as the function key.
# NOTE(review): 32-bit absolute relocations (movl $sym, %edi) imply this
# was compiled with -fno-PIC — confirm if linking as PIE.
	.globl _Z24__device_stub__IterationPdS_ddd # -- Begin function _Z24__device_stub__IterationPdS_ddd
	.p2align 4, 0x90
	.type _Z24__device_stub__IterationPdS_ddd,@function
_Z24__device_stub__IterationPdS_ddd: # @_Z24__device_stub__IterationPdS_ddd
	.cfi_startproc
# %bb.0:
	subq $136, %rsp
	.cfi_def_cfa_offset 144
	movq %rdi, 88(%rsp)                     # Xreal
	movq %rsi, 80(%rsp)                     # Ximag
	movsd %xmm0, 72(%rsp)                   # creal
	movsd %xmm1, 64(%rsp)                   # cimag
	movsd %xmm2, 56(%rsp)                   # N
	leaq 88(%rsp), %rax                     # args[0..4] = &spilled values
	movq %rax, 96(%rsp)
	leaq 80(%rsp), %rax
	movq %rax, 104(%rsp)
	leaq 72(%rsp), %rax
	movq %rax, 112(%rsp)
	leaq 64(%rsp), %rax
	movq %rax, 120(%rsp)
	leaq 56(%rsp), %rax
	movq %rax, 128(%rsp)
	leaq 40(%rsp), %rdi                     # &gridDim
	leaq 24(%rsp), %rsi                     # &blockDim
	leaq 16(%rsp), %rdx                     # &sharedMem
	leaq 8(%rsp), %rcx                      # &stream
	callq __hipPopCallConfiguration
	movq 40(%rsp), %rsi                     # gridDim
	movl 48(%rsp), %edx
	movq 24(%rsp), %rcx                     # blockDim
	movl 32(%rsp), %r8d
	leaq 96(%rsp), %r9                      # kernel args array
	movl $_Z9IterationPdS_ddd, %edi         # function handle
	pushq 8(%rsp)                           # stream (stack arg 2)
	.cfi_adjust_cfa_offset 8
	pushq 24(%rsp)                          # sharedMem (offset shifted by prior push)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $152, %rsp                         # frame + two pushed qwords
	.cfi_adjust_cfa_offset -152
	retq
.Lfunc_end0:
	.size _Z24__device_stub__IterationPdS_ddd, .Lfunc_end0-_Z24__device_stub__IterationPdS_ddd
	.cfi_endproc
                                        # -- End function
# __hip_module_ctor (referenced from .init_array)
# Registers the embedded HIP fat binary once (guarded by the
# __hip_gpubin_handle null check — NOTE(review): check-then-set, no locking
# visible here), binds the Iteration kernel name to its stub, then
# tail-calls atexit(__hip_module_dtor).
	.p2align 4, 0x90 # -- Begin function __hip_module_ctor
	.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq $40, %rsp
	.cfi_def_cfa_offset 48
	cmpq $0, __hip_gpubin_handle(%rip)      # already registered?
	jne .LBB1_2
# %bb.1:
	movl $__hip_fatbin_wrapper, %edi
	callq __hipRegisterFatBinary
	movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
	movq __hip_gpubin_handle(%rip), %rdi
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)                  # four NULL trailing args on the stack
	movups %xmm0, (%rsp)
	movl $_Z9IterationPdS_ddd, %esi         # host function handle
	movl $.L__unnamed_1, %edx               # device function name
	movl $.L__unnamed_1, %ecx               # kernel name (same string)
	movl $-1, %r8d                          # thread_limit = -1 (none)
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	movl $__hip_module_dtor, %edi
	addq $40, %rsp
	.cfi_def_cfa_offset 8
	jmp atexit # TAILCALL
.Lfunc_end1:
	.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
# __hip_module_dtor (installed via atexit by __hip_module_ctor)
# Unregisters the HIP fat binary if it was registered, then clears the
# handle so repeated invocation is a no-op.
	.p2align 4, 0x90 # -- Begin function __hip_module_dtor
	.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq __hip_gpubin_handle(%rip), %rdi
	testq %rdi, %rdi                        # never registered? nothing to do
	je .LBB2_2
# %bb.1:
	pushq %rax                              # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq __hipUnregisterFatBinary
	movq $0, __hip_gpubin_handle(%rip)      # mark as unregistered
	addq $8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z9IterationPdS_ddd,@object # @_Z9IterationPdS_ddd
.section .rodata,"a",@progbits
.globl _Z9IterationPdS_ddd
.p2align 3, 0x0
_Z9IterationPdS_ddd:
.quad _Z24__device_stub__IterationPdS_ddd
.size _Z9IterationPdS_ddd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9IterationPdS_ddd"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__IterationPdS_ddd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9IterationPdS_ddd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_000b914a_00000000-6_juliaCuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z9IterationPdS_dddPdS_ddd
.type _Z33__device_stub__Z9IterationPdS_dddPdS_ddd, @function
_Z33__device_stub__Z9IterationPdS_dddPdS_ddd:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movsd %xmm0, 24(%rsp)
movsd %xmm1, 16(%rsp)
movsd %xmm2, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9IterationPdS_ddd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z9IterationPdS_dddPdS_ddd, .-_Z33__device_stub__Z9IterationPdS_dddPdS_ddd
.globl _Z9IterationPdS_ddd
.type _Z9IterationPdS_ddd, @function
_Z9IterationPdS_ddd:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9IterationPdS_dddPdS_ddd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9IterationPdS_ddd, .-_Z9IterationPdS_ddd
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9IterationPdS_ddd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9IterationPdS_ddd(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "juliaCuda.hip"
.globl _Z24__device_stub__IterationPdS_ddd # -- Begin function _Z24__device_stub__IterationPdS_ddd
.p2align 4, 0x90
.type _Z24__device_stub__IterationPdS_ddd,@function
_Z24__device_stub__IterationPdS_ddd: # @_Z24__device_stub__IterationPdS_ddd
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movsd %xmm0, 72(%rsp)
movsd %xmm1, 64(%rsp)
movsd %xmm2, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9IterationPdS_ddd, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z24__device_stub__IterationPdS_ddd, .Lfunc_end0-_Z24__device_stub__IterationPdS_ddd
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9IterationPdS_ddd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9IterationPdS_ddd,@object # @_Z9IterationPdS_ddd
.section .rodata,"a",@progbits
.globl _Z9IterationPdS_ddd
.p2align 3, 0x0
_Z9IterationPdS_ddd:
.quad _Z24__device_stub__IterationPdS_ddd
.size _Z9IterationPdS_ddd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9IterationPdS_ddd"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__IterationPdS_ddd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9IterationPdS_ddd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <ctype.h>
#include <getopt.h>
#include <string.h>
#include <stdio.h>
#include <cmath>
#include <float.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <thrust/device_vector.h>
using namespace std;
//Size of the GPU memory
#define GPU_MEMSIZE_GB 2
//For case in which XSIZE = 1201 and YSIZE = 801
#define GLOBAL_MEM_USE_MB 773
#define MEM_USE_PER_THREAD_B 1280
//MAX_XSIZE_POSSIBLE is the maximum size of x or max number of columns if there is only one row
#define MAX_XSIZE_POSSIBLE floor(((GPU_MEMSIZE_GB * 1000 - GLOBAL_MEM_USE_MB)*1000000)/MEM_USE_PER_THREAD_B)
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Always have even number of radius;and divisible by 10
#define RADSTEP 1
#define ANGLESIZE 36
#define PI 3.14
#define THREADS_PER_BLOCK 512
//#define FILENAME "Annie_coastDEM.txt"
//---------------------------Function declarations--------------------------------------------------------------------------//
__global__ void getMatrix(float* data,float* angle,float* anisotropy,float* azimuth,float* variance,float* orientation,float* ortho,size_t XSIZE,size_t YSIZE,int RADIUS,int WINDOW_SIZE);
int Get_GPU_devices(void);
static void HandleError( cudaError_t err,const char *file, int line );
//--------------------------------------------------------------------------------------------------------------------------//
//Current Usage:
//Global Memory: 773 MB
// Per-pixel anisotropy/azimuth kernel: one thread per (x, y) DEM cell.
// For each of ANGLESIZE directions and each scale window, accumulates
// squared elevation differences along the direction (and its 180-degree
// opposite) and along the orthogonal direction, keeping per-scale minima in
// variance[]/orientation[]/ortho[], then derives anisotropy and azimuth.
// NOTE(review): variance[] is compared against before ever being written
// here — presumably the host pre-fills it with FLT_MAX (see the
// "Iniitalize ... with max float value" comment); confirm against caller.
__global__ void getMatrix(float* data,float* angle,float* anisotropy,float* azimuth,float* variance,float* orientation,float* ortho,size_t XSIZE,size_t YSIZE,int RADIUS,int WINDOW_SIZE)
//__global__ void getMatrix(int* data,float* angle,float* anisotropy,float* azimuth,size_t XSIZE,size_t YSIZE)
{
	// Thread indices
	int x = (blockIdx.x * blockDim.x) + threadIdx.x;
	int y = (blockIdx.y * blockDim.y) + threadIdx.y;
	// Flat cell index; computed before the bounds check but only used on the
	// in-bounds path below.
	int id = y * XSIZE + x;
//----------------------------------------------------------------------------------------------------------------------------//
	// Skip the RADIUS-wide border so all sampled neighbors stay in range.
	if((y>(YSIZE - RADIUS - 1))||(y<(RADIUS))) return;
	else if((x>(XSIZE - RADIUS - 1))||(x<(RADIUS))) return;
	else
	{
		//Actual computation
		int xrad,yrad,xradOrtho1,yradOrtho1,xradOneEighty,yradOneEighty,valueOneEighty;
		int valueOrtho1,valueOrtho2,xradOrtho2,yradOrtho2,i,j,k;
		//printf("Radius is: %d\n",RADIUS);
		/*
		float variance[RADIUS];
		float orientation[RADIUS];
		float ortho[RADIUS];
		*/
		float value,sum_value,avg_value;
		float sum_valueOrtho,avg_valueOrtho;
		// Initializing declared variables
		sum_value = 0;
		avg_value = 0;
		sum_valueOrtho = 0;
		avg_valueOrtho = 0;
		// Iniitalize variance, ortho, and orientation arrays with max float value SGR changed i<100 to i<RADIUS
		//Flipped
		for(i=0;i<ANGLESIZE;i++) {				// each candidate direction
			for(k = 0;k<RADIUS;k+=RADSTEP) {		// each scale window start
				//Initializing to 0 so that the sum is zero everytime it starts
				sum_value = 0;
				sum_valueOrtho = 0;
				for(j=k;j<k+WINDOW_SIZE;j++){
					//Computation for angle of interest
					// Nearest-grid-point sample at distance (j+1) along angle[i].
					xrad = (int)lrintf(cosf(angle[i]) * (j+1) + x);
					yrad = (int)lrintf(sinf(angle[i]) * (j+1) + y);
					value = data[y * XSIZE + x] - data[yrad * XSIZE + xrad];
					value = value * value;
					//One eighty angle computation
					xradOneEighty = (int)lrintf(cosf(angle[i]+PI) * (j+1) + x);
					yradOneEighty = (int)lrintf(sinf(angle[i]+PI) * (j+1) + y);
					valueOneEighty = data[y * XSIZE + x] - data[yradOneEighty * XSIZE + xradOneEighty];
					valueOneEighty = valueOneEighty * valueOneEighty;
					sum_value = sum_value + value + valueOneEighty;
					avg_value = sum_value/(2*(j+1)); //the average variance from scale 1 to scale j
					//Computation for values on angle orthogonal to angle of interest
					xradOrtho1 = (int)lrintf(cosf(angle[i]+PI/2) * (j+1) + x);
					yradOrtho1 = (int)lrintf(sinf(angle[i]+PI/2) * (j+1) + y);
					valueOrtho1 = data[y * XSIZE + x] - data[yradOrtho1 * XSIZE + xradOrtho1];
					valueOrtho1 = valueOrtho1 * valueOrtho1;
					//One eighty ortho angle computation
					xradOrtho2 = (int)lrintf(cosf(angle[i]+PI*3/2) * (j+1) + x);
					yradOrtho2 = (int)lrintf(sinf(angle[i]+PI*3/2) * (j+1) + y);
					valueOrtho2 = data[y * XSIZE + x] - data[yradOrtho2 * XSIZE + xradOrtho2];
					valueOrtho2 = valueOrtho2 * valueOrtho2;
					sum_valueOrtho = sum_valueOrtho + valueOrtho1 + valueOrtho2;
					// NOTE(review): divisor is (2*j+1) here but (2*(j+1)) for
					// avg_value above — looks inconsistent; confirm intended.
					avg_valueOrtho = sum_valueOrtho/(2*j+1);
					//Fail safe to ensure there is no nan or inf when taking anisotropy ratio, later on.
					if(avg_value == 0) {
						if((avg_valueOrtho < 1) && (avg_valueOrtho > 0)) {
							avg_value = avg_valueOrtho;
						}
						else {
							avg_value = 1;
						}
					}
					if(avg_valueOrtho == 0) {
						avg_valueOrtho = 1;
					}
					//Determine if the variance is minimum compared to others at scale j, if so record it and its angle i. If not, pass it
					if(avg_value < variance[id * RADIUS + j]) {
						variance[id * RADIUS + j] = avg_value;
						orientation[id * RADIUS + j] = angle[i];
						ortho[id * RADIUS + j] = avg_valueOrtho;
					}
				}
			}
		}
		// Emit per-scale anisotropy ratio and azimuth (radians -> degrees,
		// using the file's PI = 3.14 approximation).
		for(j=0;j<RADIUS;j+=RADSTEP){
			anisotropy[y * XSIZE * RADIUS/RADSTEP + x * RADIUS/RADSTEP + j] = (36+ortho[id * RADIUS + j])/(36+variance[id * RADIUS + j]);
			azimuth[y * XSIZE * RADIUS/RADSTEP + x * RADIUS/RADSTEP + j] = orientation[id * RADIUS + j] * 180/PI;
		}
	}
}
//--------------------------------------END OF KERNEL-----------------------------------------------------------//
//--------------------------------------Handle Error()-----------------------------------------------------------//
// Report a fatal CUDA runtime error and terminate the process.
//   err:  status code returned by a CUDA runtime call
//   file: source file of the failing call (supplied as __FILE__ by HANDLE_ERROR)
//   line: source line of the failing call (supplied as __LINE__ by HANDLE_ERROR)
// Fix: the original message ran its words together ("<msg>in<file>at line<N>");
// separators are now spaced so the reported location is readable.
static void HandleError( cudaError_t err,const char *file, int line ) {
    if (err != cudaSuccess) {
        cout << cudaGetErrorString(err) << " in " << file << " at line " << line << "\n";
        exit( EXIT_FAILURE );
    }
}
//--------------------------------------Get_GPU_devices()-----------------------------------------------------------//
int Get_GPU_devices()
{
cudaDeviceProp prop;
int whichDevice,DeviceCount;
HANDLE_ERROR(cudaGetDevice(&whichDevice));
HANDLE_ERROR(cudaGetDeviceProperties(&prop,whichDevice));
if(!prop.deviceOverlap){
cout<< "Device does not handle overlaps so streams are not possible\n";
return 0;
}
DeviceCount = 0;
HANDLE_ERROR(cudaGetDeviceCount(&DeviceCount));
if(DeviceCount > 0){
cout<< DeviceCount <<"Devices Found\n";
}else{
cout<< "No devices found or error in reading the number of devices\n";
return 0;
}
for(int i = 0;i<DeviceCount;i++){
cudaDeviceProp properties;
HANDLE_ERROR(cudaGetDeviceProperties(&properties,i));
cout<<"Device Number:"<< i << "\n";
cout<<" Device name: "<< properties.name;
cout<<" Device Global Memory size: "<< properties.totalGlobalMem/1000000 << "MB \n";
cout<<"\n";
}
return DeviceCount;
}
//-------------------------------------------------------------------------------------------------------------//
// Program entry point: parses -i/-d/-r/-w options, reads a whitespace-
// delimited grid of integers from the input file, launches the getMatrix
// kernel over the grid, then writes selected anisotropy/azimuth scale
// slices to text files.
int main(int argc,char* argv[])
{
// NOTE(review): strcpy() below copies optarg unbounded; an -i argument over
// 19 chars (or -d over 9) overflows these fixed buffers — TODO confirm inputs.
char FileName[20];
char delimiterStr[10];
char delimiter;
// NOTE(review): RADIUS/WINDOW_SIZE stay uninitialized if -r/-w are absent;
// the argc==9 check guarantees the count, not which options were passed.
int RADIUS;
int WINDOW_SIZE;
//delimiter_string = "A";
if(argc != 9){
printf("\tArguments needed = 9; Provided = %d\n",argc);
printf("Usage: ./Executable -i InputDataFileName -d Delimiter -r Radius -w WindowSize\n");
printf("Exiting program\n");
return 0;
}
int option;
while ((option = getopt(argc, argv,"i:d:r:w:")) != -1) {
switch (option) {
case 'i' : strcpy(FileName,optarg);
break;
case 'd' : strcpy(delimiterStr,optarg);
break;
case 'r' : RADIUS = atoi(optarg);
break;
case 'w' : WINDOW_SIZE = atoi(optarg);
break;
default: printf("Usage: Executable -i InputDataFileName -d Delimiter -r Radius -w WindowSize\n");
exit(EXIT_FAILURE);
}
}
// Map the delimiter keyword to a character. NOTE(review): `delimiter` is
// only printed below; the parsing loop further down assumes spaces.
if(strcmp(delimiterStr,"space")==0){
delimiter = ' ';
}
else if(strcmp(delimiterStr,"Space")==0){
delimiter = ' ';
}
else if(strcmp(delimiterStr,"tab")==0){
delimiter = '\t';
}
else if(strcmp(delimiterStr,"Tab")==0){
delimiter = '\t';
}
else{
delimiter = delimiterStr[0];
}
printf("Delimiter: %c\n",delimiter);
printf("Radius is %d\n",RADIUS);
printf("Input file name is: %s\n",FileName);
//-------------------------------------------------------------------------------------//
// Raise the device-side printf FIFO to 500MB (kernel debugging aid).
size_t limit;
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 500 * 1024 * 1024);
cudaDeviceGetLimit(&limit,cudaLimitPrintfFifoSize);
//File declarations and opening them
FILE *datTxt1,*datTxt,*outputAnisotropy00,*outputAnisotropy09,*outputAnisotropy49,*outputAnisotropy99;
FILE *outputAzimuth00,*outputAzimuth09,*outputAzimuth49,*outputAzimuth99;
FILE * inpCheck;
inpCheck = fopen("inpCheck.txt","w");
if(inpCheck == NULL) {
perror("Cannot open inpcheck.txt file");
return (-1);
}
datTxt1 = fopen(FileName,"r");
if(datTxt1 == NULL) {
cout<< "Cannot open file:" << argv[1] << "\nCheck if file exists.\n";
exit(1);
}
outputAnisotropy00 = fopen("outputDataAni_First.txt","w");
outputAnisotropy09 = fopen("outputDataAni_Rad_div_10.txt","w");
outputAnisotropy49 = fopen("outputDataAni_Rad_div_2.txt","w");
outputAnisotropy99 = fopen("outputDataAni_Last.txt","w");
if((outputAnisotropy00 == NULL)||(outputAnisotropy09 == NULL)||(outputAnisotropy49 == NULL)||(outputAnisotropy99 == NULL)) {
perror("Cannot open Anisotropy file");
return (-1);
}
outputAzimuth00 = fopen("outputDataAzi_First.txt","w");
outputAzimuth09 = fopen("outputDataAzi_Rad_div_10.txt","w");
outputAzimuth49 = fopen("outputDataAzi_Rad_div_2.txt","w");
outputAzimuth99 = fopen("outputDataAzi_Last.txt","w");
if((outputAzimuth00 == NULL)||(outputAzimuth09 == NULL)||(outputAzimuth49 == NULL)||(outputAzimuth99 == NULL)) {
perror("Cannot open Azimuth file");
return (-1);
}
//-----------Getting total rows and columns in the data file-----------//
size_t XSIZE,YSIZE;
XSIZE = 0;
YSIZE = 0;
int i,j;
// Count columns (x) by counting spaces in the first line.
char* max_line;
max_line = (char*)malloc(MAX_XSIZE_POSSIBLE);
// BUG(review): sizeof(max_line) is the size of the pointer (8 bytes), not
// of the allocation, so only the first few bytes are zeroed; the fgets
// result on the next line is also discarded, so a read failure is silent.
memset(max_line,'\0',sizeof(max_line));
fgets(max_line,MAX_XSIZE_POSSIBLE,datTxt1)!=NULL;
while(*max_line)if(*max_line++ == ' ')++XSIZE;
XSIZE+=1;
// Count rows (y) via newline characters in the remainder of the file.
do{
i = fgetc(datTxt1);
if(i == '\n') YSIZE++;
}while(i != EOF);
YSIZE+=1;
fclose(datTxt1);
cout<< "(XSIZE,YSIZE)::"<< "(" << XSIZE << "," << YSIZE << ")" << "\n";
datTxt = fopen(FileName,"r");
if(datTxt == NULL) {
printf("Cannot open file: %s\nCheck if file exists\n",argv[1]);
exit(1);
}
//-----------------------Checking if the data size fits the memory of the GPU-----------//
cout<< "(XSIZE,YSIZE)::"<< "(" << XSIZE << "," << YSIZE << ")" << "\n";
//(MAX_XSIZE_POSSIBLE - XSIZE*YSIZE >0)? printf("There is enough memory for the computation\n"):printf("There is not enough memory and may result in incorrect results\n");
//-----------------------------------------------------------------------//
// Allocating Managed Memory (Unified Memory)
// dim3 gridSize(XSIZE ,(YSIZE+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,1);
// dim3 blockSize(1,THREADS_PER_BLOCK,1);
long int total_threads;
float* data;
float* anisotropy,*azimuth,*angle;
float* variance,*orientation,*ortho;
// total_threads covers the padded launch grid (XSIZE rounded up to whole
// blocks), so the per-thread scratch arrays below never index out of range.
total_threads = THREADS_PER_BLOCK * ((XSIZE + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK) * YSIZE;
HANDLE_ERROR(cudaMallocManaged((void**)&angle,ANGLESIZE * sizeof(float)));
HANDLE_ERROR(cudaMallocManaged((void**)&data,XSIZE * YSIZE * sizeof(float)));
HANDLE_ERROR(cudaMallocManaged((void**)&anisotropy,YSIZE * XSIZE * RADIUS/RADSTEP * sizeof(float)));
HANDLE_ERROR(cudaMallocManaged((void**)&azimuth,YSIZE * XSIZE * RADIUS/RADSTEP * sizeof(float)));
HANDLE_ERROR(cudaMallocManaged((void**)&variance,total_threads * RADIUS * sizeof(float)));
HANDLE_ERROR(cudaMallocManaged((void**)&orientation,total_threads * RADIUS * sizeof(float)));
HANDLE_ERROR(cudaMallocManaged((void**)&ortho,total_threads * RADIUS * sizeof(float)));
//-----------------------------------------------------------------------//
//XSIZE ints in a row which are max of 5 digits
//with a space in the front and the back and space
//between each number
char *startPtr,*endPtr;
char line[XSIZE * 10 +2+(XSIZE-1)];
memset(line, '\0', sizeof(line));
int Value;
i = 0;
j = 0;
//Assuming each number in the data set has a max of 5 characters.
// NOTE(review): tempVal[5] leaves no room for a terminating NUL when a
// token is exactly 5 characters wide — TODO confirm maximum token width.
char tempVal[5];
memset(tempVal,'\0',sizeof(tempVal));
cout<< "Working1\n";
// Parse the grid row by row: split each line on spaces, store into data[].
while(fgets(line,XSIZE *10 + 2 + (XSIZE-1),datTxt)!=NULL) {
cout << "Working2\n";
startPtr = line;
for(i=0;i<XSIZE;i++) {
Value = 0;
memset(tempVal,'\0',sizeof(tempVal));
if(i != (XSIZE - 1)) {
endPtr = strchr(startPtr,' ');
strncpy(tempVal,startPtr,endPtr-startPtr);
Value = atoi(tempVal);
data[j * XSIZE + i] = Value;
fprintf(inpCheck,"%d ",Value);
endPtr = endPtr + 1;
startPtr = endPtr;
}
else if(i == (XSIZE - 1)){
strcpy(tempVal,startPtr);
Value = atoi(tempVal);
data[j * XSIZE + i] = Value;
fprintf(inpCheck,"%d\n",Value);
}
}
j++;
}
//------------------------------------Matrix Declarations-----------------//
// float angle[ANGLESIZE];
// Candidate directions: ANGLESIZE angles in 5-degree steps (PI is the 3.14
// approximation defined above).
for(int i=0;i<ANGLESIZE;i++) {
angle[i] = i * 5 * PI/180;
}
// Seed the per-thread minima with FLT_MAX so any computed variance wins.
for(i=0;i<RADIUS * total_threads ;i++){
variance[i] = FLT_MAX;
ortho[i] = FLT_MAX;
orientation[i] = FLT_MAX;
}
//--------------------------------------CUDA------------------------------//
cudaError_t error;
//error = cudaSetDevice(Get_GPU_devices() -1);
error = cudaSetDevice(0);
if(error == cudaSuccess){
cout <<"success\n";
}else{
cout <<"unsuccessful\n";
}
//cudaSetDevice(1);
cout<< "Hello1\n";
// One thread per grid cell: x dimension split into 512-thread blocks,
// one block row per data row.
dim3 gridSize((XSIZE + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,YSIZE,1);
dim3 blockSize(THREADS_PER_BLOCK,1,1);
//dim3 gridSize(3,YSIZE,1);
cout <<"Hello2\n";
getMatrix<<<gridSize,blockSize>>>(data,angle,anisotropy,azimuth,variance,orientation,ortho,XSIZE,YSIZE,RADIUS,WINDOW_SIZE);
error = cudaDeviceSynchronize();
if(error != cudaSuccess){
cout << "CUDA Device Synchronization Error:" << cudaGetErrorString(error) << "\n";
// we can't recover from the error -- exit the program
return 0;
}
error = cudaGetLastError();
if(error != cudaSuccess){
cout <<"CUDA Error:" << cudaGetErrorString(error) << "\n";
// we can't recover from the error -- exit the program
return 0;
}
cout << "Hello3\n";
cout << "Hello4\n";
cout << "Hello5\n";
//------------------------------------------------------------------------//
// Write results for interior points only (a RADIUS-wide border is skipped,
// mirroring the kernel's bounds guard). Rows end with '\n' at the last
// interior column, otherwise values are tab-separated.
for(j=0;j<YSIZE ;j++) {
for(i=0;i<XSIZE ;i++) {
if((j>(YSIZE - RADIUS - 1))||(j<(RADIUS))) continue;
if((i>(XSIZE - RADIUS - 1))||(i<(RADIUS))) continue;
if (i == (XSIZE - RADIUS - 1)) {
fprintf(outputAnisotropy00,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAzimuth00,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAnisotropy00,"\n");
fprintf(outputAzimuth00,"\n");
fprintf(outputAnisotropy09,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP +RADIUS/10 -1]);
fprintf(outputAzimuth09,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/10 -1]);
fprintf(outputAnisotropy09,"\n");
fprintf(outputAzimuth09,"\n");
fprintf(outputAnisotropy49,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAzimuth49,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAnisotropy49,"\n");
fprintf(outputAzimuth49,"\n");
fprintf(outputAnisotropy99,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS -1]);
fprintf(outputAzimuth99,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS - 1]);
fprintf(outputAnisotropy99,"\n");
fprintf(outputAzimuth99,"\n");
}
else {
fprintf(outputAnisotropy00,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAzimuth00,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAnisotropy00,"\t");
fprintf(outputAzimuth00,"\t");
fprintf(outputAnisotropy09,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/10 -1]);
fprintf(outputAzimuth09,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/10 -1]);
fprintf(outputAnisotropy09,"\t");
fprintf(outputAzimuth09,"\t");
fprintf(outputAnisotropy49,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAzimuth49,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAnisotropy49,"\t");
fprintf(outputAzimuth49,"\t");
fprintf(outputAnisotropy99,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS - 1]);
fprintf(outputAzimuth99,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS - 1]);
fprintf(outputAnisotropy99,"\t");
fprintf(outputAzimuth99,"\t");
}
}
}
fclose(datTxt);
fclose(inpCheck);
fclose(outputAnisotropy00);
fclose(outputAnisotropy09);
fclose(outputAnisotropy49);
fclose(outputAnisotropy99);
fclose(outputAzimuth00);
fclose(outputAzimuth09);
fclose(outputAzimuth49);
fclose(outputAzimuth99);
cudaFree(data);
cudaFree(angle);
cudaFree(azimuth);
cudaFree(anisotropy);
cudaFree(variance);
cudaFree(orientation);
cudaFree(ortho);
// NOTE(review): max_line is never freed (the free below is commented out).
//free(max_line);
// free(anisotropy);
// free(azimuth);
// size_t free_byte ;
// size_t total_byte ;
/*
cudaMemGetInfo( &free_byte, &total_byte );
double free_db = (double)free_byte;
double total_db = (double)total_byte;
double used_db = total_db - free_db;
// cout << "GPU memory usage: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
*/
return 0;
}
|
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <ctype.h>
#include <getopt.h>
#include <string.h>
#include <stdio.h>
#include <cmath>
#include <float.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <thrust/device_vector.h>
using namespace std;
//Size of the GPU memory
#define GPU_MEMSIZE_GB 2
//For case in which XSIZE = 1201 and YSIZE = 801
#define GLOBAL_MEM_USE_MB 773
#define MEM_USE_PER_THREAD_B 1280
//MAX_XSIZE_POSSIBLE is the maximum size of x or max number of columns if there is only one row
#define MAX_XSIZE_POSSIBLE floor(((GPU_MEMSIZE_GB * 1000 - GLOBAL_MEM_USE_MB)*1000000)/MEM_USE_PER_THREAD_B)
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Always have even number of radius;and divisible by 10
#define RADSTEP 1
#define ANGLESIZE 36
#define PI 3.14
#define THREADS_PER_BLOCK 512
//#define FILENAME "Annie_coastDEM.txt"
//---------------------------Function declarations--------------------------------------------------------------------------//
__global__ void getMatrix(float* data,float* angle,float* anisotropy,float* azimuth,float* variance,float* orientation,float* ortho,size_t XSIZE,size_t YSIZE,int RADIUS,int WINDOW_SIZE);
int Get_GPU_devices(void);
static void HandleError( hipError_t err,const char *file, int line );
//--------------------------------------------------------------------------------------------------------------------------//
//Current Usage:
//Global Memory: 773 MB
// Kernel: for every interior cell (x,y) — a RADIUS-wide border is skipped —
// scan ANGLESIZE candidate directions and RADIUS scales. For each scale,
// accumulate squared elevation differences along the direction, its
// 180-degree opposite, and the two orthogonal directions; keep the minimum
// variance per scale together with its angle and its orthogonal variance;
// finally emit per-cell anisotropy and azimuth profiles.
__global__ void getMatrix(float* data,float* angle,float* anisotropy,float* azimuth,float* variance,float* orientation,float* ortho,size_t XSIZE,size_t YSIZE,int RADIUS,int WINDOW_SIZE)
//__global__ void getMatrix(int* data,float* angle,float* anisotropy,float* azimuth,size_t XSIZE,size_t YSIZE)
{
// Thread indices: one thread per grid cell; id is the flattened cell index
// used to address this thread's rows of variance/orientation/ortho.
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int id = y * XSIZE + x;
//----------------------------------------------------------------------------//
// Skip the RADIUS-wide border so every radial probe below stays in bounds.
if((y>(YSIZE - RADIUS - 1))||(y<(RADIUS))) return;
else if((x>(XSIZE - RADIUS - 1))||(x<(RADIUS))) return;
else
{
//Actual computation
int xrad,yrad,xradOrtho1,yradOrtho1,xradOneEighty,yradOneEighty,valueOneEighty;
int valueOrtho1,valueOrtho2,xradOrtho2,yradOrtho2,i,j,k;
//printf("Radius is: %d\n",RADIUS);
/*
float variance[RADIUS];
float orientation[RADIUS];
float ortho[RADIUS];
*/
float value,sum_value,avg_value;
float sum_valueOrtho,avg_valueOrtho;
// Initializing declared variables
sum_value = 0;
avg_value = 0;
sum_valueOrtho = 0;
avg_valueOrtho = 0;
// Outer loop over candidate angles, inner over scales k; the j-loop walks
// a WINDOW_SIZE-wide window of radii starting at scale k.
for(i=0;i<ANGLESIZE;i++) {
for(k = 0;k<RADIUS;k+=RADSTEP) {
//Reset the accumulators so each (angle, scale) window starts at zero.
sum_value = 0;
sum_valueOrtho = 0;
for(j=k;j<k+WINDOW_SIZE;j++){
// Probe point at distance (j+1) along the angle of interest.
xrad = (int)lrintf(cosf(angle[i]) * (j+1) + x);
yrad = (int)lrintf(sinf(angle[i]) * (j+1) + y);
value = data[y * XSIZE + x] - data[yrad * XSIZE + xrad];
value = value * value;
// Same probe in the opposite (180-degree) direction.
xradOneEighty = (int)lrintf(cosf(angle[i]+PI) * (j+1) + x);
yradOneEighty = (int)lrintf(sinf(angle[i]+PI) * (j+1) + y);
valueOneEighty = data[y * XSIZE + x] - data[yradOneEighty * XSIZE + xradOneEighty];
valueOneEighty = valueOneEighty * valueOneEighty;
sum_value = sum_value + value + valueOneEighty;
avg_value = sum_value/(2*(j+1)); //the average variance from scale 1 to scale j
// Probes along the direction orthogonal to the angle of interest.
xradOrtho1 = (int)lrintf(cosf(angle[i]+PI/2) * (j+1) + x);
yradOrtho1 = (int)lrintf(sinf(angle[i]+PI/2) * (j+1) + y);
valueOrtho1 = data[y * XSIZE + x] - data[yradOrtho1 * XSIZE + xradOrtho1];
valueOrtho1 = valueOrtho1 * valueOrtho1;
// ...and its 180-degree counterpart.
xradOrtho2 = (int)lrintf(cosf(angle[i]+PI*3/2) * (j+1) + x);
yradOrtho2 = (int)lrintf(sinf(angle[i]+PI*3/2) * (j+1) + y);
valueOrtho2 = data[y * XSIZE + x] - data[yradOrtho2 * XSIZE + xradOrtho2];
valueOrtho2 = valueOrtho2 * valueOrtho2;
sum_valueOrtho = sum_valueOrtho + valueOrtho1 + valueOrtho2;
// NOTE(review): divisor here is (2*j+1) but the in-line average above uses
// 2*(j+1) — possibly intentional, TODO confirm which normalization is meant.
avg_valueOrtho = sum_valueOrtho/(2*j+1);
//Fail safe to ensure there is no nan or inf when taking anisotropy ratio, later on.
if(avg_value == 0) {
if((avg_valueOrtho < 1) && (avg_valueOrtho > 0)) {
avg_value = avg_valueOrtho;
}
else {
avg_value = 1;
}
}
if(avg_valueOrtho == 0) {
avg_valueOrtho = 1;
}
//Determine if the variance is minimum compared to others at scale j, if so record it and its angle i. If not, pass it
if(avg_value < variance[id * RADIUS + j]) {
variance[id * RADIUS + j] = avg_value;
orientation[id * RADIUS + j] = angle[i];
ortho[id * RADIUS + j] = avg_valueOrtho;
}
}
}
}
// Emit the per-scale anisotropy ratio (damped by +36 to tame small
// denominators) and the azimuth in degrees of the minimizing angle.
for(j=0;j<RADIUS;j+=RADSTEP){
anisotropy[y * XSIZE * RADIUS/RADSTEP + x * RADIUS/RADSTEP + j] = (36+ortho[id * RADIUS + j])/(36+variance[id * RADIUS + j]);
azimuth[y * XSIZE * RADIUS/RADSTEP + x * RADIUS/RADSTEP + j] = orientation[id * RADIUS + j] * 180/PI;
}
}
}
//--------------------------------------END OF KERNEL-----------------------------------------------------------//
//--------------------------------------Handle Error()-----------------------------------------------------------//
// Report a fatal HIP runtime error and terminate the process.
//   err:  status code returned by a HIP runtime call
//   file: source file of the failing call (supplied as __FILE__ by HANDLE_ERROR)
//   line: source line of the failing call (supplied as __LINE__ by HANDLE_ERROR)
// Fix: the original message ran its words together ("<msg>in<file>at line<N>");
// separators are now spaced so the reported location is readable.
static void HandleError( hipError_t err,const char *file, int line ) {
    if (err != hipSuccess) {
        cout << hipGetErrorString(err) << " in " << file << " at line " << line << "\n";
        exit( EXIT_FAILURE );
    }
}
//--------------------------------------Get_GPU_devices()-----------------------------------------------------------//
int Get_GPU_devices()
{
hipDeviceProp_t prop;
int whichDevice,DeviceCount;
HANDLE_ERROR(hipGetDevice(&whichDevice));
HANDLE_ERROR(hipGetDeviceProperties(&prop,whichDevice));
if(!prop.deviceOverlap){
cout<< "Device does not handle overlaps so streams are not possible\n";
return 0;
}
DeviceCount = 0;
HANDLE_ERROR(hipGetDeviceCount(&DeviceCount));
if(DeviceCount > 0){
cout<< DeviceCount <<"Devices Found\n";
}else{
cout<< "No devices found or error in reading the number of devices\n";
return 0;
}
for(int i = 0;i<DeviceCount;i++){
hipDeviceProp_t properties;
HANDLE_ERROR(hipGetDeviceProperties(&properties,i));
cout<<"Device Number:"<< i << "\n";
cout<<" Device name: "<< properties.name;
cout<<" Device Global Memory size: "<< properties.totalGlobalMem/1000000 << "MB \n";
cout<<"\n";
}
return DeviceCount;
}
//-------------------------------------------------------------------------------------------------------------//
// Program entry point (HIP port): parses -i/-d/-r/-w options, reads a
// whitespace-delimited grid of integers from the input file, launches the
// getMatrix kernel over the grid, then writes selected anisotropy/azimuth
// scale slices to text files.
int main(int argc,char* argv[])
{
// NOTE(review): strcpy() below copies optarg unbounded; an -i argument over
// 19 chars (or -d over 9) overflows these fixed buffers — TODO confirm inputs.
char FileName[20];
char delimiterStr[10];
char delimiter;
// NOTE(review): RADIUS/WINDOW_SIZE stay uninitialized if -r/-w are absent;
// the argc==9 check guarantees the count, not which options were passed.
int RADIUS;
int WINDOW_SIZE;
//delimiter_string = "A";
if(argc != 9){
printf("\tArguments needed = 9; Provided = %d\n",argc);
printf("Usage: ./Executable -i InputDataFileName -d Delimiter -r Radius -w WindowSize\n");
printf("Exiting program\n");
return 0;
}
int option;
while ((option = getopt(argc, argv,"i:d:r:w:")) != -1) {
switch (option) {
case 'i' : strcpy(FileName,optarg);
break;
case 'd' : strcpy(delimiterStr,optarg);
break;
case 'r' : RADIUS = atoi(optarg);
break;
case 'w' : WINDOW_SIZE = atoi(optarg);
break;
default: printf("Usage: Executable -i InputDataFileName -d Delimiter -r Radius -w WindowSize\n");
exit(EXIT_FAILURE);
}
}
// Map the delimiter keyword to a character. NOTE(review): `delimiter` is
// only printed below; the parsing loop further down assumes spaces.
if(strcmp(delimiterStr,"space")==0){
delimiter = ' ';
}
else if(strcmp(delimiterStr,"Space")==0){
delimiter = ' ';
}
else if(strcmp(delimiterStr,"tab")==0){
delimiter = '\t';
}
else if(strcmp(delimiterStr,"Tab")==0){
delimiter = '\t';
}
else{
delimiter = delimiterStr[0];
}
printf("Delimiter: %c\n",delimiter);
printf("Radius is %d\n",RADIUS);
printf("Input file name is: %s\n",FileName);
//-------------------------------------------------------------------------------------//
// Raise the device-side printf FIFO to 500MB (kernel debugging aid).
size_t limit;
hipDeviceSetLimit(hipLimitPrintfFifoSize, 500 * 1024 * 1024);
hipDeviceGetLimit(&limit,hipLimitPrintfFifoSize);
//File declarations and opening them
FILE *datTxt1,*datTxt,*outputAnisotropy00,*outputAnisotropy09,*outputAnisotropy49,*outputAnisotropy99;
FILE *outputAzimuth00,*outputAzimuth09,*outputAzimuth49,*outputAzimuth99;
FILE * inpCheck;
inpCheck = fopen("inpCheck.txt","w");
if(inpCheck == NULL) {
perror("Cannot open inpcheck.txt file");
return (-1);
}
datTxt1 = fopen(FileName,"r");
if(datTxt1 == NULL) {
cout<< "Cannot open file:" << argv[1] << "\nCheck if file exists.\n";
exit(1);
}
outputAnisotropy00 = fopen("outputDataAni_First.txt","w");
outputAnisotropy09 = fopen("outputDataAni_Rad_div_10.txt","w");
outputAnisotropy49 = fopen("outputDataAni_Rad_div_2.txt","w");
outputAnisotropy99 = fopen("outputDataAni_Last.txt","w");
if((outputAnisotropy00 == NULL)||(outputAnisotropy09 == NULL)||(outputAnisotropy49 == NULL)||(outputAnisotropy99 == NULL)) {
perror("Cannot open Anisotropy file");
return (-1);
}
outputAzimuth00 = fopen("outputDataAzi_First.txt","w");
outputAzimuth09 = fopen("outputDataAzi_Rad_div_10.txt","w");
outputAzimuth49 = fopen("outputDataAzi_Rad_div_2.txt","w");
outputAzimuth99 = fopen("outputDataAzi_Last.txt","w");
if((outputAzimuth00 == NULL)||(outputAzimuth09 == NULL)||(outputAzimuth49 == NULL)||(outputAzimuth99 == NULL)) {
perror("Cannot open Azimuth file");
return (-1);
}
//-----------Getting total rows and columns in the data file-----------//
size_t XSIZE,YSIZE;
XSIZE = 0;
YSIZE = 0;
int i,j;
// Count columns (x) by counting spaces in the first line.
char* max_line;
max_line = (char*)malloc(MAX_XSIZE_POSSIBLE);
// BUG(review): sizeof(max_line) is the size of the pointer (8 bytes), not
// of the allocation, so only the first few bytes are zeroed; the fgets
// result on the next line is also discarded, so a read failure is silent.
memset(max_line,'\0',sizeof(max_line));
fgets(max_line,MAX_XSIZE_POSSIBLE,datTxt1)!=NULL;
while(*max_line)if(*max_line++ == ' ')++XSIZE;
XSIZE+=1;
// Count rows (y) via newline characters in the remainder of the file.
do{
i = fgetc(datTxt1);
if(i == '\n') YSIZE++;
}while(i != EOF);
YSIZE+=1;
fclose(datTxt1);
cout<< "(XSIZE,YSIZE)::"<< "(" << XSIZE << "," << YSIZE << ")" << "\n";
datTxt = fopen(FileName,"r");
if(datTxt == NULL) {
printf("Cannot open file: %s\nCheck if file exists\n",argv[1]);
exit(1);
}
//-----------------------Checking if the data size fits the memory of the GPU-----------//
cout<< "(XSIZE,YSIZE)::"<< "(" << XSIZE << "," << YSIZE << ")" << "\n";
//(MAX_XSIZE_POSSIBLE - XSIZE*YSIZE >0)? printf("There is enough memory for the computation\n"):printf("There is not enough memory and may result in incorrect results\n");
//-----------------------------------------------------------------------//
// Allocating Managed Memory (Unified Memory)
// dim3 gridSize(XSIZE ,(YSIZE+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,1);
// dim3 blockSize(1,THREADS_PER_BLOCK,1);
long int total_threads;
float* data;
float* anisotropy,*azimuth,*angle;
float* variance,*orientation,*ortho;
// total_threads covers the padded launch grid (XSIZE rounded up to whole
// blocks), so the per-thread scratch arrays below never index out of range.
total_threads = THREADS_PER_BLOCK * ((XSIZE + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK) * YSIZE;
HANDLE_ERROR(hipMallocManaged((void**)&angle,ANGLESIZE * sizeof(float)));
HANDLE_ERROR(hipMallocManaged((void**)&data,XSIZE * YSIZE * sizeof(float)));
HANDLE_ERROR(hipMallocManaged((void**)&anisotropy,YSIZE * XSIZE * RADIUS/RADSTEP * sizeof(float)));
HANDLE_ERROR(hipMallocManaged((void**)&azimuth,YSIZE * XSIZE * RADIUS/RADSTEP * sizeof(float)));
HANDLE_ERROR(hipMallocManaged((void**)&variance,total_threads * RADIUS * sizeof(float)));
HANDLE_ERROR(hipMallocManaged((void**)&orientation,total_threads * RADIUS * sizeof(float)));
HANDLE_ERROR(hipMallocManaged((void**)&ortho,total_threads * RADIUS * sizeof(float)));
//-----------------------------------------------------------------------//
//XSIZE ints in a row which are max of 5 digits
//with a space in the front and the back and space
//between each number
char *startPtr,*endPtr;
char line[XSIZE * 10 +2+(XSIZE-1)];
memset(line, '\0', sizeof(line));
int Value;
i = 0;
j = 0;
//Assuming each number in the data set has a max of 5 characters.
// NOTE(review): tempVal[5] leaves no room for a terminating NUL when a
// token is exactly 5 characters wide — TODO confirm maximum token width.
char tempVal[5];
memset(tempVal,'\0',sizeof(tempVal));
cout<< "Working1\n";
// Parse the grid row by row: split each line on spaces, store into data[].
while(fgets(line,XSIZE *10 + 2 + (XSIZE-1),datTxt)!=NULL) {
cout << "Working2\n";
startPtr = line;
for(i=0;i<XSIZE;i++) {
Value = 0;
memset(tempVal,'\0',sizeof(tempVal));
if(i != (XSIZE - 1)) {
endPtr = strchr(startPtr,' ');
strncpy(tempVal,startPtr,endPtr-startPtr);
Value = atoi(tempVal);
data[j * XSIZE + i] = Value;
fprintf(inpCheck,"%d ",Value);
endPtr = endPtr + 1;
startPtr = endPtr;
}
else if(i == (XSIZE - 1)){
strcpy(tempVal,startPtr);
Value = atoi(tempVal);
data[j * XSIZE + i] = Value;
fprintf(inpCheck,"%d\n",Value);
}
}
j++;
}
//------------------------------------Matrix Declarations-----------------//
// float angle[ANGLESIZE];
// Candidate directions: ANGLESIZE angles in 5-degree steps (PI is the 3.14
// approximation defined above).
for(int i=0;i<ANGLESIZE;i++) {
angle[i] = i * 5 * PI/180;
}
// Seed the per-thread minima with FLT_MAX so any computed variance wins.
for(i=0;i<RADIUS * total_threads ;i++){
variance[i] = FLT_MAX;
ortho[i] = FLT_MAX;
orientation[i] = FLT_MAX;
}
//--------------------------------------CUDA------------------------------//
hipError_t error;
//error = cudaSetDevice(Get_GPU_devices() -1);
error = hipSetDevice(0);
if(error == hipSuccess){
cout <<"success\n";
}else{
cout <<"unsuccessful\n";
}
//cudaSetDevice(1);
cout<< "Hello1\n";
// One thread per grid cell: x dimension split into 512-thread blocks,
// one block row per data row.
dim3 gridSize((XSIZE + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,YSIZE,1);
dim3 blockSize(THREADS_PER_BLOCK,1,1);
//dim3 gridSize(3,YSIZE,1);
cout <<"Hello2\n";
getMatrix<<<gridSize,blockSize>>>(data,angle,anisotropy,azimuth,variance,orientation,ortho,XSIZE,YSIZE,RADIUS,WINDOW_SIZE);
error = hipDeviceSynchronize();
if(error != hipSuccess){
cout << "CUDA Device Synchronization Error:" << hipGetErrorString(error) << "\n";
// we can't recover from the error -- exit the program
return 0;
}
error = hipGetLastError();
if(error != hipSuccess){
cout <<"CUDA Error:" << hipGetErrorString(error) << "\n";
// we can't recover from the error -- exit the program
return 0;
}
cout << "Hello3\n";
cout << "Hello4\n";
cout << "Hello5\n";
//------------------------------------------------------------------------//
// Write results for interior points only (a RADIUS-wide border is skipped,
// mirroring the kernel's bounds guard). Rows end with '\n' at the last
// interior column, otherwise values are tab-separated.
for(j=0;j<YSIZE ;j++) {
for(i=0;i<XSIZE ;i++) {
if((j>(YSIZE - RADIUS - 1))||(j<(RADIUS))) continue;
if((i>(XSIZE - RADIUS - 1))||(i<(RADIUS))) continue;
if (i == (XSIZE - RADIUS - 1)) {
fprintf(outputAnisotropy00,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAzimuth00,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAnisotropy00,"\n");
fprintf(outputAzimuth00,"\n");
fprintf(outputAnisotropy09,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP +RADIUS/10 -1]);
fprintf(outputAzimuth09,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/10 -1]);
fprintf(outputAnisotropy09,"\n");
fprintf(outputAzimuth09,"\n");
fprintf(outputAnisotropy49,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAzimuth49,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAnisotropy49,"\n");
fprintf(outputAzimuth49,"\n");
fprintf(outputAnisotropy99,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS -1]);
fprintf(outputAzimuth99,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS - 1]);
fprintf(outputAnisotropy99,"\n");
fprintf(outputAzimuth99,"\n");
}
else {
fprintf(outputAnisotropy00,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAzimuth00,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + 0]);
fprintf(outputAnisotropy00,"\t");
fprintf(outputAzimuth00,"\t");
fprintf(outputAnisotropy09,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/10 -1]);
fprintf(outputAzimuth09,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/10 -1]);
fprintf(outputAnisotropy09,"\t");
fprintf(outputAzimuth09,"\t");
fprintf(outputAnisotropy49,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAzimuth49,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS/2 - 1]);
fprintf(outputAnisotropy49,"\t");
fprintf(outputAzimuth49,"\t");
fprintf(outputAnisotropy99,"%f",anisotropy[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS - 1]);
fprintf(outputAzimuth99,"%f",azimuth[j * XSIZE * RADIUS/RADSTEP + i * RADIUS/RADSTEP + RADIUS - 1]);
fprintf(outputAnisotropy99,"\t");
fprintf(outputAzimuth99,"\t");
}
}
}
fclose(datTxt);
fclose(inpCheck);
fclose(outputAnisotropy00);
fclose(outputAnisotropy09);
fclose(outputAnisotropy49);
fclose(outputAnisotropy99);
fclose(outputAzimuth00);
fclose(outputAzimuth09);
fclose(outputAzimuth49);
fclose(outputAzimuth99);
hipFree(data);
hipFree(angle);
hipFree(azimuth);
hipFree(anisotropy);
hipFree(variance);
hipFree(orientation);
hipFree(ortho);
// NOTE(review): max_line is never freed (the free below is commented out).
//free(max_line);
// free(anisotropy);
// free(azimuth);
// size_t free_byte ;
// size_t total_byte ;
/*
cudaMemGetInfo( &free_byte, &total_byte );
double free_db = (double)free_byte;
double total_db = (double)total_byte;
double used_db = total_db - free_db;
// cout << "GPU memory usage: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
*/
return 0;
}
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
//pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//The memcpy is between different src and dst types so we have to handle the
//arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
char y;
} s1_t; //< sizeof(s1_t) == 4
typedef struct {
short x;
short y;
} s2_t; //< sizeof(s2_t) == 4
// Per-thread raw 4-byte copy: out[tid] receives the bytes of in[tid].
// s1_t and s2_t have different member layouts (short+char+pad vs
// short+short), so the transfer is a byte-level copy of the common
// sizeof == 4, not a member-wise assignment.
__global__ void k(s1_t *in, s2_t *out) {
  const size_t numBytes = 4;
  s1_t *src = in + threadIdx.x;
  s2_t *dst = out + threadIdx.x;
  memcpy(dst, src, numBytes);
}
|
code for sm_80
Function : _Z1kP4s1_tP4s2_t
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R4, R2, R3, c[0x0][0x160] ; /* 0x0000580002047625 */
/* 0x001fca00078e0003 */
/*0050*/ LDG.E.U8 R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea8000c1e1100 */
/*0060*/ LDG.E.U8 R9, [R4.64+0x1] ; /* 0x0000010404097981 */
/* 0x000ee8000c1e1100 */
/*0070*/ LDG.E.U8 R11, [R4.64+0x2] ; /* 0x00000204040b7981 */
/* 0x000f28000c1e1100 */
/*0080*/ LDG.E.U8 R13, [R4.64+0x3] ; /* 0x00000304040d7981 */
/* 0x000f62000c1e1100 */
/*0090*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0003 */
/*00a0*/ STG.E.U8 [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x004fe8000c101104 */
/*00b0*/ STG.E.U8 [R2.64+0x1], R9 ; /* 0x0000010902007986 */
/* 0x008fe8000c101104 */
/*00c0*/ STG.E.U8 [R2.64+0x2], R11 ; /* 0x0000020b02007986 */
/* 0x010fe8000c101104 */
/*00d0*/ STG.E.U8 [R2.64+0x3], R13 ; /* 0x0000030d02007986 */
/* 0x020fe2000c101104 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
//pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//The memcpy is between different src and dst types so we have to handle the
//arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
char y;
} s1_t; //< sizeof(s1_t) == 4
typedef struct {
short x;
short y;
} s2_t; //< sizeof(s2_t) == 4
__global__ void k(s1_t *in, s2_t *out) {
size_t len = 4;
memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
.file "tmpxft_0013f60f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t
.type _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t, @function
_Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z1kP4s1_tP4s2_t(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t, .-_Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t
.globl _Z1kP4s1_tP4s2_t
.type _Z1kP4s1_tP4s2_t, @function
_Z1kP4s1_tP4s2_t:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z1kP4s1_tP4s2_t, .-_Z1kP4s1_tP4s2_t
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z1kP4s1_tP4s2_t"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z1kP4s1_tP4s2_t(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
//pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//The memcpy is between different src and dst types so we have to handle the
//arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
char y;
} s1_t; //< sizeof(s1_t) == 4
typedef struct {
short x;
short y;
} s2_t; //< sizeof(s2_t) == 4
__global__ void k(s1_t *in, s2_t *out) {
size_t len = 4;
memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
#include <hip/hip_runtime.h>
//pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//The memcpy is between different src and dst types so we have to handle the
//arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
char y;
} s1_t; //< sizeof(s1_t) == 4
typedef struct {
short x;
short y;
} s2_t; //< sizeof(s2_t) == 4
__global__ void k(s1_t *in, s2_t *out) {
size_t len = 4;
memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
//pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//The memcpy is between different src and dst types so we have to handle the
//arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
char y;
} s1_t; //< sizeof(s1_t) == 4
typedef struct {
short x;
short y;
} s2_t; //< sizeof(s2_t) == 4
__global__ void k(s1_t *in, s2_t *out) {
size_t len = 4;
memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z1kP4s1_tP4s2_t
.globl _Z1kP4s1_tP4s2_t
.p2align 8
.type _Z1kP4s1_tP4s2_t,@function
_Z1kP4s1_tP4s2_t:
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[0:1]
s_waitcnt vmcnt(0)
global_store_b32 v0, v1, s[2:3]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z1kP4s1_tP4s2_t
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z1kP4s1_tP4s2_t, .Lfunc_end0-_Z1kP4s1_tP4s2_t
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z1kP4s1_tP4s2_t
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z1kP4s1_tP4s2_t.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
//pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//The memcpy is between different src and dst types so we have to handle the
//arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
char y;
} s1_t; //< sizeof(s1_t) == 4
typedef struct {
short x;
short y;
} s2_t; //< sizeof(s2_t) == 4
__global__ void k(s1_t *in, s2_t *out) {
size_t len = 4;
memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
.text
.file "kernel.hip"
.globl _Z16__device_stub__kP4s1_tP4s2_t # -- Begin function _Z16__device_stub__kP4s1_tP4s2_t
.p2align 4, 0x90
.type _Z16__device_stub__kP4s1_tP4s2_t,@function
_Z16__device_stub__kP4s1_tP4s2_t: # @_Z16__device_stub__kP4s1_tP4s2_t
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z1kP4s1_tP4s2_t, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z16__device_stub__kP4s1_tP4s2_t, .Lfunc_end0-_Z16__device_stub__kP4s1_tP4s2_t
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z1kP4s1_tP4s2_t, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z1kP4s1_tP4s2_t,@object # @_Z1kP4s1_tP4s2_t
.section .rodata,"a",@progbits
.globl _Z1kP4s1_tP4s2_t
.p2align 3, 0x0
_Z1kP4s1_tP4s2_t:
.quad _Z16__device_stub__kP4s1_tP4s2_t
.size _Z1kP4s1_tP4s2_t, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z1kP4s1_tP4s2_t"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z16__device_stub__kP4s1_tP4s2_t
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z1kP4s1_tP4s2_t
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z1kP4s1_tP4s2_t
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R4, R2, R3, c[0x0][0x160] ; /* 0x0000580002047625 */
/* 0x001fca00078e0003 */
/*0050*/ LDG.E.U8 R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea8000c1e1100 */
/*0060*/ LDG.E.U8 R9, [R4.64+0x1] ; /* 0x0000010404097981 */
/* 0x000ee8000c1e1100 */
/*0070*/ LDG.E.U8 R11, [R4.64+0x2] ; /* 0x00000204040b7981 */
/* 0x000f28000c1e1100 */
/*0080*/ LDG.E.U8 R13, [R4.64+0x3] ; /* 0x00000304040d7981 */
/* 0x000f62000c1e1100 */
/*0090*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0003 */
/*00a0*/ STG.E.U8 [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x004fe8000c101104 */
/*00b0*/ STG.E.U8 [R2.64+0x1], R9 ; /* 0x0000010902007986 */
/* 0x008fe8000c101104 */
/*00c0*/ STG.E.U8 [R2.64+0x2], R11 ; /* 0x0000020b02007986 */
/* 0x010fe8000c101104 */
/*00d0*/ STG.E.U8 [R2.64+0x3], R13 ; /* 0x0000030d02007986 */
/* 0x020fe2000c101104 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z1kP4s1_tP4s2_t
.globl _Z1kP4s1_tP4s2_t
.p2align 8
.type _Z1kP4s1_tP4s2_t,@function
_Z1kP4s1_tP4s2_t:
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[0:1]
s_waitcnt vmcnt(0)
global_store_b32 v0, v1, s[2:3]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z1kP4s1_tP4s2_t
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z1kP4s1_tP4s2_t, .Lfunc_end0-_Z1kP4s1_tP4s2_t
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z1kP4s1_tP4s2_t
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z1kP4s1_tP4s2_t.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0013f60f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t
.type _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t, @function
_Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z1kP4s1_tP4s2_t(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t, .-_Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t
.globl _Z1kP4s1_tP4s2_t
.type _Z1kP4s1_tP4s2_t, @function
_Z1kP4s1_tP4s2_t:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z1kP4s1_tP4s2_tP4s1_tP4s2_t
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z1kP4s1_tP4s2_t, .-_Z1kP4s1_tP4s2_t
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z1kP4s1_tP4s2_t"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z1kP4s1_tP4s2_t(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "kernel.hip"
.globl _Z16__device_stub__kP4s1_tP4s2_t # -- Begin function _Z16__device_stub__kP4s1_tP4s2_t
.p2align 4, 0x90
.type _Z16__device_stub__kP4s1_tP4s2_t,@function
_Z16__device_stub__kP4s1_tP4s2_t: # @_Z16__device_stub__kP4s1_tP4s2_t
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z1kP4s1_tP4s2_t, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z16__device_stub__kP4s1_tP4s2_t, .Lfunc_end0-_Z16__device_stub__kP4s1_tP4s2_t
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z1kP4s1_tP4s2_t, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z1kP4s1_tP4s2_t,@object # @_Z1kP4s1_tP4s2_t
.section .rodata,"a",@progbits
.globl _Z1kP4s1_tP4s2_t
.p2align 3, 0x0
_Z1kP4s1_tP4s2_t:
.quad _Z16__device_stub__kP4s1_tP4s2_t
.size _Z1kP4s1_tP4s2_t, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z1kP4s1_tP4s2_t"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z16__device_stub__kP4s1_tP4s2_t
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z1kP4s1_tP4s2_t
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#define BDIMX 32
#define BDIMY 16
dim3 block(BDIMX,BDIMY);
dim3 grid(1,1);
// Write global thread indices to a 2D shared memory array
// Read the values from shared memory and store them to global memory
__global__ void setRowReadRow(int *out) {
__shared__ int tile[BDIMY][BDIMX];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.y][threadIdx.x] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.y][threadIdx.x];
}
__global__ void setColReadCol(int *out) {
__shared__ int tile[BDIMX][BDIMY];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.x][threadIdx.y] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out) {
extern __shared__ int tile[];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// row and col is the transpose of (threadIdx.x,threadIdx.y)
unsigned int col = thread_id / blockDim.y;
unsigned int row = thread_id % blockDim.y;
tile[thread_id] = thread_id;
__syncthreads();
unsigned int rowcol = row * blockDim.x + col;
out[thread_id] = tile[rowcol];
}
int main() {
int *d_C;
cudaMalloc(&d_C, BDIMX*BDIMY*sizeof(int));
cudaMemset(d_C, 0, BDIMX*BDIMY*sizeof(int));
setRowReadCol<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
}
|
code for sm_80
Function : _Z13setRowReadColPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ I2F.U32.RP R4, c[0x0][0x4] ; /* 0x0000010000047b06 */
/* 0x000e220000209000 */
/*0020*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e620000002600 */
/*0030*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x4], PT ; /* 0x00000100ff007a0c */
/* 0x000fe20003f45070 */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0050*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e680000002500 */
/*0060*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000ea80000002200 */
/*0070*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000ee20000002100 */
/*0080*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e220000001000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], R5 ; /* 0x0000030000007a24 */
/* 0x002fe200078e0205 */
/*00a0*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x001fc60007ffe0ff */
/*00b0*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x004fc600078e0207 */
/*00c0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000062000021f000 */
/*00d0*/ IMAD R0, R0, c[0x0][0x0], R9 ; /* 0x0000000000007a24 */
/* 0x008fca00078e0209 */
/*00e0*/ STS [R0.X4], R0 ; /* 0x0000000000007388 */
/* 0x000fe20000004800 */
/*00f0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*0100*/ IADD3 R6, RZ, -R3, RZ ; /* 0x80000003ff067210 */
/* 0x002fca0007ffe0ff */
/*0110*/ IMAD R5, R6, c[0x0][0x4], RZ ; /* 0x0000010006057a24 */
/* 0x000fc800078e02ff */
/*0120*/ IMAD.HI.U32 R3, R3, R5, R2 ; /* 0x0000000503037227 */
/* 0x000fcc00078e0002 */
/*0130*/ IMAD.HI.U32 R3, R3, R0, RZ ; /* 0x0000000003037227 */
/* 0x000fc800078e00ff */
/*0140*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0150*/ IMAD R2, R5, c[0x0][0x4], R0 ; /* 0x0000010005027a24 */
/* 0x000fe200078e0200 */
/*0160*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe80000010000 */
/*0170*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x4], PT ; /* 0x0000010002007a0c */
/* 0x000fda0003f06070 */
/*0180*/ @P0 IADD3 R2, R2, -c[0x0][0x4], RZ ; /* 0x8000010002020a10 */
/* 0x000fe40007ffe0ff */
/*0190*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*01a0*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x4], PT ; /* 0x0000010002007a0c */
/* 0x000fda0003f26070 */
/*01b0*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x4], RZ, 0x33, !PT ; /* 0x00000100ff03aa12 */
/* 0x000fc800078e33ff */
/*01d0*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fca0007ffe1ff */
/*01e0*/ IMAD R2, R5, c[0x0][0x4], R0 ; /* 0x0000010005027a24 */
/* 0x000fc800078e0200 */
/*01f0*/ IMAD R4, R2, c[0x0][0x0], R3 ; /* 0x0000000002047a24 */
/* 0x000fe400078e0203 */
/*0200*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc600078e00ff */
/*0210*/ LDS R5, [R4.X4] ; /* 0x0000000004057984 */
/* 0x000e220000004800 */
/*0220*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0003 */
/*0230*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0240*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0250*/ BRA 0x250; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13setColReadColPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e680000002200 */
/*0050*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000ea20000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fe200078e0203 */
/*0070*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fc600000001ff */
/*0080*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fe200078e0205 */
/*0090*/ LEA R7, R2, R5, 0x4 ; /* 0x0000000502077211 */
/* 0x004fc600078e20ff */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R2 ; /* 0x0000000000007a24 */
/* 0x000fc800078e0202 */
/*00b0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0003 */
/*00c0*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x000fe80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00e0*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */
/* 0x000e280000004800 */
/*00f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13setRowReadRowPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e680000002200 */
/*0050*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000ea20000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fe200078e0203 */
/*0070*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fc600000001ff */
/*0080*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fe200078e0205 */
/*0090*/ LEA R7, R5, R2, 0x5 ; /* 0x0000000205077211 */
/* 0x004fc600078e28ff */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R2 ; /* 0x0000000000007a24 */
/* 0x000fc800078e0202 */
/*00b0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0003 */
/*00c0*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x000fe80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00e0*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */
/* 0x000e280000004800 */
/*00f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#define BDIMX 32
#define BDIMY 16
dim3 block(BDIMX,BDIMY);
dim3 grid(1,1);
// Write global thread indices to a 2D shared memory array
// Read the values from shared memory and store them to global memory
__global__ void setRowReadRow(int *out) {
__shared__ int tile[BDIMY][BDIMX];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.y][threadIdx.x] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.y][threadIdx.x];
}
__global__ void setColReadCol(int *out) {
__shared__ int tile[BDIMX][BDIMY];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.x][threadIdx.y] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out) {
extern __shared__ int tile[];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// row and col is the transpose of (threadIdx.x,threadIdx.y)
unsigned int col = thread_id / blockDim.y;
unsigned int row = thread_id % blockDim.y;
tile[thread_id] = thread_id;
__syncthreads();
unsigned int rowcol = row * blockDim.x + col;
out[thread_id] = tile[rowcol];
}
int main() {
int *d_C;
cudaMalloc(&d_C, BDIMX*BDIMY*sizeof(int));
cudaMemset(d_C, 0, BDIMX*BDIMY*sizeof(int));
setRowReadCol<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
}
|
.file "tmpxft_000bc269_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z13setRowReadRowPiPi
.type _Z33__device_stub__Z13setRowReadRowPiPi, @function
_Z33__device_stub__Z13setRowReadRowPiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13setRowReadRowPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z33__device_stub__Z13setRowReadRowPiPi, .-_Z33__device_stub__Z13setRowReadRowPiPi
.globl _Z13setRowReadRowPi
.type _Z13setRowReadRowPi, @function
_Z13setRowReadRowPi:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z13setRowReadRowPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z13setRowReadRowPi, .-_Z13setRowReadRowPi
.globl _Z33__device_stub__Z13setColReadColPiPi
.type _Z33__device_stub__Z13setColReadColPiPi, @function
_Z33__device_stub__Z13setColReadColPiPi:
.LFB2054:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13setColReadColPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z33__device_stub__Z13setColReadColPiPi, .-_Z33__device_stub__Z13setColReadColPiPi
.globl _Z13setColReadColPi
.type _Z13setColReadColPi, @function
_Z13setColReadColPi:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z13setColReadColPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z13setColReadColPi, .-_Z13setColReadColPi
.globl _Z33__device_stub__Z13setRowReadColPiPi
.type _Z33__device_stub__Z13setRowReadColPiPi, @function
_Z33__device_stub__Z13setRowReadColPiPi:
.LFB2056:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13setRowReadColPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z33__device_stub__Z13setRowReadColPiPi, .-_Z33__device_stub__Z13setRowReadColPiPi
.globl _Z13setRowReadColPi
.type _Z13setRowReadColPi, @function
_Z13setRowReadColPi:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z13setRowReadColPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z13setRowReadColPi, .-_Z13setRowReadColPi
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $2048, %esi
call cudaMalloc@PLT
movl $2048, %edx
movl $0, %esi
movq (%rsp), %rdi
call cudaMemset@PLT
movl 8+block(%rip), %ecx
movl $0, %r9d
movl $2048, %r8d
movq block(%rip), %rdx
movq grid(%rip), %rdi
movl 8+grid(%rip), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L28:
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L32
movl $0, %eax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
movq (%rsp), %rdi
call _Z33__device_stub__Z13setRowReadColPiPi
jmp .L28
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13setRowReadColPi"
.LC1:
.string "_Z13setColReadColPi"
.LC2:
.string "_Z13setRowReadRowPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13setRowReadColPi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z13setColReadColPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z13setRowReadRowPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl grid
.data
.align 8
.type grid, @object
.size grid, 12
grid:
.long 1
.long 1
.long 1
.globl block
.align 8
.type block, @object
.size block, 12
block:
.long 32
.long 16
.long 1
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#define BDIMX 32
#define BDIMY 16
dim3 block(BDIMX,BDIMY);
dim3 grid(1,1);
// Write global thread indices to a 2D shared memory array
// Read the values from shared memory and store them to global memory
__global__ void setRowReadRow(int *out) {
__shared__ int tile[BDIMY][BDIMX];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.y][threadIdx.x] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.y][threadIdx.x];
}
__global__ void setColReadCol(int *out) {
__shared__ int tile[BDIMX][BDIMY];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.x][threadIdx.y] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out) {
extern __shared__ int tile[];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// row and col is the transpose of (threadIdx.x,threadIdx.y)
unsigned int col = thread_id / blockDim.y;
unsigned int row = thread_id % blockDim.y;
tile[thread_id] = thread_id;
__syncthreads();
unsigned int rowcol = row * blockDim.x + col;
out[thread_id] = tile[rowcol];
}
int main() {
int *d_C;
cudaMalloc(&d_C, BDIMX*BDIMY*sizeof(int));
cudaMemset(d_C, 0, BDIMX*BDIMY*sizeof(int));
setRowReadCol<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
}
|
#include <hip/hip_runtime.h>
#define BDIMX 32
#define BDIMY 16
dim3 block(BDIMX,BDIMY);
dim3 grid(1,1);
// Write global thread indices to a 2D shared memory array
// Read the values from shared memory and store them to global memory
__global__ void setRowReadRow(int *out) {
__shared__ int tile[BDIMY][BDIMX];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.y][threadIdx.x] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.y][threadIdx.x];
}
__global__ void setColReadCol(int *out) {
__shared__ int tile[BDIMX][BDIMY];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.x][threadIdx.y] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out) {
extern __shared__ int tile[];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// row and col is the transpose of (threadIdx.x,threadIdx.y)
unsigned int col = thread_id / blockDim.y;
unsigned int row = thread_id % blockDim.y;
tile[thread_id] = thread_id;
__syncthreads();
unsigned int rowcol = row * blockDim.x + col;
out[thread_id] = tile[rowcol];
}
int main() {
int *d_C;
hipMalloc(&d_C, BDIMX*BDIMY*sizeof(int));
hipMemset(d_C, 0, BDIMX*BDIMY*sizeof(int));
setRowReadCol<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#define BDIMX 32
#define BDIMY 16
dim3 block(BDIMX,BDIMY);
dim3 grid(1,1);
// Write global thread indices to a 2D shared memory array
// Read the values from shared memory and store them to global memory
__global__ void setRowReadRow(int *out) {
__shared__ int tile[BDIMY][BDIMX];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.y][threadIdx.x] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.y][threadIdx.x];
}
__global__ void setColReadCol(int *out) {
__shared__ int tile[BDIMX][BDIMY];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
tile[threadIdx.x][threadIdx.y] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out) {
extern __shared__ int tile[];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// row and col is the transpose of (threadIdx.x,threadIdx.y)
unsigned int col = thread_id / blockDim.y;
unsigned int row = thread_id % blockDim.y;
tile[thread_id] = thread_id;
__syncthreads();
unsigned int rowcol = row * blockDim.x + col;
out[thread_id] = tile[rowcol];
}
int main() {
int *d_C;
hipMalloc(&d_C, BDIMX*BDIMY*sizeof(int));
hipMemset(d_C, 0, BDIMX*BDIMY*sizeof(int));
setRowReadCol<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13setRowReadRowPi
.globl _Z13setRowReadRowPi
.p2align 8
.type _Z13setRowReadRowPi,@function
_Z13setRowReadRowPi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x8
s_load_b32 s3, s[0:1], 0x14
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_load_b64 s[0:1], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
v_lshlrev_b32_e32 v5, 2, v0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_lshr_b32 s4, s3, 16
s_add_i32 s2, s2, s14
v_mad_u64_u32 v[2:3], null, s2, s4, v[1:2]
s_and_b32 s2, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[3:4], null, v2, s2, v[0:1]
v_mov_b32_e32 v4, 0
v_lshl_add_u32 v0, v1, 7, v5
ds_store_b32 v0, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v2, v0
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13setRowReadRowPi
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13setRowReadRowPi, .Lfunc_end0-_Z13setRowReadRowPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13setColReadColPi
.globl _Z13setColReadColPi
.p2align 8
.type _Z13setColReadColPi,@function
_Z13setColReadColPi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x8
s_load_b32 s3, s[0:1], 0x14
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_lshr_b32 s4, s3, 16
s_add_i32 s2, s2, s14
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s2, s4, v[1:2]
v_lshlrev_b32_e32 v1, 2, v1
s_and_b32 s2, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[3:4], null, v2, s2, v[0:1]
v_mov_b32_e32 v4, 0
v_lshl_add_u32 v0, v0, 6, v1
ds_store_b32 v0, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v2, v0
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13setColReadColPi
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13setColReadColPi, .Lfunc_end1-_Z13setColReadColPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13setRowReadColPi
.globl _Z13setRowReadColPi
.p2align 8
.type _Z13setRowReadColPi,@function
_Z13setRowReadColPi:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x14
s_load_b32 s4, s[0:1], 0x8
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_mul_i32 s4, s4, s15
v_cvt_f32_u32_e32 v1, s3
s_sub_i32 s5, 0, s3
s_add_i32 s4, s4, s14
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v5, v1
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_lo_u32 v4, s5, v5
v_mad_u64_u32 v[2:3], null, s4, s3, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v1, v5, v4
v_mad_u64_u32 v[3:4], null, v2, s2, v[0:1]
v_add_nc_u32_e32 v0, v5, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v0, v3, v0
v_add_nc_u32_e32 v2, 1, v0
v_mul_lo_u32 v1, v0, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v1, v3, v1
v_subrev_nc_u32_e32 v4, s3, v1
v_cmp_le_u32_e32 vcc_lo, s3, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v0, v0, v2 :: v_dual_cndmask_b32 v1, v1, v4
v_add_nc_u32_e32 v2, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_le_u32_e32 vcc_lo, s3, v1
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v0, s3
v_sub_nc_u32_e32 v4, v3, v1
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, v4, s2, v[0:1]
v_lshl_add_u32 v0, v3, 2, 0
v_mov_b32_e32 v4, 0
ds_store_b32 v0, v3
v_lshl_add_u32 v1, v1, 2, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v2, v1
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13setRowReadColPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z13setRowReadColPi, .Lfunc_end2-_Z13setRowReadColPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13setRowReadRowPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13setRowReadRowPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13setColReadColPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13setColReadColPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
- .offset: 128
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13setRowReadColPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13setRowReadColPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#define BDIMX 32
#define BDIMY 16
dim3 block(BDIMX,BDIMY);
dim3 grid(1,1);
// Write global thread indices to a 2D shared memory array
// Read the values from shared memory and store them to global memory
__global__ void setRowReadRow(int *out) {
// Static shared tile: BDIMY x BDIMX ints (16 * 32 * 4 = 2048 bytes).
__shared__ int tile[BDIMY][BDIMX];
// Linearize the 2D grid position, then the 2D thread position inside the
// block, to get a globally unique thread index.
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
// Indexed [y][x]: the store and the read below use the same (row-major) layout.
tile[threadIdx.y][threadIdx.x] = thread_id;
// Wait for all threads
__syncthreads();
// Read back the element this thread wrote and copy it to global memory.
out[thread_id] = tile[threadIdx.y][threadIdx.x];
}
__global__ void setColReadCol(int *out) {
// Tile dimensions are transposed relative to setRowReadRow: BDIMX rows of
// BDIMY ints (32 * 16 * 4 = 2048 bytes).
__shared__ int tile[BDIMX][BDIMY];
// Same global-thread-id computation as setRowReadRow.
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// store in shared memory
// Indexed [x][y]: adjacent threads (varying threadIdx.x) write strided slots;
// read below uses the identical [x][y] index, so each thread reads its own value.
tile[threadIdx.x][threadIdx.y] = thread_id;
// Wait for all threads
__syncthreads();
out[thread_id] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out) {
// Dynamically sized shared array; the launch in main() supplies
// BDIMX * BDIMY * sizeof(int) bytes, so tile has blockDim.x*blockDim.y slots.
extern __shared__ int tile[];
unsigned int block_id = (gridDim.x * blockIdx.y) + blockIdx.x;
unsigned int thread_id = (block_id * blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x + threadIdx.x);
// row and col is the transpose of (threadIdx.x,threadIdx.y)
// col in [0, blockDim.x), row in [0, blockDim.y) — derived by dividing the
// flat id by blockDim.y, i.e. the transposed coordinate pair.
unsigned int col = thread_id / blockDim.y;
unsigned int row = thread_id % blockDim.y;
// Store linearly (thread_id-th slot gets thread_id).
tile[thread_id] = thread_id;
__syncthreads();
// Read from the transposed position; rowcol is a permutation of [0, bdx*bdy).
unsigned int rowcol = row * blockDim.x + col;
out[thread_id] = tile[rowcol];
}
int main() {
int *d_C;
// 2048 bytes = BDIMX * BDIMY * sizeof(int).
// NOTE(review): hipMalloc/hipMemset return codes are ignored, and the program
// exits without hipDeviceSynchronize() or hipFree(d_C) — acceptable for a
// throwaway benchmark driver, but worth confirming that is intentional.
hipMalloc(&d_C, BDIMX*BDIMY*sizeof(int));
hipMemset(d_C, 0, BDIMX*BDIMY*sizeof(int));
// Third launch parameter is the dynamic shared-memory size required by
// setRowReadCol's `extern __shared__ int tile[]`.
setRowReadCol<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
}
|
.text
.file "main.hip"
	.globl	_Z28__device_stub__setRowReadRowPi # -- Begin function _Z28__device_stub__setRowReadRowPi
	.p2align	4, 0x90
	.type	_Z28__device_stub__setRowReadRowPi,@function
_Z28__device_stub__setRowReadRowPi:     # @_Z28__device_stub__setRowReadRowPi
# Host-side launch stub for setRowReadRow(int*): recovers the <<<...>>>
# configuration pushed by the caller and forwards it to hipLaunchKernel.
	.cfi_startproc
# %bb.0:
	subq	$72, %rsp                       # 72 + 8 (ret addr) keeps rsp 16-aligned at calls
	.cfi_def_cfa_offset 80
	movq	%rdi, 64(%rsp)                  # spill kernel argument (int *out)
	leaq	64(%rsp), %rax
	movq	%rax, (%rsp)                    # kernel-args array: { &out }
	leaq	48(%rsp), %rdi                  # out-param: grid dim
	leaq	32(%rsp), %rsi                  # out-param: block dim
	leaq	24(%rsp), %rdx                  # out-param: shared-mem bytes
	leaq	16(%rsp), %rcx                  # out-param: stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi                  # grid dim (x,y packed)
	movl	56(%rsp), %edx                  # grid dim z
	movq	32(%rsp), %rcx                  # block dim (x,y packed)
	movl	40(%rsp), %r8d                  # block dim z
	movq	%rsp, %r9                       # args array
	movl	$_Z13setRowReadRowPi, %edi      # host handle registered for this kernel
	pushq	16(%rsp)                        # stream (stack offsets shift after each push)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$88, %rsp                       # 72 local + 2*8 pushed
	.cfi_adjust_cfa_offset -88
	retq
.Lfunc_end0:
	.size	_Z28__device_stub__setRowReadRowPi, .Lfunc_end0-_Z28__device_stub__setRowReadRowPi
	.cfi_endproc
                                        # -- End function
	.globl	_Z28__device_stub__setColReadColPi # -- Begin function _Z28__device_stub__setColReadColPi
	.p2align	4, 0x90
	.type	_Z28__device_stub__setColReadColPi,@function
_Z28__device_stub__setColReadColPi:     # @_Z28__device_stub__setColReadColPi
# Launch stub for setColReadCol(int*); identical in structure to the
# setRowReadRow stub above, differing only in the kernel handle passed in edi.
	.cfi_startproc
# %bb.0:
	subq	$72, %rsp
	.cfi_def_cfa_offset 80
	movq	%rdi, 64(%rsp)                  # spill int *out
	leaq	64(%rsp), %rax
	movq	%rax, (%rsp)                    # kernel-args array: { &out }
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration       # recover <<<...>>> configuration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	movq	%rsp, %r9
	movl	$_Z13setColReadColPi, %edi      # kernel handle
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$88, %rsp
	.cfi_adjust_cfa_offset -88
	retq
.Lfunc_end1:
	.size	_Z28__device_stub__setColReadColPi, .Lfunc_end1-_Z28__device_stub__setColReadColPi
	.cfi_endproc
                                        # -- End function
	.globl	_Z28__device_stub__setRowReadColPi # -- Begin function _Z28__device_stub__setRowReadColPi
	.p2align	4, 0x90
	.type	_Z28__device_stub__setRowReadColPi,@function
_Z28__device_stub__setRowReadColPi:     # @_Z28__device_stub__setRowReadColPi
# Launch stub for setRowReadCol(int*); identical in structure to the
# setRowReadRow stub above, differing only in the kernel handle passed in edi.
	.cfi_startproc
# %bb.0:
	subq	$72, %rsp
	.cfi_def_cfa_offset 80
	movq	%rdi, 64(%rsp)                  # spill int *out
	leaq	64(%rsp), %rax
	movq	%rax, (%rsp)                    # kernel-args array: { &out }
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration       # recover <<<...>>> configuration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	movq	%rsp, %r9
	movl	$_Z13setRowReadColPi, %edi      # kernel handle
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$88, %rsp
	.cfi_adjust_cfa_offset -88
	retq
.Lfunc_end2:
	.size	_Z28__device_stub__setRowReadColPi, .Lfunc_end2-_Z28__device_stub__setRowReadColPi
	.cfi_endproc
                                        # -- End function
	.globl	main                            # -- Begin function main
	.p2align	4, 0x90
	.type	main,@function
main:                                   # @main
# Allocates and zeroes a 2048-byte device buffer, then launches
# setRowReadCol<<<grid, block, 2048>>>(d_C) with the launch inlined
# (push/pop call configuration + hipLaunchKernel).
	.cfi_startproc
# %bb.0:
	subq	$88, %rsp
	.cfi_def_cfa_offset 96
	leaq	8(%rsp), %rdi                   # &d_C lives at 8(%rsp)
	movl	$2048, %esi                     # imm = 0x800 = BDIMX*BDIMY*sizeof(int)
	callq	hipMalloc
	movq	8(%rsp), %rdi                   # d_C
	movl	$2048, %edx                     # imm = 0x800
	xorl	%esi, %esi                      # fill value 0
	callq	hipMemset
	movq	grid(%rip), %rdi                # grid.x/.y (packed)
	movl	grid+8(%rip), %esi              # grid.z
	movq	block(%rip), %rdx               # block.x/.y (packed)
	movl	block+8(%rip), %ecx             # block.z
	movl	$2048, %r8d                     # imm = 0x800: dynamic shared-mem bytes
	xorl	%r9d, %r9d                      # stream = 0
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB3_2                         # nonzero return => skip the launch
# %bb.1:
	movq	8(%rsp), %rax
	movq	%rax, 80(%rsp)                  # arg slot: d_C
	leaq	80(%rsp), %rax
	movq	%rax, 16(%rsp)                  # kernel-args array: { &d_C }
	leaq	64(%rsp), %rdi
	leaq	48(%rsp), %rsi
	leaq	40(%rsp), %rdx
	leaq	32(%rsp), %rcx
	callq	__hipPopCallConfiguration       # read the configuration back out
	movq	64(%rsp), %rsi                  # grid dims
	movl	72(%rsp), %edx
	movq	48(%rsp), %rcx                  # block dims
	movl	56(%rsp), %r8d
	leaq	16(%rsp), %r9                   # args array
	movl	$_Z13setRowReadColPi, %edi      # kernel handle
	pushq	32(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	48(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
.LBB3_2:
	xorl	%eax, %eax                      # return 0 (no device sync or free before exit)
	addq	$88, %rsp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end3:
	.size	main, .Lfunc_end3-main
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
# Module initializer (run from .init_array): registers the fat binary once,
# registers the three kernels against their mangled names, and arranges for
# __hip_module_dtor to run at exit.
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB4_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache module handle
.LBB4_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the 32 bytes of optional out-params
	movups	%xmm0, (%rsp)
	movl	$_Z13setRowReadRowPi, %esi      # host-side handle
	movl	$.L__unnamed_1, %edx            # device-side name "_Z13setRowReadRowPi"
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z13setColReadColPi, %esi
	movl	$.L__unnamed_2, %edx            # "_Z13setColReadColPi"
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z13setRowReadColPi, %esi
	movl	$.L__unnamed_3, %edx            # "_Z13setRowReadColPi"
	movl	$.L__unnamed_3, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL: register destructor
.Lfunc_end4:
	.size	__hip_module_ctor, .Lfunc_end4-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
# atexit handler: unregister the fat binary (if it was registered) and
# clear the cached handle so the teardown is idempotent.
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing registered => nothing to do
	je	.LBB5_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB5_2:
	retq
.Lfunc_end5:
	.size	__hip_module_dtor, .Lfunc_end5-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type block,@object # @block
.data
.globl block
.p2align 3, 0x0
block:
.long 32 # 0x20
.long 16 # 0x10
.long 1 # 0x1
.size block, 12
.type grid,@object # @grid
.globl grid
.p2align 3, 0x0
grid:
.long 1 # 0x1
.long 1 # 0x1
.long 1 # 0x1
.size grid, 12
.type _Z13setRowReadRowPi,@object # @_Z13setRowReadRowPi
.section .rodata,"a",@progbits
.globl _Z13setRowReadRowPi
.p2align 3, 0x0
_Z13setRowReadRowPi:
.quad _Z28__device_stub__setRowReadRowPi
.size _Z13setRowReadRowPi, 8
.type _Z13setColReadColPi,@object # @_Z13setColReadColPi
.globl _Z13setColReadColPi
.p2align 3, 0x0
_Z13setColReadColPi:
.quad _Z28__device_stub__setColReadColPi
.size _Z13setColReadColPi, 8
.type _Z13setRowReadColPi,@object # @_Z13setRowReadColPi
.globl _Z13setRowReadColPi
.p2align 3, 0x0
_Z13setRowReadColPi:
.quad _Z28__device_stub__setRowReadColPi
.size _Z13setRowReadColPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13setRowReadRowPi"
.size .L__unnamed_1, 20
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13setColReadColPi"
.size .L__unnamed_2, 20
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z13setRowReadColPi"
.size .L__unnamed_3, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__setRowReadRowPi
.addrsig_sym _Z28__device_stub__setColReadColPi
.addrsig_sym _Z28__device_stub__setRowReadColPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13setRowReadRowPi
.addrsig_sym _Z13setColReadColPi
.addrsig_sym _Z13setRowReadColPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z13setRowReadColPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ I2F.U32.RP R4, c[0x0][0x4] ; /* 0x0000010000047b06 */
/* 0x000e220000209000 */
/*0020*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e620000002600 */
/*0030*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x4], PT ; /* 0x00000100ff007a0c */
/* 0x000fe20003f45070 */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0050*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e680000002500 */
/*0060*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000ea80000002200 */
/*0070*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000ee20000002100 */
/*0080*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e220000001000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], R5 ; /* 0x0000030000007a24 */
/* 0x002fe200078e0205 */
/*00a0*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x001fc60007ffe0ff */
/*00b0*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x004fc600078e0207 */
/*00c0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000062000021f000 */
/*00d0*/ IMAD R0, R0, c[0x0][0x0], R9 ; /* 0x0000000000007a24 */
/* 0x008fca00078e0209 */
/*00e0*/ STS [R0.X4], R0 ; /* 0x0000000000007388 */
/* 0x000fe20000004800 */
/*00f0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*0100*/ IADD3 R6, RZ, -R3, RZ ; /* 0x80000003ff067210 */
/* 0x002fca0007ffe0ff */
/*0110*/ IMAD R5, R6, c[0x0][0x4], RZ ; /* 0x0000010006057a24 */
/* 0x000fc800078e02ff */
/*0120*/ IMAD.HI.U32 R3, R3, R5, R2 ; /* 0x0000000503037227 */
/* 0x000fcc00078e0002 */
/*0130*/ IMAD.HI.U32 R3, R3, R0, RZ ; /* 0x0000000003037227 */
/* 0x000fc800078e00ff */
/*0140*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0150*/ IMAD R2, R5, c[0x0][0x4], R0 ; /* 0x0000010005027a24 */
/* 0x000fe200078e0200 */
/*0160*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe80000010000 */
/*0170*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x4], PT ; /* 0x0000010002007a0c */
/* 0x000fda0003f06070 */
/*0180*/ @P0 IADD3 R2, R2, -c[0x0][0x4], RZ ; /* 0x8000010002020a10 */
/* 0x000fe40007ffe0ff */
/*0190*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*01a0*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x4], PT ; /* 0x0000010002007a0c */
/* 0x000fda0003f26070 */
/*01b0*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x4], RZ, 0x33, !PT ; /* 0x00000100ff03aa12 */
/* 0x000fc800078e33ff */
/*01d0*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fca0007ffe1ff */
/*01e0*/ IMAD R2, R5, c[0x0][0x4], R0 ; /* 0x0000010005027a24 */
/* 0x000fc800078e0200 */
/*01f0*/ IMAD R4, R2, c[0x0][0x0], R3 ; /* 0x0000000002047a24 */
/* 0x000fe400078e0203 */
/*0200*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc600078e00ff */
/*0210*/ LDS R5, [R4.X4] ; /* 0x0000000004057984 */
/* 0x000e220000004800 */
/*0220*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0003 */
/*0230*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0240*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0250*/ BRA 0x250; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13setColReadColPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e680000002200 */
/*0050*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000ea20000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fe200078e0203 */
/*0070*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fc600000001ff */
/*0080*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fe200078e0205 */
/*0090*/ LEA R7, R2, R5, 0x4 ; /* 0x0000000502077211 */
/* 0x004fc600078e20ff */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R2 ; /* 0x0000000000007a24 */
/* 0x000fc800078e0202 */
/*00b0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0003 */
/*00c0*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x000fe80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00e0*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */
/* 0x000e280000004800 */
/*00f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13setRowReadRowPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e680000002200 */
/*0050*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000ea20000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fe200078e0203 */
/*0070*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fc600000001ff */
/*0080*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fe200078e0205 */
/*0090*/ LEA R7, R5, R2, 0x5 ; /* 0x0000000205077211 */
/* 0x004fc600078e28ff */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R2 ; /* 0x0000000000007a24 */
/* 0x000fc800078e0202 */
/*00b0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0003 */
/*00c0*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x000fe80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00e0*/ LDS R5, [R7.X4] ; /* 0x0000000007057984 */
/* 0x000e280000004800 */
/*00f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
	.protected	_Z13setRowReadRowPi     ; -- Begin function _Z13setRowReadRowPi
	.globl	_Z13setRowReadRowPi
	.p2align	8
	.type	_Z13setRowReadRowPi,@function
_Z13setRowReadRowPi:
; gfx1100 device code for setRowReadRow(int*): row-major store to a static
; 2048-byte LDS tile, barrier, row-major read back, store to out[thread_id].
; Kernarg layout (see metadata): +0x00 out ptr, +0x08 block_count_x,
; +0x14 group_size_x|y packed. s14/s15 = workgroup id x/y; v0 = packed
; workitem id (x in bits 0..9, y in bits 10..19).
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x8          ; s2 = gridDim.x
	s_load_b32 s3, s[0:1], 0x14         ; s3 = blockDim.y<<16 | blockDim.x
	v_bfe_u32 v1, v0, 10, 10            ; v1 = threadIdx.y
	v_and_b32_e32 v0, 0x3ff, v0         ; v0 = threadIdx.x
	s_load_b64 s[0:1], s[0:1], 0x0      ; s[0:1] = out
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
	v_lshlrev_b32_e32 v5, 2, v0         ; v5 = tx * 4 (byte offset within a row)
	s_waitcnt lgkmcnt(0)
	s_mul_i32 s2, s2, s15               ; gridDim.x * blockIdx.y
	s_lshr_b32 s4, s3, 16               ; s4 = blockDim.y
	s_add_i32 s2, s2, s14               ; block_id = gridDim.x*blockIdx.y + blockIdx.x
	v_mad_u64_u32 v[2:3], null, s2, s4, v[1:2] ; v2 = block_id*blockDim.y + ty
	s_and_b32 s2, s3, 0xffff            ; s2 = blockDim.x
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_mad_u64_u32 v[3:4], null, v2, s2, v[0:1] ; v3 = thread_id = v2*blockDim.x + tx
	v_mov_b32_e32 v4, 0                 ; zero-extend thread_id to 64 bits in v[3:4]
	v_lshl_add_u32 v0, v1, 7, v5        ; LDS byte addr = ty*128 + tx*4 (row stride 32 ints)
	ds_store_b32 v0, v3                 ; tile[ty][tx] = thread_id
	s_waitcnt lgkmcnt(0)
	s_barrier                           ; __syncthreads()
	buffer_gl0_inv
	ds_load_b32 v2, v0                  ; read back the same LDS slot
	v_lshlrev_b64 v[0:1], 2, v[3:4]     ; byte offset = thread_id * 4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v0, vcc_lo, s0, v0     ; 64-bit add: out + offset
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	s_waitcnt lgkmcnt(0)
	global_store_b32 v[0:1], v2, off    ; out[thread_id] = tile[ty][tx]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13setRowReadRowPi
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13setRowReadRowPi, .Lfunc_end0-_Z13setRowReadRowPi
.section .AMDGPU.csdata,"",@progbits
.text
	.protected	_Z13setColReadColPi     ; -- Begin function _Z13setColReadColPi
	.globl	_Z13setColReadColPi
	.p2align	8
	.type	_Z13setColReadColPi,@function
_Z13setColReadColPi:
; gfx1100 device code for setColReadCol(int*): same thread-id computation as
; setRowReadRowPi, but the LDS tile is indexed [tx][ty] with a row stride of
; 16 ints (tile[BDIMX][BDIMY]), i.e. LDS addr = tx*64 + ty*4.
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x8          ; s2 = gridDim.x
	s_load_b32 s3, s[0:1], 0x14         ; s3 = blockDim.y<<16 | blockDim.x
	v_bfe_u32 v1, v0, 10, 10            ; v1 = threadIdx.y
	v_and_b32_e32 v0, 0x3ff, v0         ; v0 = threadIdx.x
	s_load_b64 s[0:1], s[0:1], 0x0      ; s[0:1] = out
	s_waitcnt lgkmcnt(0)
	s_mul_i32 s2, s2, s15               ; gridDim.x * blockIdx.y
	s_lshr_b32 s4, s3, 16               ; s4 = blockDim.y
	s_add_i32 s2, s2, s14               ; block_id
	s_delay_alu instid0(SALU_CYCLE_1)
	v_mad_u64_u32 v[2:3], null, s2, s4, v[1:2] ; block_id*blockDim.y + ty
	v_lshlrev_b32_e32 v1, 2, v1         ; v1 = ty * 4
	s_and_b32 s2, s3, 0xffff            ; s2 = blockDim.x
	s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
	v_mad_u64_u32 v[3:4], null, v2, s2, v[0:1] ; v3 = thread_id
	v_mov_b32_e32 v4, 0                 ; zero-extend thread_id in v[3:4]
	v_lshl_add_u32 v0, v0, 6, v1        ; LDS byte addr = tx*64 + ty*4 (column layout)
	ds_store_b32 v0, v3                 ; tile[tx][ty] = thread_id
	s_waitcnt lgkmcnt(0)
	s_barrier                           ; __syncthreads()
	buffer_gl0_inv
	ds_load_b32 v2, v0                  ; read back the same slot
	v_lshlrev_b64 v[0:1], 2, v[3:4]     ; byte offset = thread_id * 4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v0, vcc_lo, s0, v0     ; out + offset
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	s_waitcnt lgkmcnt(0)
	global_store_b32 v[0:1], v2, off    ; out[thread_id] = tile[tx][ty]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13setColReadColPi
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13setColReadColPi, .Lfunc_end1-_Z13setColReadColPi
.section .AMDGPU.csdata,"",@progbits
.text
	.protected	_Z13setRowReadColPi     ; -- Begin function _Z13setRowReadColPi
	.globl	_Z13setRowReadColPi
	.p2align	8
	.type	_Z13setRowReadColPi,@function
_Z13setRowReadColPi:
; gfx1100 device code for setRowReadCol(int*), which uses dynamic LDS.
; Computes col = thread_id / blockDim.y and row = thread_id % blockDim.y with
; a float-reciprocal-based unsigned division (blockDim.y is runtime-variable),
; stores tile[thread_id] = thread_id, then reads tile[row*blockDim.x + col].
	s_clause 0x2
	s_load_b32 s2, s[0:1], 0x14         ; s2 = blockDim.y<<16 | blockDim.x
	s_load_b32 s4, s[0:1], 0x8          ; s4 = gridDim.x
	s_load_b64 s[0:1], s[0:1], 0x0      ; s[0:1] = out
	s_waitcnt lgkmcnt(0)
	s_lshr_b32 s3, s2, 16               ; s3 = blockDim.y (the divisor)
	s_mul_i32 s4, s4, s15               ; gridDim.x * blockIdx.y
	v_cvt_f32_u32_e32 v1, s3            ; begin 1/blockDim.y approximation
	s_sub_i32 s5, 0, s3                 ; s5 = -blockDim.y (for the refinement step)
	s_add_i32 s4, s4, s14               ; block_id
	s_and_b32 s2, s2, 0xffff            ; s2 = blockDim.x
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
	v_rcp_iflag_f32_e32 v1, v1
	s_waitcnt_depctr 0xfff
	v_mul_f32_e32 v1, 0x4f7ffffe, v1    ; scale to a ~2^32/divisor fixed point
	v_cvt_u32_f32_e32 v5, v1            ; v5 = integer reciprocal estimate
	v_bfe_u32 v1, v0, 10, 10            ; v1 = threadIdx.y
	v_and_b32_e32 v0, 0x3ff, v0         ; v0 = threadIdx.x
	s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
	v_mul_lo_u32 v4, s5, v5             ; Newton-Raphson error term
	v_mad_u64_u32 v[2:3], null, s4, s3, v[1:2] ; block_id*blockDim.y + ty
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mul_hi_u32 v1, v5, v4
	v_mad_u64_u32 v[3:4], null, v2, s2, v[0:1] ; v3 = thread_id
	v_add_nc_u32_e32 v0, v5, v1         ; refined reciprocal
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mul_hi_u32 v0, v3, v0             ; v0 = quotient estimate (col)
	v_add_nc_u32_e32 v2, 1, v0
	v_mul_lo_u32 v1, v0, s3
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_sub_nc_u32_e32 v1, v3, v1         ; remainder estimate
	v_subrev_nc_u32_e32 v4, s3, v1
	v_cmp_le_u32_e32 vcc_lo, s3, v1     ; first correction: remainder >= divisor?
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_dual_cndmask_b32 v0, v0, v2 :: v_dual_cndmask_b32 v1, v1, v4
	v_add_nc_u32_e32 v2, 1, v0
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_cmp_le_u32_e32 vcc_lo, s3, v1     ; second correction step
	v_cndmask_b32_e32 v0, v0, v2, vcc_lo ; v0 = col = thread_id / blockDim.y
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mul_lo_u32 v1, v0, s3
	v_sub_nc_u32_e32 v4, v3, v1         ; v4 = row = thread_id % blockDim.y
	s_delay_alu instid0(VALU_DEP_1)
	v_mad_u64_u32 v[1:2], null, v4, s2, v[0:1] ; v1 = rowcol = row*blockDim.x + col
	v_lshl_add_u32 v0, v3, 2, 0         ; store addr = thread_id * 4
	v_mov_b32_e32 v4, 0                 ; zero-extend thread_id in v[3:4]
	ds_store_b32 v0, v3                 ; tile[thread_id] = thread_id (dynamic LDS)
	v_lshl_add_u32 v1, v1, 2, 0         ; load addr = rowcol * 4
	s_waitcnt lgkmcnt(0)
	s_barrier                           ; __syncthreads()
	buffer_gl0_inv
	ds_load_b32 v2, v1                  ; tile[rowcol]
	v_lshlrev_b64 v[0:1], 2, v[3:4]     ; byte offset = thread_id * 4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v0, vcc_lo, s0, v0     ; out + offset
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	s_waitcnt lgkmcnt(0)
	global_store_b32 v[0:1], v2, off    ; out[thread_id] = tile[rowcol]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13setRowReadColPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z13setRowReadColPi, .Lfunc_end2-_Z13setRowReadColPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13setRowReadRowPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13setRowReadRowPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13setColReadColPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13setColReadColPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
- .offset: 128
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13setRowReadColPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13setRowReadColPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_000bc269_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
# Unregister the embedded fatbinary at program exit (installed via atexit).
# ABI: SysV AMD64. Clobbers caller-saved regs only; the sub/add $8 keeps
# rsp 16-byte aligned at the call site.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side launch stub for kernel setRowReadRow(int*).
# Stores the single pointer argument on the stack, pops the <<<...>>> launch
# configuration, and forwards it to cudaLaunchKernel. Stack-protector canary
# guards the frame.
.globl _Z33__device_stub__Z13setRowReadRowPiPi
.type _Z33__device_stub__Z13setRowReadRowPiPi, @function
_Z33__device_stub__Z13setRowReadRowPiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)                    # spill kernel arg (int*) to stack
movq %fs:40, %rax
movq %rax, 88(%rsp)                   # save stack canary
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)                   # args array: { &arg0 }
movl $1, 32(%rsp)                     # default grid/block dims = (1,1,1)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7                                # 0 => configuration popped, do launch
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax                     # canary check
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)                        # stream + shared-mem size as stack args
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9                    # kernel-args array
movq 60(%rsp), %rcx                   # blockDim
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi                   # gridDim
movl 56(%rsp), %edx
leaq _Z13setRowReadRowPi(%rip), %rdi  # kernel handle
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z33__device_stub__Z13setRowReadRowPiPi, .-_Z33__device_stub__Z13setRowReadRowPiPi
# Host symbol for kernel setRowReadRow(int*): thin tail wrapper that forwards
# to the launch stub. This is the address registered with the CUDA runtime.
.globl _Z13setRowReadRowPi
.type _Z13setRowReadRowPi, @function
_Z13setRowReadRowPi:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z13setRowReadRowPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z13setRowReadRowPi, .-_Z13setRowReadRowPi
# Host-side launch stub for kernel setColReadCol(int*).
# Identical structure to the setRowReadRow stub: spill arg, pop launch
# config, call cudaLaunchKernel with the kernel handle.
.globl _Z33__device_stub__Z13setColReadColPiPi
.type _Z33__device_stub__Z13setColReadColPiPi, @function
_Z33__device_stub__Z13setColReadColPiPi:
.LFB2054:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)                    # spill kernel arg (int*)
movq %fs:40, %rax
movq %rax, 88(%rsp)                   # stack canary
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)                   # args array: { &arg0 }
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 88(%rsp), %rax
subq %fs:40, %rax                     # canary check
jne .L16
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13setColReadColPi(%rip), %rdi  # kernel handle
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z33__device_stub__Z13setColReadColPiPi, .-_Z33__device_stub__Z13setColReadColPiPi
# Host symbol for kernel setColReadCol(int*): forwards to its launch stub.
.globl _Z13setColReadColPi
.type _Z13setColReadColPi, @function
_Z13setColReadColPi:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z13setColReadColPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z13setColReadColPi, .-_Z13setColReadColPi
# Host-side launch stub for kernel setRowReadCol(int*) — the kernel main()
# actually launches. Same shape as the other two stubs.
.globl _Z33__device_stub__Z13setRowReadColPiPi
.type _Z33__device_stub__Z13setRowReadColPiPi, @function
_Z33__device_stub__Z13setRowReadColPiPi:
.LFB2056:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)                    # spill kernel arg (int*)
movq %fs:40, %rax
movq %rax, 88(%rsp)                   # stack canary
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)                   # args array: { &arg0 }
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 88(%rsp), %rax
subq %fs:40, %rax                     # canary check
jne .L24
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13setRowReadColPi(%rip), %rdi  # kernel handle
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z33__device_stub__Z13setRowReadColPiPi, .-_Z33__device_stub__Z13setRowReadColPiPi
# Host symbol for kernel setRowReadCol(int*): forwards to its launch stub.
.globl _Z13setRowReadColPi
.type _Z13setRowReadColPi, @function
_Z13setRowReadColPi:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z13setRowReadColPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z13setRowReadColPi, .-_Z13setRowReadColPi
# int main(void):
#   cudaMalloc(&dev, 2048); cudaMemset(dev, 0, 2048);
#   setRowReadCol<<<grid, block, 2048, 0>>>(dev);  return 0;
# grid/block dims come from the global `grid` and `block` dim3 objects;
# 2048 is the dynamic shared-memory size. Return value of the CUDA calls is
# not checked (as in the original source).
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)                    # stack canary
xorl %eax, %eax
movq %rsp, %rdi                       # &dev
movl $2048, %esi
call cudaMalloc@PLT
movl $2048, %edx
movl $0, %esi
movq (%rsp), %rdi
call cudaMemset@PLT
movl 8+block(%rip), %ecx              # block.z
movl $0, %r9d                         # stream 0
movl $2048, %r8d                      # dynamic shared memory bytes
movq block(%rip), %rdx                # block.x/.y packed
movq grid(%rip), %rdi                 # grid.x/.y packed
movl 8+grid(%rip), %esi               # grid.z
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31                               # 0 => config pushed, launch kernel
.L28:
movq 8(%rsp), %rax
subq %fs:40, %rax                     # canary check
jne .L32
movl $0, %eax                         # return 0
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
movq (%rsp), %rdi                     # dev pointer -> kernel arg
call _Z33__device_stub__Z13setRowReadColPiPi
jmp .L28
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13setRowReadColPi"
.LC1:
.string "_Z13setColReadColPi"
.LC2:
.string "_Z13setRowReadRowPi"
.text
# Static initializer (runs from .init_array before main):
# registers the fatbinary and the three kernels with the CUDA runtime, then
# installs the unregister routine via atexit. Each __cudaRegisterFunction
# call pushes four NULL trailing args (thread limit / dims / shared size).
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx                       # keep handle in callee-saved rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx                 # device-side name string
movq %rdx, %rcx
leaq _Z13setRowReadColPi(%rip), %rsi  # host stub address
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z13setColReadColPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z13setRowReadRowPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                       # unregister fatbin at exit
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Static data:
#  - .init_array entry installing the CUDA registration initializer.
#  - __fatDeviceText: fatbin wrapper {magic 0x466243b1, version 1, data, 0}.
#  - grid = dim3(1,1,1), block = dim3(32,16,1) used by main's launch.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977                      # fatbin wrapper magic
.long 1                               # version
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl grid
.data
.align 8
.type grid, @object
.size grid, 12
grid:
.long 1
.long 1
.long 1
.globl block
.align 8
.type block, @object
.size block, 12
block:
.long 32
.long 16
.long 1
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "main.hip"
# HIP launch stub for setRowReadRow(int*): pop the pushed call configuration
# and forward grid/block dims, kernel-args array, shared-mem size and stream
# to hipLaunchKernel. Clang frameless style; rsp restored by add $88 after
# the two pushes.
.globl _Z28__device_stub__setRowReadRowPi # -- Begin function _Z28__device_stub__setRowReadRowPi
.p2align 4, 0x90
.type _Z28__device_stub__setRowReadRowPi,@function
_Z28__device_stub__setRowReadRowPi: # @_Z28__device_stub__setRowReadRowPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)                   # spill kernel arg (int*)
leaq 64(%rsp), %rax
movq %rax, (%rsp)                     # args array: { &arg0 }
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi                   # gridDim
movl 56(%rsp), %edx
movq 32(%rsp), %rcx                   # blockDim
movl 40(%rsp), %r8d
movq %rsp, %r9                        # kernel-args array
movl $_Z13setRowReadRowPi, %edi       # kernel handle
pushq 16(%rsp)                        # stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)                        # shared-mem bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z28__device_stub__setRowReadRowPi, .Lfunc_end0-_Z28__device_stub__setRowReadRowPi
.cfi_endproc
# -- End function
# HIP launch stub for setColReadCol(int*); identical shape to the
# setRowReadRow stub, differing only in the kernel handle.
.globl _Z28__device_stub__setColReadColPi # -- Begin function _Z28__device_stub__setColReadColPi
.p2align 4, 0x90
.type _Z28__device_stub__setColReadColPi,@function
_Z28__device_stub__setColReadColPi: # @_Z28__device_stub__setColReadColPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)                   # spill kernel arg (int*)
leaq 64(%rsp), %rax
movq %rax, (%rsp)                     # args array: { &arg0 }
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z13setColReadColPi, %edi       # kernel handle
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z28__device_stub__setColReadColPi, .Lfunc_end1-_Z28__device_stub__setColReadColPi
.cfi_endproc
# -- End function
# HIP launch stub for setRowReadCol(int*); identical shape to the
# setRowReadRow stub, differing only in the kernel handle.
.globl _Z28__device_stub__setRowReadColPi # -- Begin function _Z28__device_stub__setRowReadColPi
.p2align 4, 0x90
.type _Z28__device_stub__setRowReadColPi,@function
_Z28__device_stub__setRowReadColPi: # @_Z28__device_stub__setRowReadColPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)                   # spill kernel arg (int*)
leaq 64(%rsp), %rax
movq %rax, (%rsp)                     # args array: { &arg0 }
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z13setRowReadColPi, %edi       # kernel handle
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z28__device_stub__setRowReadColPi, .Lfunc_end2-_Z28__device_stub__setRowReadColPi
.cfi_endproc
# -- End function
# HIP main():
#   hipMalloc(&dev, 2048); hipMemset(dev, 0, 2048);
#   setRowReadCol<<<grid, block, 2048, 0>>>(dev);  return 0;
# Unlike the stub functions, clang inlined the launch sequence here: push the
# configuration, and if that succeeds (returns 0) pop it and call
# hipLaunchKernel directly.
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
leaq 8(%rsp), %rdi                    # &dev
movl $2048, %esi # imm = 0x800
callq hipMalloc
movq 8(%rsp), %rdi
movl $2048, %edx # imm = 0x800
xorl %esi, %esi
callq hipMemset
movq grid(%rip), %rdi                 # grid.x/.y packed
movl grid+8(%rip), %esi               # grid.z
movq block(%rip), %rdx                # block.x/.y packed
movl block+8(%rip), %ecx              # block.z
movl $2048, %r8d # imm = 0x800        # dynamic shared memory bytes
xorl %r9d, %r9d                       # stream 0
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2                           # non-zero => skip the launch
# %bb.1:
movq 8(%rsp), %rax
movq %rax, 80(%rsp)                   # spill dev pointer as kernel arg
leaq 80(%rsp), %rax
movq %rax, 16(%rsp)                   # args array: { &arg0 }
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi                   # gridDim
movl 72(%rsp), %edx
movq 48(%rsp), %rcx                   # blockDim
movl 56(%rsp), %r8d
leaq 16(%rsp), %r9                    # kernel-args array
movl $_Z13setRowReadColPi, %edi       # kernel handle
pushq 32(%rsp)                        # stream
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)                        # shared-mem bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
xorl %eax, %eax                       # return 0
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
# Module constructor (from .init_array): lazily registers the HIP fatbinary
# once (guarded by __hip_gpubin_handle), registers the three kernels, and
# tail-calls atexit to install __hip_module_dtor. The zeroed 32-byte stack
# area supplies the four NULL trailing args to each __hipRegisterFunction.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2                           # already registered
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)                # zero the 4 trailing pointer args
movups %xmm0, (%rsp)
movl $_Z13setRowReadRowPi, %esi       # host kernel handle
movl $.L__unnamed_1, %edx             # device-side name string
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13setColReadColPi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13setRowReadColPi, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (installed via atexit): unregisters the fatbinary if it
# was registered, then clears the handle so the teardown is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2                            # nothing to unregister
# %bb.1:
pushq %rax                            # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
# Data objects: block = dim3(32,16,1), grid = dim3(1,1,1), plus the three
# HIP kernel handles — each is an 8-byte .rodata object holding the address
# of its host launch stub (HIP's indirect-kernel-handle scheme).
.type block,@object # @block
.data
.globl block
.p2align 3, 0x0
block:
.long 32 # 0x20
.long 16 # 0x10
.long 1 # 0x1
.size block, 12
.type grid,@object # @grid
.globl grid
.p2align 3, 0x0
grid:
.long 1 # 0x1
.long 1 # 0x1
.long 1 # 0x1
.size grid, 12
.type _Z13setRowReadRowPi,@object # @_Z13setRowReadRowPi
.section .rodata,"a",@progbits
.globl _Z13setRowReadRowPi
.p2align 3, 0x0
_Z13setRowReadRowPi:
.quad _Z28__device_stub__setRowReadRowPi
.size _Z13setRowReadRowPi, 8
.type _Z13setColReadColPi,@object # @_Z13setColReadColPi
.globl _Z13setColReadColPi
.p2align 3, 0x0
_Z13setColReadColPi:
.quad _Z28__device_stub__setColReadColPi
.size _Z13setColReadColPi, 8
.type _Z13setRowReadColPi,@object # @_Z13setRowReadColPi
.globl _Z13setRowReadColPi
.p2align 3, 0x0
_Z13setRowReadColPi:
.quad _Z28__device_stub__setRowReadColPi
.size _Z13setRowReadColPi, 8
# Kernel-name strings, the fatbin wrapper {magic "HIPF" 0x48495046, version,
# payload, 0}, the lazily-set registration handle, the .init_array entry for
# the module ctor, and the translation-unit id byte.
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13setRowReadRowPi"
.size .L__unnamed_1, 20
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13setColReadColPi"
.size .L__unnamed_2, 20
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z13setRowReadColPi"
.size .L__unnamed_3, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__setRowReadRowPi
.addrsig_sym _Z28__device_stub__setColReadColPi
.addrsig_sym _Z28__device_stub__setRowReadColPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13setRowReadRowPi
.addrsig_sym _Z13setColReadColPi
.addrsig_sym _Z13setRowReadColPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <stdio.h>
#include <math.h>
// Kernel: epsilon(p) dispersion. One thread per (i, j) momentum-grid point.
// Sums -g*|p - k' - n*q| * cos^2(theta) * |U|^2 over k'-grid points, photon
// sideband n in {-1,0,1} and band l in {0,1,2}, restricted to occupied
// states (E <= mu); scales by dk'_x*dk'_y/(2*pi)^2 and adds the kinetic
// term p^2. Output is row-major: output_arr[N_ky*i + j].
// NOTE(review): accumulation order is part of the observed FP result; do
// not reorder the loops.
__global__ void epsilon_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
double *kxprime_arr, double *kyprime_arr,
double *E_k_n, double *U_k,
double mu, double q, double g, double theta,
int N_kx, int N_ky, int debug)
{
const int i = threadIdx.x + blockDim.x*blockIdx.x;
const int j = threadIdx.y + blockDim.y*blockIdx.y;
const int N_n = 3;   // number of sidebands (n = -1, 0, 1)
const int N_l = 3;   // number of bands
int iprime, jprime, n, l;
const double px = px_arr[i];
const double py = py_arr[j];
double kxprime, kyprime;
double px_pot, py_pot, potential;
double evec_element, eigenval;
double accumulator = 0;
// sin^2 term multiplied by 0: deliberately disabled, kept for symmetry.
double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
// Uniform grid spacing assumed — TODO confirm px_arr/py_arr are uniform.
const double dpx = px_arr[1] - px_arr[0];
const double dpy = py_arr[1] - py_arr[0];
const double prefactor = dpx*dpy/(4*M_PI*M_PI);
if ((i < N_kx) & (j < N_ky)){
for (iprime=0; iprime<N_kx; iprime++){
for (jprime=0; jprime<N_ky; jprime++){
kxprime = kxprime_arr[iprime];
kyprime = kyprime_arr[jprime];
for (n=-1; n<2; n++){
px_pot = px - kxprime - n*q;
py_pot = py - kyprime;
potential = - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
for (l=0; l<3; l++){
// E_k_n indexed [iprime][jprime][l]; occupied states only.
eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
if (eigenval <= mu){
// U_k indexed [iprime][jprime][n+1][l].
evec_element = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*(n+1) + l];
// if ((debug > 0) & (i==7) & (j==5) & (iprime==22) & (jprime==33) & (n==0) & (l==0)){
// printf(" epsilon_k[7,5] kprime[22,33] n=0, l=0 (CUDA):\n");
// printf(" px is: %f\n", px);
// printf(" py is: %f\n", py);
// printf(" kxprime is: %f\n", kxprime);
// printf(" kyprime is: %f\n", kyprime);
// printf(" potential terms is %f\n",potential);
// printf(" evec element is %f\n", evec_element);
// printf(" value of term is: %f\n", potential*evec_element*evec_element);
// }
accumulator += potential * evec_element*evec_element;
}
}
}
}
}
// Multiply the sum by the appropriate prefactor:
accumulator *= prefactor;
// Add the kinetic term:
accumulator += (px*px + py*py);
output_arr[N_ky*i + j] = accumulator;
// if ((debug > 0) & (i==7) & (j==5)){
// printf(" epsilon_k[7,5] (CUDA): %f\n\n", accumulator);
// }
}
}
// Kernel: off-diagonal coupling h(p). One thread per (i, j) grid point.
// For each occupied state (E <= mu) at each k'-point, accumulates
// (V(q) - V(p - k' + q)) * U1*U2 + (V(q) - V(p - k')) * U2*U3, where
// V(p) = g*|p| * cos^2(theta) and U1..U3 are the three sideband components
// of the eigenvector; scales by dk'_x*dk'_y/(2*pi)^2.
// Output is row-major: output_arr[N_ky*i + j].
// NOTE(review): accumulation order is part of the observed FP result; do
// not reorder the loops.
__global__ void h_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
double *kxprime_arr, double *kyprime_arr,
double *E_k_n, double *U_k,
double mu, double q, double g, double theta,
int N_kx, int N_ky, int debug)
{
const int i = threadIdx.x + blockDim.x*blockIdx.x;
const int j = threadIdx.y + blockDim.y*blockIdx.y;
const int N_n = 3;   // number of sidebands
const int N_l = 3;   // number of bands
int iprime, jprime, l;
double px = px_arr[i];
double py = py_arr[j];
double kxprime, kyprime;
double px_pot, py_pot, potential_1, potential_2;
double evec_element_1, evec_element_2, evec_element_3, eigenval;
double accumulator = 0;
// sin^2 term multiplied by 0: deliberately disabled, kept for symmetry.
double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
double V_of_q = g*sqrt(q*q) * pot_trig_terms; // V(px=q, py=0)
// Uniform grid spacing assumed — TODO confirm px_arr/py_arr are uniform.
const double dpx = px_arr[1] - px_arr[0];
const double dpy = py_arr[1] - py_arr[0];
const double prefactor = dpx*dpy/(4*M_PI*M_PI);
if ((i < N_kx) & (j < N_ky)){
for (iprime=0; iprime<N_kx; iprime++){
for (jprime=0; jprime<N_ky; jprime++){
kxprime = kxprime_arr[iprime];
kyprime = kyprime_arr[jprime];
for (l=0; l<3; l++){
// E_k_n indexed [iprime][jprime][l]; occupied states only.
eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
if (eigenval <= mu){
// U_k indexed [iprime][jprime][n][l] for n = 0, 1, 2.
evec_element_1 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*0 + l];
evec_element_2 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*1 + l];
evec_element_3 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*2 + l];
px_pot = px - kxprime + q;
py_pot = py - kyprime;
potential_1 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
px_pot = px - kxprime;
py_pot = py - kyprime;
potential_2 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
accumulator += potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3;
// if ((debug > 0) & (i==7) & (j==5) & (iprime==22) & (jprime==33) & (l==0)){
// printf(" h_k[7,5] kprime[22,33], l=0 (CUDA):\n");
// printf(" px is: %f\n", px);
// printf(" py is: %f\n", py);
// printf(" kxprime is: %f\n", kxprime);
// printf(" kyprime is: %f\n", kyprime);
// printf(" potential term 1 is %f\n", potential_1);
// printf(" potential term 2 is %f\n", potential_2);
// printf(" value of term is: %f\n",
// potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3);
// }
}
}
}
}
}
// Multiply the sum by the appropriate prefactor:
accumulator *= prefactor;
output_arr[N_ky*i + j] = accumulator;
// if ((debug > 0) & (i==7) & (j==5)){
// printf(" h_k[7,5] (CUDA): %f\n\n", accumulator);
// }
}
}
|
.file "tmpxft_00085588_00000000-6_cuda_module.cudafe1.cpp"
.text
#APP
#NO_APP
# Unregister the embedded fatbinary at program exit (installed via atexit).
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Launch stub for epsilon_of_p_GPU(double*, ..., double x4, int x3).
# Spills the 6 pointer args (rdi..r9), 4 double args (xmm0..xmm3) and the
# first stack int to the frame, builds the 14-entry kernel-args pointer
# array at 160(%rsp) (the last two ints are passed by address of their
# caller-stack slots at 312/320/328), pops the launch configuration and
# calls cudaLaunchKernel.
.globl _Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
.type _Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, @function
_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii:
.LFB2081:
.cfi_startproc
endbr64
subq $296, %rsp
.cfi_def_cfa_offset 304
movq %rdi, 88(%rsp)                   # output_arr
movq %rsi, 80(%rsp)                   # px_arr
movq %rdx, 72(%rsp)                   # py_arr
movq %rcx, 64(%rsp)                   # kxprime_arr
movq %r8, 56(%rsp)                    # kyprime_arr
movq %r9, 48(%rsp)                    # E_k_n
movsd %xmm0, 32(%rsp)                 # mu
movsd %xmm1, 24(%rsp)                 # q
movsd %xmm2, 16(%rsp)                 # g
movsd %xmm3, 8(%rsp)                  # theta
movq 304(%rsp), %rax                  # U_k (first stack arg)
movq %rax, 40(%rsp)
movq %fs:40, %rax
movq %rax, 280(%rsp)                  # stack canary
xorl %eax, %eax
leaq 88(%rsp), %rax                   # kernel-args array, in signature order
movq %rax, 160(%rsp)
leaq 80(%rsp), %rax
movq %rax, 168(%rsp)
leaq 72(%rsp), %rax
movq %rax, 176(%rsp)
leaq 64(%rsp), %rax
movq %rax, 184(%rsp)
leaq 56(%rsp), %rax
movq %rax, 192(%rsp)
leaq 48(%rsp), %rax
movq %rax, 200(%rsp)
leaq 40(%rsp), %rax
movq %rax, 208(%rsp)
leaq 32(%rsp), %rax
movq %rax, 216(%rsp)
leaq 24(%rsp), %rax
movq %rax, 224(%rsp)
leaq 16(%rsp), %rax
movq %rax, 232(%rsp)
leaq 8(%rsp), %rax
movq %rax, 240(%rsp)
leaq 312(%rsp), %rax                  # N_kx, N_ky, debug: caller-stack slots
movq %rax, 248(%rsp)
leaq 320(%rsp), %rax
movq %rax, 256(%rsp)
leaq 328(%rsp), %rax
movq %rax, 264(%rsp)
movl $1, 112(%rsp)                    # default dims (1,1,1)/(1,1,1)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
leaq 104(%rsp), %rcx
leaq 96(%rsp), %rdx
leaq 124(%rsp), %rsi
leaq 112(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7                                # 0 => launch
.L3:
movq 280(%rsp), %rax
subq %fs:40, %rax                     # canary check
jne .L8
addq $296, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 104(%rsp)                       # stream + shared-mem size
.cfi_def_cfa_offset 312
pushq 104(%rsp)
.cfi_def_cfa_offset 320
leaq 176(%rsp), %r9                   # kernel-args array
movq 140(%rsp), %rcx                  # blockDim
movl 148(%rsp), %r8d
movq 128(%rsp), %rsi                  # gridDim
movl 136(%rsp), %edx
leaq _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 304
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, .-_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
# Host symbol for epsilon_of_p_GPU: re-pushes the four stack-passed
# arguments (U_k pointer + N_kx, N_ky, debug ints, each widened to a
# 8-byte slot) and forwards to the launch stub.
.globl _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.type _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, @function
_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 40(%rsp), %eax                   # debug
pushq %rax
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax                   # N_ky (offset shifts with each push)
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax                   # N_kx
pushq %rax
.cfi_def_cfa_offset 40
pushq 40(%rsp)                        # U_k
.cfi_def_cfa_offset 48
call _Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, .-_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
# Launch stub for h_of_p_GPU(double*, ..., double x4, int x3).
# Structurally identical to the epsilon_of_p_GPU stub (same signature):
# spill register args, build the kernel-args pointer array, pop the launch
# configuration and call cudaLaunchKernel.
.globl _Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
.type _Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, @function
_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii:
.LFB2083:
.cfi_startproc
endbr64
subq $296, %rsp
.cfi_def_cfa_offset 304
movq %rdi, 88(%rsp)                   # output_arr
movq %rsi, 80(%rsp)                   # px_arr
movq %rdx, 72(%rsp)                   # py_arr
movq %rcx, 64(%rsp)                   # kxprime_arr
movq %r8, 56(%rsp)                    # kyprime_arr
movq %r9, 48(%rsp)                    # E_k_n
movsd %xmm0, 32(%rsp)                 # mu
movsd %xmm1, 24(%rsp)                 # q
movsd %xmm2, 16(%rsp)                 # g
movsd %xmm3, 8(%rsp)                  # theta
movq 304(%rsp), %rax                  # U_k (first stack arg)
movq %rax, 40(%rsp)
movq %fs:40, %rax
movq %rax, 280(%rsp)                  # stack canary
xorl %eax, %eax
leaq 88(%rsp), %rax                   # kernel-args array, in signature order
movq %rax, 160(%rsp)
leaq 80(%rsp), %rax
movq %rax, 168(%rsp)
leaq 72(%rsp), %rax
movq %rax, 176(%rsp)
leaq 64(%rsp), %rax
movq %rax, 184(%rsp)
leaq 56(%rsp), %rax
movq %rax, 192(%rsp)
leaq 48(%rsp), %rax
movq %rax, 200(%rsp)
leaq 40(%rsp), %rax
movq %rax, 208(%rsp)
leaq 32(%rsp), %rax
movq %rax, 216(%rsp)
leaq 24(%rsp), %rax
movq %rax, 224(%rsp)
leaq 16(%rsp), %rax
movq %rax, 232(%rsp)
leaq 8(%rsp), %rax
movq %rax, 240(%rsp)
leaq 312(%rsp), %rax                  # N_kx, N_ky, debug: caller-stack slots
movq %rax, 248(%rsp)
leaq 320(%rsp), %rax
movq %rax, 256(%rsp)
leaq 328(%rsp), %rax
movq %rax, 264(%rsp)
movl $1, 112(%rsp)                    # default dims (1,1,1)/(1,1,1)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
leaq 104(%rsp), %rcx
leaq 96(%rsp), %rdx
leaq 124(%rsp), %rsi
leaq 112(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15                               # 0 => launch
.L11:
movq 280(%rsp), %rax
subq %fs:40, %rax                     # canary check
jne .L16
addq $296, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 104(%rsp)                       # stream + shared-mem size
.cfi_def_cfa_offset 312
pushq 104(%rsp)
.cfi_def_cfa_offset 320
leaq 176(%rsp), %r9                   # kernel-args array
movq 140(%rsp), %rcx                  # blockDim
movl 148(%rsp), %r8d
movq 128(%rsp), %rsi                  # gridDim
movl 136(%rsp), %edx
leaq _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 304
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, .-_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
# Host symbol for h_of_p_GPU: re-pushes the four stack-passed arguments
# (U_k pointer + N_kx, N_ky, debug) and forwards to the launch stub.
.globl _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.type _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, @function
_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 40(%rsp), %eax                   # debug
pushq %rax
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax                   # N_ky (offset shifts with each push)
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax                   # N_kx
pushq %rax
.cfi_def_cfa_offset 40
pushq 40(%rsp)                        # U_k
.cfi_def_cfa_offset 48
call _Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, .-_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.align 8
.LC1:
.string "_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.text
# Static initializer: registers the fatbinary plus both kernels
# (h_of_p_GPU, epsilon_of_p_GPU) with the CUDA runtime and installs the
# unregister routine via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx                       # keep handle in callee-saved rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx                 # device-side name string
movq %rdx, %rcx
leaq _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                       # unregister fatbin at exit
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Static data: .init_array entry for the registration initializer, the
# fatbin wrapper {magic, version 1, payload, 0}, and the handle storage.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977                      # fatbin wrapper magic
.long 1                               # version
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdio.h>
#include <math.h>
// Kernel: epsilon(p) dispersion (duplicate listing of the same source).
// One thread per (i, j) momentum-grid point. Sums the occupied-state
// potential terms over k'-grid, sideband n in {-1,0,1} and band l in
// {0,1,2}, scales by dk'_x*dk'_y/(2*pi)^2 and adds the kinetic term p^2.
// NOTE(review): accumulation order is part of the observed FP result; do
// not reorder the loops.
__global__ void epsilon_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
double *kxprime_arr, double *kyprime_arr,
double *E_k_n, double *U_k,
double mu, double q, double g, double theta,
int N_kx, int N_ky, int debug)
{
const int i = threadIdx.x + blockDim.x*blockIdx.x;
const int j = threadIdx.y + blockDim.y*blockIdx.y;
const int N_n = 3;   // number of sidebands (n = -1, 0, 1)
const int N_l = 3;   // number of bands
int iprime, jprime, n, l;
const double px = px_arr[i];
const double py = py_arr[j];
double kxprime, kyprime;
double px_pot, py_pot, potential;
double evec_element, eigenval;
double accumulator = 0;
// sin^2 term multiplied by 0: deliberately disabled, kept for symmetry.
double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
// Uniform grid spacing assumed — TODO confirm px_arr/py_arr are uniform.
const double dpx = px_arr[1] - px_arr[0];
const double dpy = py_arr[1] - py_arr[0];
const double prefactor = dpx*dpy/(4*M_PI*M_PI);
if ((i < N_kx) & (j < N_ky)){
for (iprime=0; iprime<N_kx; iprime++){
for (jprime=0; jprime<N_ky; jprime++){
kxprime = kxprime_arr[iprime];
kyprime = kyprime_arr[jprime];
for (n=-1; n<2; n++){
px_pot = px - kxprime - n*q;
py_pot = py - kyprime;
potential = - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
for (l=0; l<3; l++){
// E_k_n indexed [iprime][jprime][l]; occupied states only.
eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
if (eigenval <= mu){
// U_k indexed [iprime][jprime][n+1][l].
evec_element = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*(n+1) + l];
// if ((debug > 0) & (i==7) & (j==5) & (iprime==22) & (jprime==33) & (n==0) & (l==0)){
// printf(" epsilon_k[7,5] kprime[22,33] n=0, l=0 (CUDA):\n");
// printf(" px is: %f\n", px);
// printf(" py is: %f\n", py);
// printf(" kxprime is: %f\n", kxprime);
// printf(" kyprime is: %f\n", kyprime);
// printf(" potential terms is %f\n",potential);
// printf(" evec element is %f\n", evec_element);
// printf(" value of term is: %f\n", potential*evec_element*evec_element);
// }
accumulator += potential * evec_element*evec_element;
}
}
}
}
}
// Multiply the sum by the appropriate prefactor:
accumulator *= prefactor;
// Add the kinetic term:
accumulator += (px*px + py*py);
output_arr[N_ky*i + j] = accumulator;
// if ((debug > 0) & (i==7) & (j==5)){
// printf(" epsilon_k[7,5] (CUDA): %f\n\n", accumulator);
// }
}
}
// Kernel: off-diagonal coupling h(p) (duplicate listing of the same
// source). One thread per (i, j) grid point; accumulates
// (V(q) - V(p - k' + q)) * U1*U2 + (V(q) - V(p - k')) * U2*U3 over
// occupied states and scales by dk'_x*dk'_y/(2*pi)^2.
// NOTE(review): accumulation order is part of the observed FP result; do
// not reorder the loops.
__global__ void h_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
double *kxprime_arr, double *kyprime_arr,
double *E_k_n, double *U_k,
double mu, double q, double g, double theta,
int N_kx, int N_ky, int debug)
{
const int i = threadIdx.x + blockDim.x*blockIdx.x;
const int j = threadIdx.y + blockDim.y*blockIdx.y;
const int N_n = 3;   // number of sidebands
const int N_l = 3;   // number of bands
int iprime, jprime, l;
double px = px_arr[i];
double py = py_arr[j];
double kxprime, kyprime;
double px_pot, py_pot, potential_1, potential_2;
double evec_element_1, evec_element_2, evec_element_3, eigenval;
double accumulator = 0;
// sin^2 term multiplied by 0: deliberately disabled, kept for symmetry.
double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
double V_of_q = g*sqrt(q*q) * pot_trig_terms; // V(px=q, py=0)
// Uniform grid spacing assumed — TODO confirm px_arr/py_arr are uniform.
const double dpx = px_arr[1] - px_arr[0];
const double dpy = py_arr[1] - py_arr[0];
const double prefactor = dpx*dpy/(4*M_PI*M_PI);
if ((i < N_kx) & (j < N_ky)){
for (iprime=0; iprime<N_kx; iprime++){
for (jprime=0; jprime<N_ky; jprime++){
kxprime = kxprime_arr[iprime];
kyprime = kyprime_arr[jprime];
for (l=0; l<3; l++){
// E_k_n indexed [iprime][jprime][l]; occupied states only.
eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
if (eigenval <= mu){
// U_k indexed [iprime][jprime][n][l] for n = 0, 1, 2.
evec_element_1 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*0 + l];
evec_element_2 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*1 + l];
evec_element_3 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*2 + l];
px_pot = px - kxprime + q;
py_pot = py - kyprime;
potential_1 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
px_pot = px - kxprime;
py_pot = py - kyprime;
potential_2 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
accumulator += potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3;
// if ((debug > 0) & (i==7) & (j==5) & (iprime==22) & (jprime==33) & (l==0)){
// printf(" h_k[7,5] kprime[22,33], l=0 (CUDA):\n");
// printf(" px is: %f\n", px);
// printf(" py is: %f\n", py);
// printf(" kxprime is: %f\n", kxprime);
// printf(" kyprime is: %f\n", kyprime);
// printf(" potential term 1 is %f\n", potential_1);
// printf(" potential term 2 is %f\n", potential_2);
// printf(" value of term is: %f\n",
// potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3);
// }
}
}
}
}
}
// Multiply the sum by the appropriate prefactor:
accumulator *= prefactor;
output_arr[N_ky*i + j] = accumulator;
// if ((debug > 0) & (i==7) & (j==5)){
// printf(" h_k[7,5] (CUDA): %f\n\n", accumulator);
// }
}
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
// Computes the dressed single-particle dispersion epsilon(p) on a 2D momentum
// grid; one thread per output point (i, j):
//     output_arr[N_ky*i + j] = px^2 + py^2
//         + prefactor * sum_{k', n, l : E_l(k') <= mu} V(p - k' - n*q) * U^2
// with V(p) = -g * |p| * pot_trig_terms, n in {-1, 0, 1}, and
// prefactor = dpx*dpy / (4*pi^2).  Layouts: E_k_n is [iprime][jprime][l]
// (N_l = 3); U_k is [iprime][jprime][n+1][l] (N_n = N_l = 3).  `debug` is
// unused (old printf diagnostics were removed).
__global__ void epsilon_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
                                 double *kxprime_arr, double *kyprime_arr,
                                 double *E_k_n, double *U_k,
                                 double mu, double q, double g, double theta,
                                 int N_kx, int N_ky, int debug)
{
    const int i = threadIdx.x + blockDim.x*blockIdx.x;
    const int j = threadIdx.y + blockDim.y*blockIdx.y;
    // Bounds-check FIRST: the previous version read px_arr[i]/py_arr[j]
    // before this guard, an out-of-bounds global read for threads in the
    // padded part of the launch grid (i >= N_kx or j >= N_ky).
    if (!((i < N_kx) & (j < N_ky))){
        return;
    }
    const int N_n = 3;
    const int N_l = 3;
    int iprime, jprime, n, l;
    const double px = px_arr[i];
    const double py = py_arr[j];
    double kxprime, kyprime;
    double px_pot, py_pot, potential;
    double evec_element, eigenval;
    double accumulator = 0;
    // NOTE(review): the sin^2 term is multiplied by 0, so pot_trig_terms is
    // just cos^2(theta); looks like a deliberately disabled term -- confirm.
    double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
    const double dpx = px_arr[1] - px_arr[0];   // grid step (assumes uniform grid, length >= 2)
    const double dpy = py_arr[1] - py_arr[0];
    const double prefactor = dpx*dpy/(4*M_PI*M_PI);
    for (iprime=0; iprime<N_kx; iprime++){
        for (jprime=0; jprime<N_ky; jprime++){
            kxprime = kxprime_arr[iprime];
            kyprime = kyprime_arr[jprime];
            for (n=-1; n<2; n++){
                px_pot = px - kxprime - n*q;
                py_pot = py - kyprime;
                potential = - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
                for (l=0; l<3; l++){
                    eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
                    if (eigenval <= mu){    // only occupied states contribute
                        evec_element = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*(n+1) + l];
                        accumulator += potential * evec_element*evec_element;
                    }
                }
            }
        }
    }
    // Multiply the sum by the appropriate prefactor:
    accumulator *= prefactor;
    // Add the kinetic term:
    accumulator += (px*px + py*py);
    output_arr[N_ky*i + j] = accumulator;
}
// Computes the mean-field off-diagonal matrix element h(p) on a 2D momentum
// grid; one thread per output point (i, j).  For every occupied eigenstate
// (E_l(k') <= mu) it accumulates
//     potential_1 * U[0,l] * U[1,l] + potential_2 * U[1,l] * U[2,l]
// with potential_1 = V(q) - V(p - k' + q*x_hat), potential_2 = V(q) - V(p - k')
// and V(p) = g * |p| * pot_trig_terms, then multiplies by the integration
// measure dpx*dpy / (4*pi^2) and stores the result at output_arr[N_ky*i + j].
// Layouts: E_k_n is [iprime][jprime][l] (N_l = 3); U_k is
// [iprime][jprime][n][l] (N_n = N_l = 3).  `debug` is unused (old printf
// diagnostics were removed).
__global__ void h_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
                           double *kxprime_arr, double *kyprime_arr,
                           double *E_k_n, double *U_k,
                           double mu, double q, double g, double theta,
                           int N_kx, int N_ky, int debug)
{
    const int i = threadIdx.x + blockDim.x*blockIdx.x;
    const int j = threadIdx.y + blockDim.y*blockIdx.y;
    // Bounds-check FIRST: the previous version read px_arr[i]/py_arr[j]
    // before this guard, an out-of-bounds global read for threads in the
    // padded part of the launch grid (i >= N_kx or j >= N_ky).
    if (!((i < N_kx) & (j < N_ky))){
        return;
    }
    const int N_n = 3;
    const int N_l = 3;
    int iprime, jprime, l;
    double px = px_arr[i];
    double py = py_arr[j];
    double kxprime, kyprime;
    double px_pot, py_pot, potential_1, potential_2;
    double evec_element_1, evec_element_2, evec_element_3, eigenval;
    double accumulator = 0;
    // NOTE(review): the sin^2 term is multiplied by 0, so pot_trig_terms is
    // just cos^2(theta); looks like a deliberately disabled term -- confirm.
    double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
    double V_of_q = g*sqrt(q*q) * pot_trig_terms; // V(px=q, py=0); sqrt(q*q) == |q|
    const double dpx = px_arr[1] - px_arr[0];     // grid step (assumes uniform grid, length >= 2)
    const double dpy = py_arr[1] - py_arr[0];
    const double prefactor = dpx*dpy/(4*M_PI*M_PI);
    for (iprime=0; iprime<N_kx; iprime++){
        for (jprime=0; jprime<N_ky; jprime++){
            kxprime = kxprime_arr[iprime];
            kyprime = kyprime_arr[jprime];
            for (l=0; l<3; l++){
                eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
                if (eigenval <= mu){    // only occupied states contribute
                    evec_element_1 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*0 + l];
                    evec_element_2 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*1 + l];
                    evec_element_3 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*2 + l];
                    // Momentum transfer shifted by +q for the first coupling:
                    px_pot = px - kxprime + q;
                    py_pot = py - kyprime;
                    potential_1 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
                    // Unshifted momentum transfer for the second coupling:
                    px_pot = px - kxprime;
                    py_pot = py - kyprime;
                    potential_2 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
                    accumulator += potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3;
                }
            }
        }
    }
    // Multiply the sum by the appropriate prefactor:
    accumulator *= prefactor;
    output_arr[N_ky*i + j] = accumulator;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
// Computes the dressed single-particle dispersion epsilon(p) on a 2D momentum
// grid; one thread per output point (i, j):
//     output_arr[N_ky*i + j] = px^2 + py^2
//         + prefactor * sum_{k', n, l : E_l(k') <= mu} V(p - k' - n*q) * U^2
// with V(p) = -g * |p| * pot_trig_terms, n in {-1, 0, 1}, and
// prefactor = dpx*dpy / (4*pi^2).  Layouts: E_k_n is [iprime][jprime][l]
// (N_l = 3); U_k is [iprime][jprime][n+1][l] (N_n = N_l = 3).  `debug` is
// unused (old printf diagnostics were removed).
__global__ void epsilon_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
                                 double *kxprime_arr, double *kyprime_arr,
                                 double *E_k_n, double *U_k,
                                 double mu, double q, double g, double theta,
                                 int N_kx, int N_ky, int debug)
{
    const int i = threadIdx.x + blockDim.x*blockIdx.x;
    const int j = threadIdx.y + blockDim.y*blockIdx.y;
    // Bounds-check FIRST: the previous version read px_arr[i]/py_arr[j]
    // before this guard, an out-of-bounds global read for threads in the
    // padded part of the launch grid (i >= N_kx or j >= N_ky).
    if (!((i < N_kx) & (j < N_ky))){
        return;
    }
    const int N_n = 3;
    const int N_l = 3;
    int iprime, jprime, n, l;
    const double px = px_arr[i];
    const double py = py_arr[j];
    double kxprime, kyprime;
    double px_pot, py_pot, potential;
    double evec_element, eigenval;
    double accumulator = 0;
    // NOTE(review): the sin^2 term is multiplied by 0, so pot_trig_terms is
    // just cos^2(theta); looks like a deliberately disabled term -- confirm.
    double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
    const double dpx = px_arr[1] - px_arr[0];   // grid step (assumes uniform grid, length >= 2)
    const double dpy = py_arr[1] - py_arr[0];
    const double prefactor = dpx*dpy/(4*M_PI*M_PI);
    for (iprime=0; iprime<N_kx; iprime++){
        for (jprime=0; jprime<N_ky; jprime++){
            kxprime = kxprime_arr[iprime];
            kyprime = kyprime_arr[jprime];
            for (n=-1; n<2; n++){
                px_pot = px - kxprime - n*q;
                py_pot = py - kyprime;
                potential = - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
                for (l=0; l<3; l++){
                    eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
                    if (eigenval <= mu){    // only occupied states contribute
                        evec_element = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*(n+1) + l];
                        accumulator += potential * evec_element*evec_element;
                    }
                }
            }
        }
    }
    // Multiply the sum by the appropriate prefactor:
    accumulator *= prefactor;
    // Add the kinetic term:
    accumulator += (px*px + py*py);
    output_arr[N_ky*i + j] = accumulator;
}
// Computes the mean-field off-diagonal matrix element h(p) on a 2D momentum
// grid; one thread per output point (i, j).  For every occupied eigenstate
// (E_l(k') <= mu) it accumulates
//     potential_1 * U[0,l] * U[1,l] + potential_2 * U[1,l] * U[2,l]
// with potential_1 = V(q) - V(p - k' + q*x_hat), potential_2 = V(q) - V(p - k')
// and V(p) = g * |p| * pot_trig_terms, then multiplies by the integration
// measure dpx*dpy / (4*pi^2) and stores the result at output_arr[N_ky*i + j].
// Layouts: E_k_n is [iprime][jprime][l] (N_l = 3); U_k is
// [iprime][jprime][n][l] (N_n = N_l = 3).  `debug` is unused (old printf
// diagnostics were removed).
__global__ void h_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
                           double *kxprime_arr, double *kyprime_arr,
                           double *E_k_n, double *U_k,
                           double mu, double q, double g, double theta,
                           int N_kx, int N_ky, int debug)
{
    const int i = threadIdx.x + blockDim.x*blockIdx.x;
    const int j = threadIdx.y + blockDim.y*blockIdx.y;
    // Bounds-check FIRST: the previous version read px_arr[i]/py_arr[j]
    // before this guard, an out-of-bounds global read for threads in the
    // padded part of the launch grid (i >= N_kx or j >= N_ky).
    if (!((i < N_kx) & (j < N_ky))){
        return;
    }
    const int N_n = 3;
    const int N_l = 3;
    int iprime, jprime, l;
    double px = px_arr[i];
    double py = py_arr[j];
    double kxprime, kyprime;
    double px_pot, py_pot, potential_1, potential_2;
    double evec_element_1, evec_element_2, evec_element_3, eigenval;
    double accumulator = 0;
    // NOTE(review): the sin^2 term is multiplied by 0, so pot_trig_terms is
    // just cos^2(theta); looks like a deliberately disabled term -- confirm.
    double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
    double V_of_q = g*sqrt(q*q) * pot_trig_terms; // V(px=q, py=0); sqrt(q*q) == |q|
    const double dpx = px_arr[1] - px_arr[0];     // grid step (assumes uniform grid, length >= 2)
    const double dpy = py_arr[1] - py_arr[0];
    const double prefactor = dpx*dpy/(4*M_PI*M_PI);
    for (iprime=0; iprime<N_kx; iprime++){
        for (jprime=0; jprime<N_ky; jprime++){
            kxprime = kxprime_arr[iprime];
            kyprime = kyprime_arr[jprime];
            for (l=0; l<3; l++){
                eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
                if (eigenval <= mu){    // only occupied states contribute
                    evec_element_1 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*0 + l];
                    evec_element_2 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*1 + l];
                    evec_element_3 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*2 + l];
                    // Momentum transfer shifted by +q for the first coupling:
                    px_pot = px - kxprime + q;
                    py_pot = py - kyprime;
                    potential_1 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
                    // Unshifted momentum transfer for the second coupling:
                    px_pot = px - kxprime;
                    py_pot = py - kyprime;
                    potential_2 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
                    accumulator += potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3;
                }
            }
        }
    }
    // Multiply the sum by the appropriate prefactor:
    accumulator *= prefactor;
    output_arr[N_ky*i + j] = accumulator;
}
|
.text
.file "cuda_module.hip"
#-----------------------------------------------------------------------
# Host-side launch stub for epsilon_of_p_GPU (HIP, SysV AMD64, AT&T).
# In:  rdi..r9 = first 6 pointer args; xmm0..xmm3 = mu, q, g, theta;
#      U_k, N_kx, N_ky, debug arrive on the caller's stack
#      (256/264/272/280(%rsp) after our frame is set up).
# Builds the kernel-argument pointer array at 128(%rsp), pops the
# <<<...>>> launch configuration, and forwards it to hipLaunchKernel.
#-----------------------------------------------------------------------
	.globl	_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii # -- Begin function _Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.p2align	4, 0x90
	.type	_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii,@function
_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii: # @_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_startproc
# %bb.0:
	subq	$248, %rsp
	.cfi_def_cfa_offset 256
	# Spill register-passed args so their addresses can be taken.
	movq	%rdi, 120(%rsp)
	movq	%rsi, 112(%rsp)
	movq	%rdx, 104(%rsp)
	movq	%rcx, 96(%rsp)
	movq	%r8, 88(%rsp)
	movq	%r9, 80(%rsp)
	movsd	%xmm0, 72(%rsp)
	movsd	%xmm1, 64(%rsp)
	movsd	%xmm2, 56(%rsp)
	movsd	%xmm3, 48(%rsp)
	# args[] at 128(%rsp): one pointer per kernel parameter, in order.
	leaq	120(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	112(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	104(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	96(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 168(%rsp)
	# Stack-passed args (U_k, N_kx, N_ky, debug) are referenced in place.
	leaq	256(%rsp), %rax
	movq	%rax, 176(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 184(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 192(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 200(%rsp)
	leaq	48(%rsp), %rax
	movq	%rax, 208(%rsp)
	leaq	264(%rsp), %rax
	movq	%rax, 216(%rsp)
	leaq	272(%rsp), %rax
	movq	%rax, 224(%rsp)
	leaq	280(%rsp), %rax
	movq	%rax, 232(%rsp)
	# Pop the grid/block dims and shmem/stream pushed by the <<<>>> site.
	leaq	32(%rsp), %rdi
	leaq	16(%rsp), %rsi
	leaq	8(%rsp), %rdx
	movq	%rsp, %rcx
	callq	__hipPopCallConfiguration
	movq	32(%rsp), %rsi
	movl	40(%rsp), %edx
	movq	16(%rsp), %rcx
	movl	24(%rsp), %r8d
	leaq	128(%rsp), %r9
	movl	$_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, %edi
	# shared-mem size and stream go on the stack for hipLaunchKernel.
	pushq	(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$264, %rsp                      # imm = 0x108
	.cfi_adjust_cfa_offset -264
	retq
.Lfunc_end0:
	.size	_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, .Lfunc_end0-_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# Host-side launch stub for h_of_p_GPU (HIP, SysV AMD64, AT&T).
# Identical layout to the epsilon_of_p_GPU stub above: spills register
# args, builds the kernel-argument pointer array at 128(%rsp), pops the
# <<<...>>> launch configuration, and calls hipLaunchKernel.
#-----------------------------------------------------------------------
	.globl	_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii # -- Begin function _Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.p2align	4, 0x90
	.type	_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii,@function
_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii: # @_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_startproc
# %bb.0:
	subq	$248, %rsp
	.cfi_def_cfa_offset 256
	# Spill register-passed args so their addresses can be taken.
	movq	%rdi, 120(%rsp)
	movq	%rsi, 112(%rsp)
	movq	%rdx, 104(%rsp)
	movq	%rcx, 96(%rsp)
	movq	%r8, 88(%rsp)
	movq	%r9, 80(%rsp)
	movsd	%xmm0, 72(%rsp)
	movsd	%xmm1, 64(%rsp)
	movsd	%xmm2, 56(%rsp)
	movsd	%xmm3, 48(%rsp)
	# args[] at 128(%rsp): one pointer per kernel parameter, in order.
	leaq	120(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	112(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	104(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	96(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 168(%rsp)
	# Stack-passed args (U_k, N_kx, N_ky, debug) are referenced in place.
	leaq	256(%rsp), %rax
	movq	%rax, 176(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 184(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 192(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 200(%rsp)
	leaq	48(%rsp), %rax
	movq	%rax, 208(%rsp)
	leaq	264(%rsp), %rax
	movq	%rax, 216(%rsp)
	leaq	272(%rsp), %rax
	movq	%rax, 224(%rsp)
	leaq	280(%rsp), %rax
	movq	%rax, 232(%rsp)
	# Pop the grid/block dims and shmem/stream pushed by the <<<>>> site.
	leaq	32(%rsp), %rdi
	leaq	16(%rsp), %rsi
	leaq	8(%rsp), %rdx
	movq	%rsp, %rcx
	callq	__hipPopCallConfiguration
	movq	32(%rsp), %rsi
	movl	40(%rsp), %edx
	movq	16(%rsp), %rcx
	movl	24(%rsp), %r8d
	leaq	128(%rsp), %r9
	movl	$_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, %edi
	# shared-mem size and stream go on the stack for hipLaunchKernel.
	pushq	(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$264, %rsp                      # imm = 0x108
	.cfi_adjust_cfa_offset -264
	retq
.Lfunc_end1:
	.size	_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii, .Lfunc_end1-_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# Module constructor (run from .init_array): registers the fat binary
# once (plain check-then-set; NOTE(review): not thread-safe, which is
# fine for init_array but not for concurrent callers), registers both
# kernels with the runtime, and arranges __hip_module_dtor via atexit.
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB2_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	# Zero the 32 bytes of unused thread/block-limit out-params.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# Module destructor (registered with atexit): unregisters the fat binary
# if it was registered and clears the cached handle.
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB3_2                         # nothing registered -> no-op
# %bb.1:
	pushq	%rax                            # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii,@object # @_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.section .rodata,"a",@progbits
.globl _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.p2align 3, 0x0
_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.quad _Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.size _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, 8
.type _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii,@object # @_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.globl _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.p2align 3, 0x0
_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.quad _Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.size _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.size .L__unnamed_1, 42
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.size .L__unnamed_2, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym _Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00085588_00000000-6_cuda_module.cudafe1.cpp"
.text
#APP
#NO_APP
#-----------------------------------------------------------------------
# atexit hook: unregisters the CUDA fat binary recorded at startup.
# (push/pop of 8 bytes keeps rsp 16-aligned at the call.)
#-----------------------------------------------------------------------
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2059:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# CUDA host-side launch stub for epsilon_of_p_GPU (GCC/nvcc, SysV AMD64).
# Spills register args, builds the kernel-arg pointer array at
# 160(%rsp), pops the <<<...>>> launch configuration, and forwards to
# cudaLaunchKernel.  Guarded by a stack-protector canary.
#-----------------------------------------------------------------------
	.globl	_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
	.type	_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, @function
_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii:
.LFB2081:
	.cfi_startproc
	endbr64
	subq	$296, %rsp
	.cfi_def_cfa_offset 304
	# Spill register-passed args so their addresses can be taken.
	movq	%rdi, 88(%rsp)
	movq	%rsi, 80(%rsp)
	movq	%rdx, 72(%rsp)
	movq	%rcx, 64(%rsp)
	movq	%r8, 56(%rsp)
	movq	%r9, 48(%rsp)
	movsd	%xmm0, 32(%rsp)
	movsd	%xmm1, 24(%rsp)
	movsd	%xmm2, 16(%rsp)
	movsd	%xmm3, 8(%rsp)
	movq	304(%rsp), %rax
	movq	%rax, 40(%rsp)
	# Install the stack-protector canary.
	movq	%fs:40, %rax
	movq	%rax, 280(%rsp)
	xorl	%eax, %eax
	# args[] at 160(%rsp): one pointer per kernel parameter, in order.
	leaq	88(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 168(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 176(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 184(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 192(%rsp)
	leaq	48(%rsp), %rax
	movq	%rax, 200(%rsp)
	leaq	40(%rsp), %rax
	movq	%rax, 208(%rsp)
	leaq	32(%rsp), %rax
	movq	%rax, 216(%rsp)
	leaq	24(%rsp), %rax
	movq	%rax, 224(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 232(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 240(%rsp)
	leaq	312(%rsp), %rax
	movq	%rax, 248(%rsp)
	leaq	320(%rsp), %rax
	movq	%rax, 256(%rsp)
	leaq	328(%rsp), %rax
	movq	%rax, 264(%rsp)
	# Default grid/block dims (1,1,1)x(1,1,1), overwritten by pop below.
	movl	$1, 112(%rsp)
	movl	$1, 116(%rsp)
	movl	$1, 120(%rsp)
	movl	$1, 124(%rsp)
	movl	$1, 128(%rsp)
	movl	$1, 132(%rsp)
	leaq	104(%rsp), %rcx
	leaq	96(%rsp), %rdx
	leaq	124(%rsp), %rsi
	leaq	112(%rsp), %rdi
	call	__cudaPopCallConfiguration
	testl	%eax, %eax
	je	.L7
.L3:
	# Verify the canary before returning.
	movq	280(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L8
	addq	$296, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	# stream and shared-mem size go on the stack for cudaLaunchKernel.
	pushq	104(%rsp)
	.cfi_def_cfa_offset 312
	pushq	104(%rsp)
	.cfi_def_cfa_offset 320
	leaq	176(%rsp), %r9
	movq	140(%rsp), %rcx
	movl	148(%rsp), %r8d
	movq	128(%rsp), %rsi
	movl	136(%rsp), %edx
	leaq	_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 304
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2081:
	.size	_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, .-_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
#-----------------------------------------------------------------------
# Host entry point bearing the kernel's mangled name: re-pushes the four
# stack-passed arguments (debug, N_ky, N_kx, U_k) and calls the device
# stub above.  Called when host code takes the kernel's address.
#-----------------------------------------------------------------------
	.globl	_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.type	_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, @function
_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.LFB2082:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 24
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 32
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 40
	pushq	40(%rsp)
	.cfi_def_cfa_offset 48
	call	_Z55__device_stub__Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2082:
	.size	_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, .-_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
#-----------------------------------------------------------------------
# CUDA host-side launch stub for h_of_p_GPU (GCC/nvcc, SysV AMD64).
# Same layout as the epsilon_of_p_GPU stub above: spill args, build the
# kernel-arg pointer array at 160(%rsp), pop the <<<...>>> launch
# configuration, forward to cudaLaunchKernel; stack-protector guarded.
#-----------------------------------------------------------------------
	.globl	_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
	.type	_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, @function
_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$296, %rsp
	.cfi_def_cfa_offset 304
	movq	%rdi, 88(%rsp)
	movq	%rsi, 80(%rsp)
	movq	%rdx, 72(%rsp)
	movq	%rcx, 64(%rsp)
	movq	%r8, 56(%rsp)
	movq	%r9, 48(%rsp)
	movsd	%xmm0, 32(%rsp)
	movsd	%xmm1, 24(%rsp)
	movsd	%xmm2, 16(%rsp)
	movsd	%xmm3, 8(%rsp)
	movq	304(%rsp), %rax
	movq	%rax, 40(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 280(%rsp)
	xorl	%eax, %eax
	leaq	88(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 168(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 176(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 184(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 192(%rsp)
	leaq	48(%rsp), %rax
	movq	%rax, 200(%rsp)
	leaq	40(%rsp), %rax
	movq	%rax, 208(%rsp)
	leaq	32(%rsp), %rax
	movq	%rax, 216(%rsp)
	leaq	24(%rsp), %rax
	movq	%rax, 224(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 232(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 240(%rsp)
	leaq	312(%rsp), %rax
	movq	%rax, 248(%rsp)
	leaq	320(%rsp), %rax
	movq	%rax, 256(%rsp)
	leaq	328(%rsp), %rax
	movq	%rax, 264(%rsp)
	movl	$1, 112(%rsp)
	movl	$1, 116(%rsp)
	movl	$1, 120(%rsp)
	movl	$1, 124(%rsp)
	movl	$1, 128(%rsp)
	movl	$1, 132(%rsp)
	leaq	104(%rsp), %rcx
	leaq	96(%rsp), %rdx
	leaq	124(%rsp), %rsi
	leaq	112(%rsp), %rdi
	call	__cudaPopCallConfiguration
	testl	%eax, %eax
	je	.L15
.L11:
	movq	280(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L16
	addq	$296, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L15:
	.cfi_restore_state
	pushq	104(%rsp)
	.cfi_def_cfa_offset 312
	pushq	104(%rsp)
	.cfi_def_cfa_offset 320
	leaq	176(%rsp), %r9
	movq	140(%rsp), %rcx
	movl	148(%rsp), %r8d
	movq	128(%rsp), %rsi
	movl	136(%rsp), %edx
	leaq	_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 304
	jmp	.L11
.L16:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2083:
	.size	_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii, .-_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
#-----------------------------------------------------------------------
# Host entry point bearing the kernel's mangled name: re-pushes the four
# stack-passed arguments and calls the h_of_p_GPU device stub.
#-----------------------------------------------------------------------
	.globl	_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.type	_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, @function
_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.LFB2084:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 24
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 32
	movl	40(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 40
	pushq	40(%rsp)
	.cfi_def_cfa_offset 48
	call	_Z49__device_stub__Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiiiPdS_S_S_S_S_S_ddddiii
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2084:
	.size	_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, .-_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.align 8
.LC1:
.string "_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.text
#-----------------------------------------------------------------------
# Static initializer (run from .init_array): registers the fat binary,
# registers both kernels by name with the CUDA runtime (the four pushed
# zeros are the unused tid/bid/bDim/gDim out-params), finalizes
# registration, and schedules the unregister hook via atexit.
#-----------------------------------------------------------------------
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
	.cfi_startproc
	endbr64
	pushq	%rbx
	.cfi_def_cfa_offset 16
	.cfi_offset 3, -16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rbx
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC0(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rsi
	movq	%rax, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC1(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	popq	%rbx
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2086:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "cuda_module.hip"
# Duplicate copy of the HIP host launch stub for epsilon_of_p_GPU (see the
# identically-named function earlier in this dump): spills register args,
# builds the kernel-arg pointer array at 128(%rsp), pops the <<<...>>>
# launch configuration, and forwards to hipLaunchKernel.
	.globl	_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii # -- Begin function _Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.p2align	4, 0x90
	.type	_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii,@function
_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii: # @_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_startproc
# %bb.0:
	subq	$248, %rsp
	.cfi_def_cfa_offset 256
	movq	%rdi, 120(%rsp)
	movq	%rsi, 112(%rsp)
	movq	%rdx, 104(%rsp)
	movq	%rcx, 96(%rsp)
	movq	%r8, 88(%rsp)
	movq	%r9, 80(%rsp)
	movsd	%xmm0, 72(%rsp)
	movsd	%xmm1, 64(%rsp)
	movsd	%xmm2, 56(%rsp)
	movsd	%xmm3, 48(%rsp)
	leaq	120(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	112(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	104(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	96(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 168(%rsp)
	leaq	256(%rsp), %rax
	movq	%rax, 176(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 184(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 192(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 200(%rsp)
	leaq	48(%rsp), %rax
	movq	%rax, 208(%rsp)
	leaq	264(%rsp), %rax
	movq	%rax, 216(%rsp)
	leaq	272(%rsp), %rax
	movq	%rax, 224(%rsp)
	leaq	280(%rsp), %rax
	movq	%rax, 232(%rsp)
	leaq	32(%rsp), %rdi
	leaq	16(%rsp), %rsi
	leaq	8(%rsp), %rdx
	movq	%rsp, %rcx
	callq	__hipPopCallConfiguration
	movq	32(%rsp), %rsi
	movl	40(%rsp), %edx
	movq	16(%rsp), %rcx
	movl	24(%rsp), %r8d
	leaq	128(%rsp), %r9
	movl	$_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, %edi
	pushq	(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$264, %rsp                      # imm = 0x108
	.cfi_adjust_cfa_offset -264
	retq
.Lfunc_end0:
	.size	_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, .Lfunc_end0-_Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_endproc
                                        # -- End function
# Duplicate copy of the HIP host launch stub for h_of_p_GPU (see the
# identically-named function earlier in this dump): spills register args,
# builds the kernel-arg pointer array at 128(%rsp), pops the <<<...>>>
# launch configuration, and forwards to hipLaunchKernel.
	.globl	_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii # -- Begin function _Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.p2align	4, 0x90
	.type	_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii,@function
_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii: # @_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_startproc
# %bb.0:
	subq	$248, %rsp
	.cfi_def_cfa_offset 256
	movq	%rdi, 120(%rsp)
	movq	%rsi, 112(%rsp)
	movq	%rdx, 104(%rsp)
	movq	%rcx, 96(%rsp)
	movq	%r8, 88(%rsp)
	movq	%r9, 80(%rsp)
	movsd	%xmm0, 72(%rsp)
	movsd	%xmm1, 64(%rsp)
	movsd	%xmm2, 56(%rsp)
	movsd	%xmm3, 48(%rsp)
	leaq	120(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	112(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	104(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	96(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 168(%rsp)
	leaq	256(%rsp), %rax
	movq	%rax, 176(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 184(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 192(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 200(%rsp)
	leaq	48(%rsp), %rax
	movq	%rax, 208(%rsp)
	leaq	264(%rsp), %rax
	movq	%rax, 216(%rsp)
	leaq	272(%rsp), %rax
	movq	%rax, 224(%rsp)
	leaq	280(%rsp), %rax
	movq	%rax, 232(%rsp)
	leaq	32(%rsp), %rdi
	leaq	16(%rsp), %rsi
	leaq	8(%rsp), %rdx
	movq	%rsp, %rcx
	callq	__hipPopCallConfiguration
	movq	32(%rsp), %rsi
	movl	40(%rsp), %edx
	movq	16(%rsp), %rcx
	movl	24(%rsp), %r8d
	leaq	128(%rsp), %r9
	movl	$_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, %edi
	pushq	(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$264, %rsp                      # imm = 0x108
	.cfi_adjust_cfa_offset -264
	retq
.Lfunc_end1:
	.size	_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii, .Lfunc_end1-_Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
	.cfi_endproc
                                        # -- End function
# Duplicate copy of __hip_module_ctor (see the identically-named function
# earlier in this dump): one-time fat-binary registration, kernel
# registration, and atexit(__hip_module_dtor).
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB2_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
# Duplicate copy of __hip_module_dtor (see the identically-named function
# earlier in this dump): unregisters the fat binary if registered.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB3_2
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# -- End function
.type _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii,@object # @_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.section .rodata,"a",@progbits
.globl _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.p2align 3, 0x0
_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.quad _Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.size _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii, 8
.type _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii,@object # @_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.globl _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.p2align 3, 0x0
_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii:
.quad _Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.size _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.size .L__unnamed_1, 42
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii"
.size .L__unnamed_2, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym _Z25__device_stub__h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16epsilon_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym _Z10h_of_p_GPUPdS_S_S_S_S_S_ddddiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
/*
Demo for the following:
cudaError_t
cudaGetErrorString
*/
#include <stdio.h>
#include <cuda_runtime.h>
// Kernel: each launched thread prints one greeting tagged with its
// thread index within the block (threadIdx.x).
__global__ void helloFromGPU(void){
printf("Hello from GPU! %d\n", threadIdx.x);
}
// Launches helloFromGPU with 1 block of 10 threads, then resets the
// device and reports the result of the reset through the CUDA error API.
int main(void){
printf("Hello from CPU!\n");
helloFromGPU <<< 1,10 >>>();
// error handling
cudaError_t res; // enumerated error-code type
res = cudaDeviceReset();
if(res != cudaSuccess){
printf("%s\n", cudaGetErrorString(res)); // get description of error
}else{
printf("Success!\n");
}
}
|
code for sm_80
Function : _Z12helloFromGPUv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */
/* 0x000e220000002100 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fe20007ffe0ff */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0050*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0060*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe40007f1e0ff */
/*0070*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0002a60000000a00 */
/*0080*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe200000e06ff */
/*0090*/ STL [R1], R8 ; /* 0x0000000801007387 */
/* 0x0013e80000100800 */
/*00a0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fc60000000000 */
/*00b0*/ MOV R11, 0x120 ; /* 0x00000120000b7802 */
/* 0x000fe40000000f00 */
/*00c0*/ MOV R20, 0xa0 ; /* 0x000000a000147802 */
/* 0x000fc40000000f00 */
/*00d0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00e0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*00f0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0100*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0110*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x004fea0003c00000 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
/*
Demo for the following:
cudaError_t
cudaGetErrorString
*/
#include <stdio.h>
#include <cuda_runtime.h>
__global__ void helloFromGPU(void){
printf("Hello from GPU! %d\n", threadIdx.x);
}
int main(void){
printf("Hello from CPU!\n");
helloFromGPU <<< 1,10 >>>();
// error handling
cudaError_t res; // enumerated error-code type
res = cudaDeviceReset();
if(res != cudaSuccess){
printf("%s\n", cudaGetErrorString(res)); // get description of error
}else{
printf("Success!\n");
}
}
|
.file "tmpxft_0015d3e5_00000000-6_error-codes.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit hook: unregisters the fat binary whose handle was stored in
# _ZL20__cudaFatCubinHandle by the registration constructor below.
# SysV AMD64; leaf-style wrapper around one PLT call.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
# 8-byte adjustment restores 16-byte rsp alignment for the call
subq $8, %rsp
.cfi_def_cfa_offset 16
# arg0 (rdi) = saved fatbin handle
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z12helloFromGPUvv
.type _Z31__device_stub__Z12helloFromGPUvv, @function
# Host-side launch stub for helloFromGPU (no kernel arguments).
# Pops the launch configuration pushed by <<<...>>>; if one is pending
# (__cudaPopCallConfiguration returns 0) it forwards grid/block dims,
# shared-mem size and stream to cudaLaunchKernel.
# Frame: 88 bytes of locals, protected by a stack canary at 72(%rsp).
_Z31__device_stub__Z12helloFromGPUvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
# install stack-protector canary
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
# default grid dim (1,1,1) and block dim (1,1,1) in the local config
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
# out-params: stream, shared-mem size, blockDim, gridDim
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
# verify the canary before returning
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
# push stream and shared-mem size as stack arguments
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
# register args: func, gridDim, blockDim, args array (empty)
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z12helloFromGPUv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv
.globl _Z12helloFromGPUv
.type _Z12helloFromGPUv, @function
# Host symbol carrying the kernel's mangled name; delegates to the
# launch stub. Its address is also what gets registered with the
# CUDA runtime and passed to cudaLaunchKernel.
_Z12helloFromGPUv:
.LFB2083:
.cfi_startproc
endbr64
# keep rsp 16-aligned across the call
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z12helloFromGPUvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12helloFromGPUv, .-_Z12helloFromGPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello from CPU!\n"
.LC1:
.string "%s\n"
.LC2:
.string "Success!\n"
.text
.globl main
.type main, @function
# Program entry (compiled from the CUDA hello-world source):
# prints .LC0, pushes the <<<1,10>>> launch configuration, launches the
# kernel via the device stub, then calls cudaDeviceReset and prints
# either the error string (.LC1 format) or the success message (.LC2).
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
# __printf_chk(2, "Hello from CPU!\n")
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
# build blockDim = (10,1,1) and gridDim = (1,1,1) on the stack
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
# shared mem = 0, stream = 0
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
call cudaDeviceReset@PLT
testl %eax, %eax
je .L13
# reset failed: print cudaGetErrorString(res)
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L14:
# return 0
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
# configuration accepted: launch the kernel through its stub
call _Z31__device_stub__Z12helloFromGPUvv
jmp .L12
.L13:
# reset succeeded: print "Success!\n"
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z12helloFromGPUv"
.text
# Static constructor (run from .init_array before main): registers the
# embedded fat binary and the helloFromGPU kernel with the CUDA runtime,
# then arranges unregistration at exit via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# handle = __cudaRegisterFatBinary(&__fatDeviceText)
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# four NULL stack args for __cudaRegisterFunction (tid/bid/bDim/gDim)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
# thread_limit = -1, wSize = 0; name strings point at .LC3
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z12helloFromGPUv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
# schedule cleanup at process exit
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
/*
Demo for the following:
cudaError_t
cudaGetErrorString
*/
#include <stdio.h>
#include <cuda_runtime.h>
__global__ void helloFromGPU(void){
printf("Hello from GPU! %d\n", threadIdx.x);
}
int main(void){
printf("Hello from CPU!\n");
helloFromGPU <<< 1,10 >>>();
// error handling
cudaError_t res; // enumerated error-code type
res = cudaDeviceReset();
if(res != cudaSuccess){
printf("%s\n", cudaGetErrorString(res)); // get description of error
}else{
printf("Success!\n");
}
}
|
/*
Demo for the following:
cudaError_t
cudaGetErrorString
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
// HIP kernel: each launched thread prints one greeting tagged with its
// thread index within the block (threadIdx.x).
__global__ void helloFromGPU(void){
printf("Hello from GPU! %d\n", threadIdx.x);
}
// HIP port of the CUDA demo: launches helloFromGPU with 1 block of 10
// threads, resets the device, and reports the reset result through the
// HIP error API.
int main(void){
printf("Hello from CPU!\n");
helloFromGPU <<< 1,10 >>>();
// error handling
hipError_t res; // enumerated error-code type
res = hipDeviceReset();
if(res != hipSuccess){
printf("%s\n", hipGetErrorString(res)); // get description of error
}else{
printf("Success!\n");
}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
/*
Demo for the following:
cudaError_t
cudaGetErrorString
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void helloFromGPU(void){
printf("Hello from GPU! %d\n", threadIdx.x);
}
int main(void){
printf("Hello from CPU!\n");
helloFromGPU <<< 1,10 >>>();
// error handling
hipError_t res; // enumerated error-code type
res = hipDeviceReset();
if(res != hipSuccess){
printf("%s\n", hipGetErrorString(res)); // get description of error
}else{
printf("Success!\n");
}
}
|
.text
.file "error-codes.hip"
.globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv
.p2align 4, 0x90
.type _Z27__device_stub__helloFromGPUv,@function
# HIP launch stub: pops the call configuration pushed by <<<...>>> and
# forwards grid dims, block dims, shared-mem size and stream to
# hipLaunchKernel for the _Z12helloFromGPUv kernel handle.
_Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
# out-params: gridDim, blockDim, shared-mem size, stream
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
# register args for hipLaunchKernel
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
# stack args: stream and shared-mem size
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
# Program entry (HIP build): prints .Lstr via puts, pushes the
# <<<1,10>>> configuration, launches the kernel if accepted, then calls
# hipDeviceReset and prints either the error string or .Lstr.1.
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
# gridDim packed as (1,1) in rdi; blockDim (10,1) built as rdi+9 in rdx
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
# shared mem = 0, stream = 0
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
# inlined copy of the device stub: pop config and launch
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceReset
testl %eax, %eax
je .LBB1_3
# %bb.4:
# reset failed: puts(hipGetErrorString(res))
movl %eax, %edi
callq hipGetErrorString
movq %rax, %rdi
jmp .LBB1_5
.LBB1_3:
movl $.Lstr.1, %edi
.LBB1_5:
callq puts@PLT
# return 0
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (run from .init_array): registers the HIP fat
# binary once (guarded by __hip_gpubin_handle), registers the kernel
# symbol, and tail-calls atexit to schedule __hip_module_dtor.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# register the fatbin only on first invocation
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
# zero the four trailing pointer args passed on the stack
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12helloFromGPUv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# schedule __hip_module_dtor at exit; atexit performs the return
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Exit handler: unregisters the fat binary if it was registered and
# clears the cached handle so the work is done at most once.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
# push realigns rsp to 16 for the call
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv
.section .rodata,"a",@progbits
.globl _Z12helloFromGPUv
.p2align 3, 0x0
_Z12helloFromGPUv:
.quad _Z27__device_stub__helloFromGPUv
.size _Z12helloFromGPUv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12helloFromGPUv"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello from CPU!"
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Success!"
.size .Lstr.1, 9
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__helloFromGPUv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12helloFromGPUv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0015d3e5_00000000-6_error-codes.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z12helloFromGPUvv
.type _Z31__device_stub__Z12helloFromGPUvv, @function
_Z31__device_stub__Z12helloFromGPUvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z12helloFromGPUv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv
.globl _Z12helloFromGPUv
.type _Z12helloFromGPUv, @function
_Z12helloFromGPUv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z12helloFromGPUvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12helloFromGPUv, .-_Z12helloFromGPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello from CPU!\n"
.LC1:
.string "%s\n"
.LC2:
.string "Success!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
call cudaDeviceReset@PLT
testl %eax, %eax
je .L13
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L14:
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call _Z31__device_stub__Z12helloFromGPUvv
jmp .L12
.L13:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z12helloFromGPUv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z12helloFromGPUv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "error-codes.hip"
.globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv
.p2align 4, 0x90
.type _Z27__device_stub__helloFromGPUv,@function
_Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceReset
testl %eax, %eax
je .LBB1_3
# %bb.4:
movl %eax, %edi
callq hipGetErrorString
movq %rax, %rdi
jmp .LBB1_5
.LBB1_3:
movl $.Lstr.1, %edi
.LBB1_5:
callq puts@PLT
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12helloFromGPUv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv
.section .rodata,"a",@progbits
.globl _Z12helloFromGPUv
.p2align 3, 0x0
_Z12helloFromGPUv:
.quad _Z27__device_stub__helloFromGPUv
.size _Z12helloFromGPUv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12helloFromGPUv"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello from CPU!"
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Success!"
.size .Lstr.1, 9
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__helloFromGPUv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12helloFromGPUv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
/**
* Inaki Urruta Sanchez
* Pedro Alexandre Simoes dos Reis
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define BLOCK_SIZE 16
/**
* Initialize matrix M with dimension dim with n in all matrix's entries
*/
/**
 * Fill every entry of the dim x dim matrix M with the constant n.
 * M must point to at least dim*dim floats, stored row-major.
 */
void initWith(float* M, int dim, float n) {
    const int count = dim * dim;     // matrix is contiguous, fill linearly
    for (int idx = 0; idx < count; idx++) {
        M[idx] = n;
    }
}
/**
* Initialize matrix M with dimension dim with a random number between 0 and 9 in all matrix's entries
*/
/**
 * Fill the dim x dim matrix M with pseudo-random digits in [0, 9].
 * Entries are written row by row, consuming one rand() call per cell
 * (same call order as before, so sequences are reproducible per seed).
 */
void init(float* M, int dim) {
    for (int r = 0; r < dim; r++) {
        float* row = M + r * dim;
        for (int c = 0; c < dim; c++) {
            row[c] = (float)(rand() % 10);
        }
    }
}
/**
* Multiplies matrix left by the matrix right, both with dimensions dim and stores the result in matrix res
* Operation is done in GPU
*/
// Tiled matrix multiply: res = left * right, all dim x dim, row-major.
// Each BLOCK_SIZE x BLOCK_SIZE thread block walks gridDim.x tiles along
// the shared dimension, staging one tile of each operand in shared
// memory per iteration and accumulating into the per-thread `temp`.
// NOTE(review): the `idx >= dim * dim` guards bound the *linear* index,
// not the row/column; for dims that are not a multiple of BLOCK_SIZE an
// in-bounds but wrong element can be staged — confirm against callers
// (here main always launches with dim = 10, grid rounded up).
__global__
void matrixMul(float* left, float* right, float* res, int dim) {
int i, j, idx;
float temp = 0;
// Operand tiles staged in shared memory, one per block per iteration.
__shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
// Row i of matrix left
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
// Column j of matrix left
j = tileNUM * BLOCK_SIZE + threadIdx.x;
i = tileNUM * BLOCK_SIZE + threadIdx.y;
// Load left[i][j] to shared mem
idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
if (idx >= dim * dim) {
Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
} else {
Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
}
// Load right[i][j] to shared mem
idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
if (idx >= dim * dim) {
Right_shared_t[threadIdx.y][threadIdx.x] = 0;
} else {
Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
}
// Synchronize before computation
__syncthreads();
// Accumulate one tile of res from tiles of left and right in shared mem
for (int k = 0; k < BLOCK_SIZE; k++) {
temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
}
// Synchronize
__syncthreads();
}
// Only threads that map to a real output cell store their result.
if ((row < dim) && (col < dim)) {
// Store accumulated value to res
res[row * dim + col] = temp;
}
}
/**
* Multiplies matrix A by matrix B, both with dimension dim X dim and stores the result in matrix C with dimension dim X dim
* Operation is done in CPU
*/
/**
 * Reference (host) matrix multiply: C = A * B.
 * All three matrices are dim x dim, row-major, float accumulation —
 * identical arithmetic to the original triple loop.
 */
__host__
void matrixMulCPU(float* A, float* B, float* C, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            float acc = 0.0;
            // Dot product of row `row` of A with column `col` of B.
            for (int k = 0; k < dim; k++) {
                acc += A[row * dim + k] * B[k * dim + col];
            }
            C[row * dim + col] = acc;
        }
    }
}
/**
* Given two matrices A and B, both with dimensions dim X dim, prints in stdout if the result stored in matrix C with dimension dim X dim
* is the same as the result given in matrix C_cpu
*/
/**
 * Compare the GPU result C against the CPU reference C_cpu, both
 * dim x dim row-major, and report the verdict on stdout.
 * A and B are unused here (kept for interface compatibility).
 *
 * Fix: the original called integer abs() on a float difference, which
 * truncates toward zero — any mismatch smaller than 1.0 was silently
 * accepted. fabsf keeps the comparison in floating point.
 */
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim) {
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            if (fabsf(C[i * dim + j] - C_cpu[i * dim + j]) > 0.001f) {
                printf("ERROR: Incorrect Results!\n");
                return;
            }
        }
    }
    printf("Everything is OK! :D\n");
}
/**
* Returns the current time in milliseconds
* Used to calculate elapsed time
*/
/**
 * Wall-clock timestamp in seconds (microsecond resolution), built from
 * gettimeofday. Intended for elapsed-time differences, not absolute time.
 */
double cpuTimer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + 1e-6 * (double) now.tv_usec;
}
/**
 * Driver: allocates four N x N float matrices in managed memory,
 * multiplies A*B on the GPU (matrixMul) and on the CPU (matrixMulCPU),
 * times both, checks the results agree, and frees everything.
 * Exits with EXIT_FAILURE on any CUDA error.
 *
 * Fixes vs. original:
 *  - totalMemory was an int accumulating size_t totalGlobalMem (overflow
 *    on any modern GPU); now size_t.
 *  - the memory check compared an element COUNT against BYTES; now both
 *    sides are bytes.
 *  - cpuTimer() returns seconds but the labels said "ms"; elapsed time
 *    is now converted to milliseconds before printing.
 */
int main(int argc, char** argv) {
    // Set random seed
    srand(time(0));

    cudaError_t error;
    cudaDeviceProp prop;

    int numDevices = 0;
    error = cudaGetDeviceCount(&numDevices);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Sum global memory (bytes) over all visible devices.
    size_t totalMemory = 0;
    for (int i = 0; i < numDevices; i++) {
        error = cudaGetDeviceProperties(&prop, i);
        if (error != cudaSuccess) {
            printf("ERROR: %s\n", cudaGetErrorString(error));
            exit(EXIT_FAILURE);
        }
        totalMemory += prop.totalGlobalMem;
    }

    // Matrix size definition and calculation
    const int N = 10;
    size_t size = N * N * sizeof(float);
    // Bytes needed for the three matrices the kernel touches (A, B, C).
    size_t allMatrixSizes = 3 * size;
    if (allMatrixSizes > totalMemory) {
        printf("ERROR");
        exit(EXIT_FAILURE);
    }

    // Matrix allocation (unified memory, reachable from host and device)
    float *A, *B, *C, *C_cpu;
    error = cudaMallocManaged(&A, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&B, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C_cpu, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Matrix initialization with random digits 0..9
    init(A, N);
    init(B, N);

    // Cuda layout definition: one tile per block, grid rounded up
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);

    // Time the GPU multiply (cpuTimer() returns seconds)
    double start = cpuTimer();
    matrixMul<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
    cudaDeviceSynchronize();
    double stop = cpuTimer();

    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Convert seconds -> milliseconds to match the printed unit
    float gpu_milliseconds = (float)((stop - start) * 1000.0);
    printf("Matrix Multiplication @ GPU: %f ms\n", gpu_milliseconds);

    // Time the CPU reference multiply
    double begin = cpuTimer();
    matrixMulCPU(A, B, C_cpu, N);
    double end = cpuTimer();

    float cpu_milliseconds = (float)((end - begin) * 1000.0);
    printf("Matrix Multiplication @ CPU: %f ms\n", cpu_milliseconds);

    checkResult(A, B, C, C_cpu, N);

    // Free memory
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaFree(C_cpu);
    return 0;
}
|
code for sm_80
Function : _Z9matrixMulPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0xc], PT ; /* 0x00000300ff007a0c */
/* 0x000fe20003f25270 */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R17, SR_TID.X ; /* 0x0000000000117919 */
/* 0x000e280000002100 */
/*0060*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0070*/ S2R R16, SR_TID.Y ; /* 0x0000000000107919 */
/* 0x000e620000002200 */
/*0080*/ IMAD R3, R2, c[0x0][0x0], R17 ; /* 0x0000000002037a24 */
/* 0x001fca00078e0211 */
/*0090*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f06270 */
/*00a0*/ IMAD R0, R0, c[0x0][0x4], R16 ; /* 0x0000010000007a24 */
/* 0x002fca00078e0210 */
/*00b0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fe20000706670 */
/*00c0*/ @!P1 BRA 0x4d0 ; /* 0x0000040000009947 */
/* 0x000fd80003800000 */
/*00d0*/ SHF.L.U32 R2, R16.reuse, 0x6, RZ ; /* 0x0000000610027819 */
/* 0x040fe200000006ff */
/*00e0*/ IMAD R16, R16, c[0x0][0x178], R3 ; /* 0x00005e0010107a24 */
/* 0x000fe200078e0203 */
/*00f0*/ MOV R19, RZ ; /* 0x000000ff00137202 */
/* 0x000fe20000000f00 */
/*0100*/ IMAD R18, R0, c[0x0][0x178], R17 ; /* 0x00005e0000127a24 */
/* 0x000fe200078e0211 */
/*0110*/ MOV R9, RZ ; /* 0x000000ff00097202 */
/* 0x000fe20000000f00 */
/*0120*/ ULDC UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe20000000800 */
/*0130*/ LEA R20, R17, R2, 0x2 ; /* 0x0000000211147211 */
/* 0x000fe200078e10ff */
/*0140*/ UIMAD UR4, UR4, UR4, URZ ; /* 0x00000004040472a4 */
/* 0x000fcc000f8e023f */
/*0150*/ ISETP.GE.AND P1, PT, R18, UR4, PT ; /* 0x0000000412007c0c */
/* 0x000fe2000bf26270 */
/*0160*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */
/* 0x000fe200000001ff */
/*0170*/ ISETP.GE.AND P2, PT, R16, UR4, PT ; /* 0x0000000410007c0c */
/* 0x000fc4000bf46270 */
/*0180*/ MOV R27, RZ ; /* 0x000000ff001b7202 */
/* 0x000fd20000000f00 */
/*0190*/ @!P1 MOV R5, 0x4 ; /* 0x0000000400059802 */
/* 0x000fe40000000f00 */
/*01a0*/ @!P2 MOV R11, 0x4 ; /* 0x00000004000ba802 */
/* 0x000fc60000000f00 */
/*01b0*/ @!P1 IMAD.WIDE R4, R18, R5, c[0x0][0x160] ; /* 0x0000580012049625 */
/* 0x000fc800078e0205 */
/*01c0*/ @!P2 IMAD.WIDE R10, R16, R11, c[0x0][0x168] ; /* 0x00005a00100aa625 */
/* 0x000fe200078e020b */
/*01d0*/ @!P1 LDG.E R21, [R4.64] ; /* 0x0000000604159981 */
/* 0x000ea8000c1e1900 */
/*01e0*/ @!P2 LDG.E R27, [R10.64] ; /* 0x000000060a1ba981 */
/* 0x000ee2000c1e1900 */
/*01f0*/ IADD3 R19, R19, 0x1, RZ ; /* 0x0000000113137810 */
/* 0x000fe40007ffe0ff */
/*0200*/ IADD3 R18, R18, 0x10, RZ ; /* 0x0000001012127810 */
/* 0x000fe40007ffe0ff */
/*0210*/ ISETP.GE.U32.AND P1, PT, R19, c[0x0][0xc], PT ; /* 0x0000030013007a0c */
/* 0x000fe20003f26070 */
/*0220*/ STS [R20], R21 ; /* 0x0000001514007388 */
/* 0x004fe80000000800 */
/*0230*/ STS [R20+0x400], R27 ; /* 0x0004001b14007388 */
/* 0x008fe80000000800 */
/*0240*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0250*/ LDS R8, [R17.X4+0x400] ; /* 0x0004000011087984 */
/* 0x000fe80000004800 */
/*0260*/ LDS.128 R12, [R2] ; /* 0x00000000020c7984 */
/* 0x000e280000000c00 */
/*0270*/ LDS R28, [R17.X4+0x440] ; /* 0x00044000111c7984 */
/* 0x000e680000004800 */
/*0280*/ LDS R29, [R17.X4+0x480] ; /* 0x00048000111d7984 */
/* 0x000ea80000004800 */
/*0290*/ LDS R24, [R17.X4+0x4c0] ; /* 0x0004c00011187984 */
/* 0x000ee80000004800 */
/*02a0*/ LDS R25, [R17.X4+0x500] ; /* 0x0005000011197984 */
/* 0x000fe80000004800 */
/*02b0*/ LDS.128 R4, [R2+0x10] ; /* 0x0000100002047984 */
/* 0x000f280000000c00 */
/*02c0*/ LDS R26, [R17.X4+0x540] ; /* 0x00054000111a7984 */
/* 0x000f680000004800 */
/*02d0*/ LDS R23, [R17.X4+0x580] ; /* 0x0005800011177984 */
/* 0x000f680000004800 */
/*02e0*/ LDS R22, [R17.X4+0x5c0] ; /* 0x0005c00011167984 */
/* 0x000f680000004800 */
/*02f0*/ LDS R21, [R17.X4+0x600] ; /* 0x0006000011157984 */
/* 0x000fe20000004800 */
/*0300*/ FFMA R8, R8, R12, R9 ; /* 0x0000000c08087223 */
/* 0x001fc80000000009 */
/*0310*/ FFMA R13, R28, R13, R8 ; /* 0x0000000d1c0d7223 */
/* 0x002fe40000000008 */
/*0320*/ LDS.128 R8, [R2+0x20] ; /* 0x0000200002087984 */
/* 0x000e240000000c00 */
/*0330*/ FFMA R13, R29, R14, R13 ; /* 0x0000000e1d0d7223 */
/* 0x004fc8000000000d */
/*0340*/ FFMA R13, R24, R15, R13 ; /* 0x0000000f180d7223 */
/* 0x008fe4000000000d */
/*0350*/ LDS R24, [R17.X4+0x640] ; /* 0x0006400011187984 */
/* 0x000e640000004800 */
/*0360*/ FFMA R4, R25, R4, R13 ; /* 0x0000000419047223 */
/* 0x010fe4000000000d */
/*0370*/ LDS R25, [R17.X4+0x680] ; /* 0x0006800011197984 */
/* 0x000ea40000004800 */
/*0380*/ FFMA R5, R26, R5, R4 ; /* 0x000000051a057223 */
/* 0x020fe40000000004 */
/*0390*/ LDS R4, [R17.X4+0x6c0] ; /* 0x0006c00011047984 */
/* 0x000ee40000004800 */
/*03a0*/ FFMA R23, R23, R6, R5 ; /* 0x0000000617177223 */
/* 0x000fc40000000005 */
/*03b0*/ LDS R5, [R17.X4+0x700] ; /* 0x0007000011057984 */
/* 0x000fe40000004800 */
/*03c0*/ FFMA R23, R22, R7, R23 ; /* 0x0000000716177223 */
/* 0x000fe40000000017 */
/*03d0*/ LDS.128 R12, [R2+0x30] ; /* 0x00003000020c7984 */
/* 0x000f280000000c00 */
/*03e0*/ LDS R6, [R17.X4+0x740] ; /* 0x0007400011067984 */
/* 0x000f680000004800 */
/*03f0*/ LDS R7, [R17.X4+0x780] ; /* 0x0007800011077984 */
/* 0x000f680000004800 */
/*0400*/ LDS R22, [R17.X4+0x7c0] ; /* 0x0007c00011167984 */
/* 0x000f620000004800 */
/*0410*/ FFMA R8, R21, R8, R23 ; /* 0x0000000815087223 */
/* 0x001fc80000000017 */
/*0420*/ FFMA R8, R24, R9, R8 ; /* 0x0000000918087223 */
/* 0x002fc80000000008 */
/*0430*/ FFMA R8, R25, R10, R8 ; /* 0x0000000a19087223 */
/* 0x004fc80000000008 */
/*0440*/ FFMA R4, R4, R11, R8 ; /* 0x0000000b04047223 */
/* 0x008fc80000000008 */
/*0450*/ FFMA R4, R5, R12, R4 ; /* 0x0000000c05047223 */
/* 0x010fe20000000004 */
/*0460*/ MOV R5, c[0x0][0x178] ; /* 0x00005e0000057a02 */
/* 0x000fc60000000f00 */
/*0470*/ FFMA R4, R6, R13, R4 ; /* 0x0000000d06047223 */
/* 0x020fe20000000004 */
/*0480*/ LEA R16, R5, R16, 0x4 ; /* 0x0000001005107211 */
/* 0x000fc600078e20ff */
/*0490*/ FFMA R4, R7, R14, R4 ; /* 0x0000000e07047223 */
/* 0x000fc80000000004 */
/*04a0*/ FFMA R9, R22, R15, R4 ; /* 0x0000000f16097223 */
/* 0x000fe20000000004 */
/*04b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*04c0*/ @!P1 BRA 0x150 ; /* 0xfffffc8000009947 */
/* 0x000fea000383ffff */
/*04d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*04e0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*04f0*/ IMAD R3, R0, c[0x0][0x178], R3 ; /* 0x00005e0000037a24 */
/* 0x000fd200078e0203 */
/*0500*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0510*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101906 */
/*0520*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0530*/ BRA 0x530; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
/**
* Inaki Urruta Sanchez
* Pedro Alexandre Simoes dos Reis
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
/**
 * Fill the dim x dim row-major matrix M so that every entry equals n.
 *
 * @param M   caller-allocated buffer of at least dim*dim floats
 * @param dim matrix dimension; non-positive values leave M untouched
 * @param n   value stored into every entry
 */
void initWith(float* M, int dim, float n) {
    for (int r = 0; r < dim; ++r) {
        float* row = M + r * dim;   // start of row r in the flat buffer
        for (int c = 0; c < dim; ++c) {
            row[c] = n;
        }
    }
}
/**
 * Fill the dim x dim row-major matrix M with pseudo-random integer values
 * in [0, 9]. One rand() draw is consumed per entry, in row-major order,
 * so seeding with srand() beforehand makes the contents reproducible.
 */
void init(float* M, int dim) {
    for (int r = 0; r < dim; ++r) {
        for (int c = 0; c < dim; ++c) {
            M[r * dim + c] = (float)(rand() % 10);
        }
    }
}
/**
 * Tiled GPU matrix multiply: res = left * right, where all three matrices
 * are dim x dim and stored row-major in (managed) global memory.
 *
 * Each BLOCK_SIZE x BLOCK_SIZE thread block cooperatively stages one tile
 * of `left` and one tile of `right` into shared memory per loop iteration,
 * then every thread accumulates the partial dot product for its single
 * (row, col) output element.
 *
 * NOTE(review): the tile loop runs over gridDim.x, so correctness relies on
 * the launch using gridDim.x == ceil(dim / BLOCK_SIZE) — confirm at call
 * sites (here N = 10, BLOCK_SIZE = 16, grid = 1x1).
 * NOTE(review): the `idx >= dim * dim` guards bound the flat index against
 * the total element count only, not against the current row/column, so for
 * dim not a multiple of BLOCK_SIZE some staged values can come from the
 * next row instead of being zero-padded — verify this is tolerable for the
 * out-of-range threads (their results are discarded by the final bounds
 * check).
 */
__global__
void matrixMul(float* left, float* right, float* res, int dim) {
    int i, j, idx;
    float temp = 0;  // running dot product for this thread's output element
    // Per-block staging buffers: one tile of each input matrix.
    __shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
    // Row i of matrix left
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
        // Column j of matrix left
        j = tileNUM * BLOCK_SIZE + threadIdx.x;
        i = tileNUM * BLOCK_SIZE + threadIdx.y;
        // Load left[i][j] to shared mem
        idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
        if (idx >= dim * dim) {
            Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
        } else {
            Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
        }
        // Load right[i][j] to shared mem
        idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
        if (idx >= dim * dim) {
            Right_shared_t[threadIdx.y][threadIdx.x] = 0;
        } else {
            Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
        }
        // Synchronize before computation: the whole tile must be staged.
        __syncthreads();
        // Accumulate one tile of res from tiles of left and right in shared mem
        for (int k = 0; k < BLOCK_SIZE; k++) {
            temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
        }
        // Synchronize again so no thread overwrites a tile still being read.
        __syncthreads();
    }
    if ((row < dim) && (col < dim)) {
        // Store accumulated value to res
        res[row * dim + col] = temp;
    }
}
/**
 * Naive O(dim^3) host matrix multiply: C = A * B, all matrices dim x dim
 * and row-major. Serves as the reference result used to validate the GPU
 * kernel's output.
 */
__host__
void matrixMulCPU(float* A, float* B, float* C, int dim) {
    for (int row = 0; row < dim; ++row) {
        const float* rowA = A + row * dim;  // row of A reused across columns
        float* rowC = C + row * dim;
        for (int col = 0; col < dim; ++col) {
            float acc = 0.0f;
            for (int k = 0; k < dim; ++k) {
                acc += rowA[k] * B[k * dim + col];
            }
            rowC[col] = acc;
        }
    }
}
/**
 * Compare the GPU result C against the CPU reference C_cpu entry by entry
 * (both dim x dim, row-major) and report the verdict on stdout.
 *
 * A and B are unused; they are kept so existing call sites compile
 * unchanged.
 *
 * Prints "ERROR: Incorrect Results!" and returns on the first pair of
 * entries whose absolute difference exceeds 1e-3; otherwise prints a
 * success message.
 */
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim) {
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            // Explicit float absolute value: the previous abs() call binds
            // to the integer overload on toolchains that do not inject a
            // float abs() into the global namespace, truncating every
            // sub-1.0 difference to 0 and defeating the tolerance test.
            float diff = C[i * dim + j] - C_cpu[i * dim + j];
            if (diff < 0.0f) {
                diff = -diff;
            }
            if (diff > 0.001) {
                printf("ERROR: Incorrect Results!\n");
                return;
            }
        }
    }
    printf("Everything is OK! :D\n");
}
/**
 * Wall-clock timestamp in SECONDS since the Unix epoch, with microsecond
 * resolution via gettimeofday(). Note: the unit is seconds, even though
 * callers name their differences "milliseconds".
 */
double cpuTimer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + (double) now.tv_usec * 1e-6;
}
/**
 * Driver: multiplies two random N x N (N = 10) matrices with the tiled GPU
 * kernel and with the naive CPU routine, reports both timings, checks the
 * GPU result against the CPU reference, and releases all managed memory.
 *
 * Exits with EXIT_FAILURE on any CUDA error or if the attached devices do
 * not report enough global memory for the matrices; returns 0 on success.
 */
int main(int argc, char** argv) {
    // Set random seed
    srand(time(0));
    cudaError_t error;
    cudaDeviceProp prop;
    int numDevices = 0;
    error = cudaGetDeviceCount(&numDevices);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Total global memory across all devices, in BYTES. Must be size_t:
    // totalGlobalMem alone exceeds INT_MAX on any GPU with >= 2 GB, so the
    // previous int accumulator overflowed and could make the capacity
    // check below fail spuriously.
    size_t totalMemory = 0;
    for (int i = 0; i < numDevices; i++) {
        error = cudaGetDeviceProperties(&prop, i);
        if (error != cudaSuccess) {
            printf("ERROR: %s\n", cudaGetErrorString(error));
            exit(EXIT_FAILURE);
        }
        totalMemory += prop.totalGlobalMem;
    }
    // Matrix size definition and calculation
    const int N = 10;
    size_t size = N * N * sizeof(float);
    // Bytes required by the four managed allocations below (A, B, C,
    // C_cpu). The previous check compared an element count (N*N*3)
    // against bytes and ignored C_cpu entirely.
    size_t allMatrixSizes = 4 * size;
    if (allMatrixSizes > totalMemory) {
        printf("ERROR");
        exit(EXIT_FAILURE);
    }
    // Matrix allocation in unified (managed) memory, visible to CPU + GPU
    float *A, *B, *C, *C_cpu;
    error = cudaMallocManaged(&A, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&B, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C_cpu, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Matrix initialization with random digits 0..9
    init(A, N);
    init(B, N);
    // Cuda layout definition: one 16x16 block covers the 10x10 problem
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Time the GPU multiply, including the synchronize
    double start = cpuTimer();
    matrixMul<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
    cudaDeviceSynchronize();
    double stop = cpuTimer();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // cpuTimer() returns seconds; scale by 1000 so the printed value
    // actually matches the "ms" label (previously it printed seconds).
    float gpu_milliseconds = (stop - start) * 1000.0;
    printf("Matrix Multiplication @ GPU: %f ms\n", gpu_milliseconds);
    // Time the CPU reference multiply
    double begin = cpuTimer();
    matrixMulCPU(A, B, C_cpu, N);
    double end = cpuTimer();
    float cpu_milliseconds = (end - begin) * 1000.0;
    printf("Matrix Multiplication @ CPU: %f ms\n", cpu_milliseconds);
    // Validate GPU result against the CPU reference
    checkResult(A, B, C, C_cpu, N);
    // Free managed memory
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaFree(C_cpu);
    return 0;
}
|
.file "tmpxft_00093563_00000000-6_matrix_mul_naive.cudafe1.cpp"
.text
#APP
#NO_APP
#-----------------------------------------------------------------------
# static void __cudaUnregisterBinaryUtil(void)
# atexit() hook (registered in __sti____cudaRegisterAll below) that hands
# the saved fat-binary handle back to the CUDA runtime on shutdown.
#-----------------------------------------------------------------------
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# realign rsp to 16 for the call (SysV)
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = saved handle (RIP-relative)
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2065:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# void initWith(float *M, int dim, float n)
# ABI:  SysV AMD64 — rdi = M, esi = dim, xmm0 = n
# Fills the dim x dim row-major matrix with n. Leaf function, no frame.
# Loop shape: rdx tracks one-past-end of the current row; the inner loop
# walks rax from (rdx - rowbytes) up to rdx storing n at each float.
#-----------------------------------------------------------------------
	.globl	_Z8initWithPfif
	.type	_Z8initWithPfif, @function
_Z8initWithPfif:
.LFB2057:
	.cfi_startproc
	endbr64
	testl	%esi, %esi		# dim <= 0: nothing to fill
	jle	.L3
	movslq	%esi, %rax		# rax = (int64) dim
	leaq	0(,%rax,4), %r8		# r8  = row stride in bytes (dim * 4)
	leaq	(%rdi,%r8), %rdx	# rdx = one-past-end of first row
	negq	%rax
	leaq	0(,%rax,4), %rdi	# rdi = -row stride (row start = end - stride)
	movl	$0, %ecx		# ecx = row counter i
.L5:					# per-row loop
	leaq	(%rdx,%rdi), %rax	# rax = start of current row
.L6:					# per-element loop
	movss	%xmm0, (%rax)		# *p = n
	addq	$4, %rax		# ++p
	cmpq	%rdx, %rax
	jne	.L6
	addl	$1, %ecx		# ++i
	addq	%r8, %rdx		# advance end pointer to next row
	cmpl	%ecx, %esi
	jne	.L5
.L3:
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z8initWithPfif, .-_Z8initWithPfif
.globl _Z4initPfi
.type _Z4initPfi, @function
_Z4initPfi:
.LFB2058:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L14
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %esi, %r14d
movslq %esi, %r13
leaq 0(,%r13,4), %r15
leaq (%rdi,%r15), %rbp
negq %r13
salq $2, %r13
movl $0, %r12d
.L10:
leaq 0(%rbp,%r13), %rbx
.L11:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L11
addl $1, %r12d
addq %r15, %rbp
cmpl %r12d, %r14d
jne .L10
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2058:
.size _Z4initPfi, .-_Z4initPfi
.globl _Z12matrixMulCPUPfS_S_i
.type _Z12matrixMulCPUPfS_S_i, @function
_Z12matrixMulCPUPfS_S_i:
.LFB2059:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L25
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rsi, %rbx
movq %rdx, %r9
movl %ecx, %r12d
movslq %ecx, %r11
leaq 0(,%r11,4), %rcx
movq %rdi, %r10
leaq (%rdi,%rcx), %rsi
movl $0, %ebp
.L19:
movq %rbx, %r8
movl $0, %edi
.L22:
movq %r8, %rdx
movq %r10, %rax
pxor %xmm1, %xmm1
.L20:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rcx, %rdx
cmpq %rsi, %rax
jne .L20
movss %xmm1, (%r9,%rdi,4)
addq $1, %rdi
addq $4, %r8
cmpq %r11, %rdi
jne .L22
addl $1, %ebp
addq %rcx, %r9
addq %rcx, %r10
addq %rcx, %rsi
cmpl %ebp, %r12d
jne .L19
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE2059:
.size _Z12matrixMulCPUPfS_S_i, .-_Z12matrixMulCPUPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "ERROR: Incorrect Results!\n"
.LC4:
.string "Everything is OK! :D\n"
.text
.globl _Z11checkResultPfS_S_S_i
.type _Z11checkResultPfS_S_S_i, @function
_Z11checkResultPfS_S_S_i:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
testl %r8d, %r8d
jle .L29
movq %rdx, %rsi
movslq %r8d, %rax
leaq 0(,%rax,4), %r10
negq %rax
leaq 0(,%rax,4), %rdi
movq %r10, %rdx
movl $0, %r9d
movss .LC1(%rip), %xmm2
movsd .LC2(%rip), %xmm1
.L30:
leaq (%rdx,%rdi), %rax
.L34:
movss (%rsi,%rax), %xmm0
subss (%rcx,%rax), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
ja .L39
addq $4, %rax
cmpq %rdx, %rax
jne .L34
addl $1, %r9d
addq %r10, %rdx
cmpl %r9d, %r8d
jne .L30
.L29:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L28
.L39:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L28:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z11checkResultPfS_S_S_i, .-_Z11checkResultPfS_S_S_i
#-----------------------------------------------------------------------
# double cpuTimer(void)
# Returns gettimeofday() as seconds: tv_sec + tv_usec * 1e-6 (.LC5 holds
# the double constant 1e-6). 40-byte frame: struct timeval at (rsp),
# stack-protector canary at 24(rsp). Result in xmm0.
#-----------------------------------------------------------------------
	.globl	_Z8cpuTimerv
	.type	_Z8cpuTimerv, @function
_Z8cpuTimerv:
.LFB2061:
	.cfi_startproc
	endbr64
	subq	$40, %rsp		# locals: timeval + canary, keeps rsp 16-aligned
	.cfi_def_cfa_offset 48
	movq	%fs:40, %rax		# install stack-protector canary
	movq	%rax, 24(%rsp)
	xorl	%eax, %eax
	movq	%rsp, %rdi		# arg0 = &clock
	movl	$0, %esi		# arg1 = NULL (no timezone)
	call	gettimeofday@PLT
	pxor	%xmm0, %xmm0		# break false dependency before cvtsi2sd
	cvtsi2sdq	8(%rsp), %xmm0	# (double) tv_usec
	mulsd	.LC5(%rip), %xmm0	# * 1e-6
	pxor	%xmm1, %xmm1
	cvtsi2sdq	(%rsp), %xmm1	# (double) tv_sec
	addsd	%xmm1, %xmm0		# result = sec + usec * 1e-6
	movq	24(%rsp), %rax		# verify canary before returning
	subq	%fs:40, %rax
	jne	.L43
	addq	$40, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L43:
	.cfi_restore_state
	call	__stack_chk_fail@PLT	# canary mismatch: abort
	.cfi_endproc
.LFE2061:
	.size	_Z8cpuTimerv, .-_Z8cpuTimerv
.globl _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
.type _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, @function
_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L48
.L44:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L49
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9matrixMulPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L44
.L49:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
#-----------------------------------------------------------------------
# Host-side trampoline emitted by nvcc for the matrixMul kernel symbol:
# forwards to the device stub, which pops the <<<...>>> launch
# configuration and calls cudaLaunchKernel.
#-----------------------------------------------------------------------
	.globl	_Z9matrixMulPfS_S_i
	.type	_Z9matrixMulPfS_S_i, @function
_Z9matrixMulPfS_S_i:
.LFB2088:
	.cfi_startproc
	endbr64
	subq	$8, %rsp		# realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	call	_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2088:
	.size	_Z9matrixMulPfS_S_i, .-_Z9matrixMulPfS_S_i
.section .rodata.str1.1
.LC6:
.string "ERROR: %s\n"
.LC7:
.string "ERROR"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC8:
.string "Matrix Multiplication @ GPU: %f ms\n"
.align 8
.LC9:
.string "Matrix Multiplication @ CPU: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $1120, %rsp
.cfi_def_cfa_offset 1152
movq %fs:40, %rax
movq %rax, 1112(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl $0, 20(%rsp)
leaq 20(%rsp), %rdi
call cudaGetDeviceCount@PLT
testl %eax, %eax
jne .L53
movl $0, %ebx
movl $0, %ebp
leaq 80(%rsp), %r12
cmpl $0, 20(%rsp)
jle .L55
.L54:
movl %ebx, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L69
movl %ebp, %eax
addl 368(%rsp), %eax
movl %eax, %ebp
addl $1, %ebx
cmpl %ebx, 20(%rsp)
jg .L54
cmpl $299, %eax
jle .L55
leaq 24(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L70
leaq 32(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L71
leaq 40(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L72
leaq 48(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L73
movl $10, %esi
movq 24(%rsp), %rdi
call _Z4initPfi
movl $10, %esi
movq 32(%rsp), %rdi
call _Z4initPfi
movl $16, 56(%rsp)
movl $16, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
call _Z8cpuTimerv
movsd %xmm0, (%rsp)
movl 64(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 56(%rsp), %rdx
movq 68(%rsp), %rdi
movl 76(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L62:
call cudaDeviceSynchronize@PLT
call _Z8cpuTimerv
movsd %xmm0, 8(%rsp)
call cudaGetLastError@PLT
testl %eax, %eax
jne .L75
movsd 8(%rsp), %xmm0
subsd (%rsp), %xmm0
cvtsd2ss %xmm0, %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
call _Z8cpuTimerv
movsd %xmm0, (%rsp)
movl $10, %ecx
movq 48(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z12matrixMulCPUPfS_S_i
call _Z8cpuTimerv
subsd (%rsp), %xmm0
cvtsd2ss %xmm0, %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $10, %r8d
movq 48(%rsp), %rcx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z11checkResultPfS_S_S_i
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 1112(%rsp), %rax
subq %fs:40, %rax
jne .L76
movl $0, %eax
addq $1120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L69:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L55:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L70:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L71:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L72:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L73:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L74:
movl $10, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
jmp .L62
.L75:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L76:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z9matrixMulPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z9matrixMulPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long -755914244
.long 1062232653
.align 8
.LC5:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
/**
* Inaki Urruta Sanchez
* Pedro Alexandre Simoes dos Reis
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
/**
 * Set every entry of the dim x dim row-major matrix M to the value n.
 *
 * @param M   caller-allocated buffer of at least dim*dim floats
 * @param dim matrix dimension; non-positive values write nothing
 * @param n   fill value
 */
void initWith(float* M, int dim, float n) {
    if (dim <= 0) {
        return;  // matches the original empty loops for non-positive dim
    }
    // dim*dim contiguous floats — a single flat loop does the same writes
    // in the same (row-major) order.
    const int total = dim * dim;
    for (int idx = 0; idx < total; ++idx) {
        M[idx] = n;
    }
}
/**
 * Populate the dim x dim row-major matrix M with pseudo-random digits:
 * one rand() % 10 draw per entry, consumed in row-major order. Seed with
 * srand() beforehand for reproducible contents.
 */
void init(float* M, int dim) {
    for (int row = 0; row < dim; ++row) {
        float* base = M + row * dim;
        for (int col = 0; col < dim; ++col) {
            base[col] = (float)(rand() % 10);
        }
    }
}
/**
 * Tiled GPU matrix multiply: res = left * right, where all three matrices
 * are dim x dim and stored row-major in (managed) global memory.
 *
 * Each BLOCK_SIZE x BLOCK_SIZE thread block cooperatively stages one tile
 * of `left` and one tile of `right` into shared memory per loop iteration,
 * then every thread accumulates the partial dot product for its single
 * (row, col) output element.
 *
 * NOTE(review): the tile loop runs over gridDim.x, so correctness relies on
 * the launch using gridDim.x == ceil(dim / BLOCK_SIZE) — confirm at call
 * sites (here N = 10, BLOCK_SIZE = 16, grid = 1x1).
 * NOTE(review): the `idx >= dim * dim` guards bound the flat index against
 * the total element count only, not against the current row/column, so for
 * dim not a multiple of BLOCK_SIZE some staged values can come from the
 * next row instead of being zero-padded — verify this is tolerable for the
 * out-of-range threads (their results are discarded by the final bounds
 * check).
 */
__global__
void matrixMul(float* left, float* right, float* res, int dim) {
    int i, j, idx;
    float temp = 0;  // running dot product for this thread's output element
    // Per-block staging buffers: one tile of each input matrix.
    __shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
    // Row i of matrix left
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
        // Column j of matrix left
        j = tileNUM * BLOCK_SIZE + threadIdx.x;
        i = tileNUM * BLOCK_SIZE + threadIdx.y;
        // Load left[i][j] to shared mem
        idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
        if (idx >= dim * dim) {
            Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
        } else {
            Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
        }
        // Load right[i][j] to shared mem
        idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
        if (idx >= dim * dim) {
            Right_shared_t[threadIdx.y][threadIdx.x] = 0;
        } else {
            Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
        }
        // Synchronize before computation: the whole tile must be staged.
        __syncthreads();
        // Accumulate one tile of res from tiles of left and right in shared mem
        for (int k = 0; k < BLOCK_SIZE; k++) {
            temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
        }
        // Synchronize again so no thread overwrites a tile still being read.
        __syncthreads();
    }
    if ((row < dim) && (col < dim)) {
        // Store accumulated value to res
        res[row * dim + col] = temp;
    }
}
/**
 * Naive O(dim^3) host matrix multiply: C = A * B, all matrices dim x dim,
 * row-major. This is the reference implementation the GPU kernel is
 * validated against.
 */
__host__
void matrixMulCPU(float* A, float* B, float* C, int dim) {
    for (int i = 0; i < dim; ++i) {
        for (int j = 0; j < dim; ++j) {
            float sum = 0.0f;
            // Dot product of row i of A with column j of B, in the same
            // ascending-k order as before (identical float rounding).
            for (int k = 0; k < dim; ++k) {
                sum += A[i * dim + k] * B[k * dim + j];
            }
            C[i * dim + j] = sum;
        }
    }
}
/**
 * Entry-by-entry comparison of the GPU result C against the CPU reference
 * C_cpu (both dim x dim, row-major), with the verdict printed to stdout.
 *
 * A and B are unused; they remain in the signature for call-site
 * compatibility.
 *
 * Prints "ERROR: Incorrect Results!" and returns on the first pair of
 * entries differing by more than 1e-3 in absolute value; otherwise prints
 * a success message.
 */
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim) {
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            // Explicit float absolute value: the previous abs() call binds
            // to the integer overload on toolchains that do not inject a
            // float abs() into the global namespace, truncating every
            // sub-1.0 difference to 0 and defeating the tolerance test.
            float diff = C[i * dim + j] - C_cpu[i * dim + j];
            if (diff < 0.0f) {
                diff = -diff;
            }
            if (diff > 0.001) {
                printf("ERROR: Incorrect Results!\n");
                return;
            }
        }
    }
    printf("Everything is OK! :D\n");
}
/**
 * Wall-clock timestamp in seconds (microsecond resolution) since the Unix
 * epoch. Note: the unit is seconds, even though callers name their
 * differences "milliseconds".
 */
double cpuTimer() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    double seconds = (double) tv.tv_sec;
    double micros = (double) tv.tv_usec;
    return seconds + micros * 1e-6;
}
/**
 * Driver: multiplies two random N x N (N = 10) matrices with the tiled GPU
 * kernel and with the naive CPU routine, reports both timings, checks the
 * GPU result against the CPU reference, and releases all managed memory.
 *
 * Exits with EXIT_FAILURE on any CUDA error or if the attached devices do
 * not report enough global memory for the matrices; returns 0 on success.
 */
int main(int argc, char** argv) {
    // Set random seed
    srand(time(0));
    cudaError_t error;
    cudaDeviceProp prop;
    int numDevices = 0;
    error = cudaGetDeviceCount(&numDevices);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Total global memory across all devices, in BYTES. Must be size_t:
    // totalGlobalMem alone exceeds INT_MAX on any GPU with >= 2 GB, so the
    // previous int accumulator overflowed and could make the capacity
    // check below fail spuriously.
    size_t totalMemory = 0;
    for (int i = 0; i < numDevices; i++) {
        error = cudaGetDeviceProperties(&prop, i);
        if (error != cudaSuccess) {
            printf("ERROR: %s\n", cudaGetErrorString(error));
            exit(EXIT_FAILURE);
        }
        totalMemory += prop.totalGlobalMem;
    }
    // Matrix size definition and calculation
    const int N = 10;
    size_t size = N * N * sizeof(float);
    // Bytes required by the four managed allocations below (A, B, C,
    // C_cpu). The previous check compared an element count (N*N*3)
    // against bytes and ignored C_cpu entirely.
    size_t allMatrixSizes = 4 * size;
    if (allMatrixSizes > totalMemory) {
        printf("ERROR");
        exit(EXIT_FAILURE);
    }
    // Matrix allocation in unified (managed) memory, visible to CPU + GPU
    float *A, *B, *C, *C_cpu;
    error = cudaMallocManaged(&A, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&B, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C_cpu, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Matrix initialization with random digits 0..9
    init(A, N);
    init(B, N);
    // Cuda layout definition: one 16x16 block covers the 10x10 problem
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Time the GPU multiply, including the synchronize
    double start = cpuTimer();
    matrixMul<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
    cudaDeviceSynchronize();
    double stop = cpuTimer();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // cpuTimer() returns seconds; scale by 1000 so the printed value
    // actually matches the "ms" label (previously it printed seconds).
    float gpu_milliseconds = (stop - start) * 1000.0;
    printf("Matrix Multiplication @ GPU: %f ms\n", gpu_milliseconds);
    // Time the CPU reference multiply
    double begin = cpuTimer();
    matrixMulCPU(A, B, C_cpu, N);
    double end = cpuTimer();
    float cpu_milliseconds = (end - begin) * 1000.0;
    printf("Matrix Multiplication @ CPU: %f ms\n", cpu_milliseconds);
    // Validate GPU result against the CPU reference
    checkResult(A, B, C, C_cpu, N);
    // Free managed memory
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaFree(C_cpu);
    return 0;
}
|
/**
* Inaki Urruta Sanchez
* Pedro Alexandre Simoes dos Reis
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
/**
 * Fills the dim x dim, row-major matrix M with the constant value n.
 */
void initWith(float* M, int dim, float n) {
    for (int row = 0; row < dim; row++) {
        float* rowStart = M + row * dim;
        for (int col = 0; col < dim; col++) {
            rowStart[col] = n;
        }
    }
}
/**
 * Fills the dim x dim, row-major matrix M with pseudo-random values in
 * [0, 9] drawn from rand(); call srand() beforehand for reproducibility.
 */
void init(float* M, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            M[row * dim + col] = (float) (rand() % 10);
        }
    }
}
/**
 * Tiled matrix multiply on the GPU: res = left * right for dim x dim,
 * row-major matrices. Each 16x16 (BLOCK_SIZE) thread block stages one tile
 * of `left` and one tile of `right` in shared memory, synchronizes, then
 * accumulates the partial dot product for its (row, col) output element
 * across all gridDim.x tiles.
 *
 * NOTE(review): the out-of-range guards compare a linear index against
 * dim * dim, which only catches reads past the end of the whole matrix.
 * For dim values that are not a multiple of BLOCK_SIZE, in-range but
 * wrong-row/wrong-column elements can still be loaded into the tiles —
 * TODO confirm the intended behavior for such sizes (the host code
 * launches with N = 10, i.e. a single partial tile).
 */
__global__
void matrixMul(float* left, float* right, float* res, int dim) {
    int i, j, idx;
    float temp = 0;   // per-thread accumulator for res[row][col]
    __shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
    // Row i of matrix left
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Walk the tiles along the shared dimension (one per x-block).
    for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
        // Column j of matrix left
        j = tileNUM * BLOCK_SIZE + threadIdx.x;
        i = tileNUM * BLOCK_SIZE + threadIdx.y;
        // Load left[i][j] to shared mem; zero-fill when past the matrix end.
        idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
        if (idx >= dim * dim) {
            Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
        } else {
            Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
        }
        // Load right[i][j] to shared mem; zero-fill when past the matrix end.
        idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
        if (idx >= dim * dim) {
            Right_shared_t[threadIdx.y][threadIdx.x] = 0;
        } else {
            Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
        }
        // Synchronize so both tiles are fully populated before use.
        __syncthreads();
        // Accumulate one tile of res from tiles of left and right in shared mem
        for (int k = 0; k < BLOCK_SIZE; k++) {
            temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
        }
        // Synchronize before the tiles are overwritten on the next iteration.
        __syncthreads();
    }
    if ((row < dim) && (col < dim)) {
        // Store accumulated value to res
        res[row * dim + col] = temp;
    }
}
/**
 * Computes C = A * B for dim x dim, row-major matrices entirely on the
 * CPU (naive triple loop). Serves as the reference implementation for
 * validating the GPU result.
 */
__host__
void matrixMulCPU(float* A, float* B, float* C, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            float acc = 0.0;
            // Dot product of row `row` of A with column `col` of B.
            for (int k = 0; k < dim; k++) {
                acc += A[row * dim + k] * B[k * dim + col];
            }
            C[row * dim + col] = acc;
        }
    }
}
/**
 * Compares the GPU result C against the CPU reference C_cpu (both dim x dim,
 * row-major) and prints whether every entry matches within an absolute
 * tolerance of 0.001. A and B are accepted for signature compatibility but
 * are not inspected.
 */
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim) {
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            // Compute |C - C_cpu| explicitly. The original used abs(),
            // which can resolve to the integer overload (guaranteed in C,
            // header-dependent in C++) and truncate any difference smaller
            // than 1.0 to zero, silently hiding real mismatches.
            float diff = C[i * dim + j] - C_cpu[i * dim + j];
            if (diff < 0.0f) {
                diff = -diff;
            }
            if (diff > 0.001) {
                printf("ERROR: Incorrect Results!\n");
                return;
            }
        }
    }
    printf("Everything is OK! :D\n");
}
/**
 * Returns the current wall-clock time in seconds (tv_sec plus the
 * fractional part from tv_usec). Callers compute elapsed time as the
 * difference of two samples.
 */
double cpuTimer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double fractional = (double) now.tv_usec * 1e-6;
    return seconds + fractional;
}
int main(int argc, char** argv) {
    // Seed the host RNG used by init().
    srand(time(0));
    hipError_t error;
    hipDeviceProp_t prop;
    int numDevices = 0;
    error = hipGetDeviceCount(&numDevices);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Total global memory in bytes across all devices.
    // Fix: was `int`, which overflows for any GPU with more than 2 GB.
    size_t totalMemory = 0;
    for (int i = 0; i < numDevices; i++) {
        error = hipGetDeviceProperties(&prop, i);
        if (error != hipSuccess) {
            printf("ERROR: %s\n", hipGetErrorString(error));
            exit(EXIT_FAILURE);
        }
        totalMemory += prop.totalGlobalMem;
    }
    // Matrix size definition and calculation
    const int N = 10;
    size_t size = N * N * sizeof(float);
    // Bytes needed for the three device-resident matrices (A, B, C).
    // Fix: was an element count (N*N*3) compared against a byte count.
    size_t allMatrixSizes = 3 * size;
    if (allMatrixSizes > totalMemory) {
        printf("ERROR");
        exit(EXIT_FAILURE);
    }
    // Matrix allocation (managed memory, visible to host and device).
    float *A, *B, *C, *C_cpu;
    error = hipMallocManaged(&A, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&B, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&C, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&C_cpu, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Matrix initialization with pseudo-random values in [0, 9].
    init(A, N);
    init(B, N);
    // Launch layout: 16x16 threads per block, enough blocks to cover N x N.
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Time the GPU multiplication; cpuTimer() returns seconds.
    double start = cpuTimer();
    matrixMul<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
    hipDeviceSynchronize();
    double stop = cpuTimer();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Fix: cpuTimer() yields seconds, so scale by 1000 so the printed
    // value actually matches the "ms" label.
    float gpu_milliseconds = (stop - start) * 1000.0f;
    printf("Matrix Multiplication @ GPU: %f ms\n", gpu_milliseconds);
    // Time the CPU reference multiplication.
    double begin = cpuTimer();
    matrixMulCPU(A, B, C_cpu, N);
    double end = cpuTimer();
    float cpu_milliseconds = (end - begin) * 1000.0f;
    printf("Matrix Multiplication @ CPU: %f ms\n", cpu_milliseconds);
    // Compare GPU result against the CPU reference within tolerance.
    checkResult(A, B, C, C_cpu, N);
    // Free memory
    hipFree(A);
    hipFree(B);
    hipFree(C);
    hipFree(C_cpu);
    return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
/**
* Inaki Urruta Sanchez
* Pedro Alexandre Simoes dos Reis
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
/**
 * Fills the dim x dim, row-major matrix M with the constant value n.
 */
void initWith(float* M, int dim, float n) {
    for (int row = 0; row < dim; row++) {
        float* rowStart = M + row * dim;
        for (int col = 0; col < dim; col++) {
            rowStart[col] = n;
        }
    }
}
/**
 * Fills the dim x dim, row-major matrix M with pseudo-random values in
 * [0, 9] drawn from rand(); call srand() beforehand for reproducibility.
 */
void init(float* M, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            M[row * dim + col] = (float) (rand() % 10);
        }
    }
}
/**
 * Tiled matrix multiply on the GPU: res = left * right for dim x dim,
 * row-major matrices. Each 16x16 (BLOCK_SIZE) thread block stages one tile
 * of `left` and one tile of `right` in shared memory, synchronizes, then
 * accumulates the partial dot product for its (row, col) output element
 * across all gridDim.x tiles.
 *
 * NOTE(review): the out-of-range guards compare a linear index against
 * dim * dim, which only catches reads past the end of the whole matrix.
 * For dim values that are not a multiple of BLOCK_SIZE, in-range but
 * wrong-row/wrong-column elements can still be loaded into the tiles —
 * TODO confirm the intended behavior for such sizes (the host code
 * launches with N = 10, i.e. a single partial tile).
 */
__global__
void matrixMul(float* left, float* right, float* res, int dim) {
    int i, j, idx;
    float temp = 0;   // per-thread accumulator for res[row][col]
    __shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
    // Row i of matrix left
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Walk the tiles along the shared dimension (one per x-block).
    for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
        // Column j of matrix left
        j = tileNUM * BLOCK_SIZE + threadIdx.x;
        i = tileNUM * BLOCK_SIZE + threadIdx.y;
        // Load left[i][j] to shared mem; zero-fill when past the matrix end.
        idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
        if (idx >= dim * dim) {
            Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
        } else {
            Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
        }
        // Load right[i][j] to shared mem; zero-fill when past the matrix end.
        idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
        if (idx >= dim * dim) {
            Right_shared_t[threadIdx.y][threadIdx.x] = 0;
        } else {
            Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
        }
        // Synchronize so both tiles are fully populated before use.
        __syncthreads();
        // Accumulate one tile of res from tiles of left and right in shared mem
        for (int k = 0; k < BLOCK_SIZE; k++) {
            temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
        }
        // Synchronize before the tiles are overwritten on the next iteration.
        __syncthreads();
    }
    if ((row < dim) && (col < dim)) {
        // Store accumulated value to res
        res[row * dim + col] = temp;
    }
}
/**
 * Computes C = A * B for dim x dim, row-major matrices entirely on the
 * CPU (naive triple loop). Serves as the reference implementation for
 * validating the GPU result.
 */
__host__
void matrixMulCPU(float* A, float* B, float* C, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            float acc = 0.0;
            // Dot product of row `row` of A with column `col` of B.
            for (int k = 0; k < dim; k++) {
                acc += A[row * dim + k] * B[k * dim + col];
            }
            C[row * dim + col] = acc;
        }
    }
}
/**
 * Compares the GPU result C against the CPU reference C_cpu (both dim x dim,
 * row-major) and prints whether every entry matches within an absolute
 * tolerance of 0.001. A and B are accepted for signature compatibility but
 * are not inspected.
 */
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim) {
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            // Compute |C - C_cpu| explicitly. The original used abs(),
            // which can resolve to the integer overload (guaranteed in C,
            // header-dependent in C++) and truncate any difference smaller
            // than 1.0 to zero, silently hiding real mismatches.
            float diff = C[i * dim + j] - C_cpu[i * dim + j];
            if (diff < 0.0f) {
                diff = -diff;
            }
            if (diff > 0.001) {
                printf("ERROR: Incorrect Results!\n");
                return;
            }
        }
    }
    printf("Everything is OK! :D\n");
}
/**
 * Returns the current wall-clock time in seconds (tv_sec plus the
 * fractional part from tv_usec). Callers compute elapsed time as the
 * difference of two samples.
 */
double cpuTimer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double fractional = (double) now.tv_usec * 1e-6;
    return seconds + fractional;
}
int main(int argc, char** argv) {
    // Seed the host RNG used by init().
    srand(time(0));
    hipError_t error;
    hipDeviceProp_t prop;
    int numDevices = 0;
    error = hipGetDeviceCount(&numDevices);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Total global memory in bytes across all devices.
    // Fix: was `int`, which overflows for any GPU with more than 2 GB.
    size_t totalMemory = 0;
    for (int i = 0; i < numDevices; i++) {
        error = hipGetDeviceProperties(&prop, i);
        if (error != hipSuccess) {
            printf("ERROR: %s\n", hipGetErrorString(error));
            exit(EXIT_FAILURE);
        }
        totalMemory += prop.totalGlobalMem;
    }
    // Matrix size definition and calculation
    const int N = 10;
    size_t size = N * N * sizeof(float);
    // Bytes needed for the three device-resident matrices (A, B, C).
    // Fix: was an element count (N*N*3) compared against a byte count.
    size_t allMatrixSizes = 3 * size;
    if (allMatrixSizes > totalMemory) {
        printf("ERROR");
        exit(EXIT_FAILURE);
    }
    // Matrix allocation (managed memory, visible to host and device).
    float *A, *B, *C, *C_cpu;
    error = hipMallocManaged(&A, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&B, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&C, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&C_cpu, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Matrix initialization with pseudo-random values in [0, 9].
    init(A, N);
    init(B, N);
    // Launch layout: 16x16 threads per block, enough blocks to cover N x N.
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Time the GPU multiplication; cpuTimer() returns seconds.
    double start = cpuTimer();
    matrixMul<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
    hipDeviceSynchronize();
    double stop = cpuTimer();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Fix: cpuTimer() yields seconds, so scale by 1000 so the printed
    // value actually matches the "ms" label.
    float gpu_milliseconds = (stop - start) * 1000.0f;
    printf("Matrix Multiplication @ GPU: %f ms\n", gpu_milliseconds);
    // Time the CPU reference multiplication.
    double begin = cpuTimer();
    matrixMulCPU(A, B, C_cpu, N);
    double end = cpuTimer();
    float cpu_milliseconds = (end - begin) * 1000.0f;
    printf("Matrix Multiplication @ CPU: %f ms\n", cpu_milliseconds);
    // Compare GPU result against the CPU reference within tolerance.
    checkResult(A, B, C, C_cpu, N);
    // Free memory
    hipFree(A);
    hipFree(B);
    hipFree(C);
    hipFree(C_cpu);
    return 0;
}
|
	.text
	.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; ---------------------------------------------------------------------------
; _Z9matrixMulPfS_S_i — device code for matrixMul(float*, float*, float*, int)
; Target gfx1100, wave32; uses the 2048-byte LDS segment declared in the
; kernel descriptor below (two 16x16 float tiles, 1024 bytes each).
; Kernarg layout (see .amdgpu_metadata): 0x0=left, 0x8=right, 0x10=res,
; 0x18=dim, 0x20=hidden block count x (gridDim.x), 0x2c=packed group sizes.
; ---------------------------------------------------------------------------
	.protected	_Z9matrixMulPfS_S_i
	.globl	_Z9matrixMulPfS_S_i
	.p2align	8
	.type	_Z9matrixMulPfS_S_i,@function
_Z9matrixMulPfS_S_i:
; Prologue: load dim (s2) and gridDim.x (s3), unpack blockDim.x/y from the
; packed group-size word, and form row (v0) / col (v1) from workgroup ids
; (s14/s15) and the packed lane id in v0.
	s_clause 0x2
	s_load_b32 s4, s[0:1], 0x2c
	s_load_b32 s3, s[0:1], 0x20
	s_load_b32 s2, s[0:1], 0x18
	v_bfe_u32 v3, v0, 10, 10
	v_and_b32_e32 v6, 0x3ff, v0
	s_mov_b32 s8, 0
	s_waitcnt lgkmcnt(0)
	s_lshr_b32 s5, s4, 16
	s_and_b32 s4, s4, 0xffff
	v_mad_u64_u32 v[0:1], null, s15, s5, v[3:4]
	v_mad_u64_u32 v[1:2], null, s14, s4, v[6:7]
	s_cmp_eq_u32 s3, 0
	s_cbranch_scc1 .LBB0_13
; Loop setup: left/right base pointers (s[4:7]), dim*dim bound (s9),
; per-thread LDS addresses for the two tiles (v9 = left slot, v10 = right
; slot, 0x400 bytes apart), and zeroed accumulator (v2).
	s_load_b128 s[4:7], s[0:1], 0x0
	v_dual_mov_b32 v11, 0 :: v_dual_lshlrev_b32 v2, 2, v6
	v_lshlrev_b32_e32 v7, 6, v3
	s_mul_i32 s9, s2, s2
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_nc_u32_e32 v8, 0x400, v2
	v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v9, v7, v2
	v_mad_u64_u32 v[4:5], null, v0, s2, v[6:7]
	s_delay_alu instid0(VALU_DEP_3)
	v_add_nc_u32_e32 v10, v8, v7
.LBB0_2:                                ; tile loop; tileNUM lives in s8
	s_lshl_b32 s10, s8, 4
	s_mov_b32 s11, exec_lo
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_add_nc_u32_e32 v5, s10, v4
	v_cmpx_le_i32_e64 s9, v5
	s_xor_b32 s11, exec_lo, s11
	s_cbranch_execz .LBB0_4
	ds_store_b32 v9, v11                ; out-of-range lanes: 0.0f into left tile
.LBB0_4:
	s_and_not1_saveexec_b32 s11, s11
	s_cbranch_execz .LBB0_6
; In-range lanes: global load left[idx] and stage it into the left LDS tile.
	v_ashrrev_i32_e32 v6, 31, v5
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[5:6], 2, v[5:6]
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v5, vcc_lo, s4, v5
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
	global_load_b32 v5, v[5:6], off
	s_waitcnt vmcnt(0)
	ds_store_b32 v9, v5
.LBB0_6:                                ; same pattern for the right tile
	s_or_b32 exec_lo, exec_lo, s11
	v_add_nc_u32_e32 v12, s10, v3
	s_mov_b32 s10, exec_lo
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[5:6], null, v12, s2, v[1:2]
	v_cmpx_le_i32_e64 s9, v5
	s_xor_b32 s10, exec_lo, s10
	s_cbranch_execz .LBB0_8
	ds_store_b32 v10, v11               ; out-of-range lanes: 0.0f into right tile
.LBB0_8:
	s_and_not1_saveexec_b32 s10, s10
	s_cbranch_execz .LBB0_10
	v_ashrrev_i32_e32 v6, 31, v5
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[5:6], 2, v[5:6]
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v5, vcc_lo, s6, v5
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo
	global_load_b32 v5, v[5:6], off
	s_waitcnt vmcnt(0)
	ds_store_b32 v10, v5
.LBB0_10:                               ; __syncthreads() before consuming tiles
	s_or_b32 exec_lo, exec_lo, s10
	v_mov_b32_e32 v5, v8
	s_mov_b32 s10, 0
	s_waitcnt lgkmcnt(0)
	s_barrier
	buffer_gl0_inv
.LBB0_11:                               ; k-loop: 16 FMA steps (s10 += 4 to 64);
                                        ; left tile read advances 4 B/step,
                                        ; right tile read advances one 64 B row
	v_add_nc_u32_e32 v6, s10, v7
	s_add_i32 s10, s10, 4
	ds_load_b32 v12, v5
	ds_load_b32 v6, v6
	v_add_nc_u32_e32 v5, 64, v5
	s_cmp_eq_u32 s10, 64
	s_waitcnt lgkmcnt(0)
	v_fmac_f32_e32 v2, v6, v12
	s_cbranch_scc0 .LBB0_11
; Next tile; barrier (__syncthreads) before the tiles are overwritten.
	s_add_i32 s8, s8, 1
	s_delay_alu instid0(SALU_CYCLE_1)
	s_cmp_eq_u32 s8, s3
	s_barrier
	buffer_gl0_inv
	s_cbranch_scc0 .LBB0_2
	s_branch .LBB0_14
.LBB0_13:                               ; gridDim.x == 0: accumulator stays 0
	v_mov_b32_e32 v2, 0
.LBB0_14:                               ; if (row < dim && col < dim) store temp
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_max_i32_e32 v3, v0, v1
	s_mov_b32 s3, exec_lo
	v_cmpx_gt_i32_e64 s2, v3
	s_cbranch_execz .LBB0_16
	s_load_b64 s[0:1], s[0:1], 0x10
	v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_ashrrev_i32_e32 v4, 31, v3
	v_lshlrev_b64 v[0:1], 2, v[3:4]
	s_waitcnt lgkmcnt(0)
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v0, vcc_lo, s0, v0
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	global_store_b32 v[0:1], v2, off
.LBB0_16:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9matrixMulPfS_S_i
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9matrixMulPfS_S_i, .Lfunc_end0-_Z9matrixMulPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9matrixMulPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9matrixMulPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
/**
* Inaki Urruta Sanchez
* Pedro Alexandre Simoes dos Reis
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
/**
 * Fills the dim x dim, row-major matrix M with the constant value n.
 */
void initWith(float* M, int dim, float n) {
    for (int row = 0; row < dim; row++) {
        float* rowStart = M + row * dim;
        for (int col = 0; col < dim; col++) {
            rowStart[col] = n;
        }
    }
}
/**
 * Fills the dim x dim, row-major matrix M with pseudo-random values in
 * [0, 9] drawn from rand(); call srand() beforehand for reproducibility.
 */
void init(float* M, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            M[row * dim + col] = (float) (rand() % 10);
        }
    }
}
/**
 * Tiled matrix multiply on the GPU: res = left * right for dim x dim,
 * row-major matrices. Each 16x16 (BLOCK_SIZE) thread block stages one tile
 * of `left` and one tile of `right` in shared memory, synchronizes, then
 * accumulates the partial dot product for its (row, col) output element
 * across all gridDim.x tiles.
 *
 * NOTE(review): the out-of-range guards compare a linear index against
 * dim * dim, which only catches reads past the end of the whole matrix.
 * For dim values that are not a multiple of BLOCK_SIZE, in-range but
 * wrong-row/wrong-column elements can still be loaded into the tiles —
 * TODO confirm the intended behavior for such sizes (the host code
 * launches with N = 10, i.e. a single partial tile).
 */
__global__
void matrixMul(float* left, float* right, float* res, int dim) {
    int i, j, idx;
    float temp = 0;   // per-thread accumulator for res[row][col]
    __shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
    // Row i of matrix left
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Walk the tiles along the shared dimension (one per x-block).
    for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
        // Column j of matrix left
        j = tileNUM * BLOCK_SIZE + threadIdx.x;
        i = tileNUM * BLOCK_SIZE + threadIdx.y;
        // Load left[i][j] to shared mem; zero-fill when past the matrix end.
        idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
        if (idx >= dim * dim) {
            Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
        } else {
            Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
        }
        // Load right[i][j] to shared mem; zero-fill when past the matrix end.
        idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
        if (idx >= dim * dim) {
            Right_shared_t[threadIdx.y][threadIdx.x] = 0;
        } else {
            Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
        }
        // Synchronize so both tiles are fully populated before use.
        __syncthreads();
        // Accumulate one tile of res from tiles of left and right in shared mem
        for (int k = 0; k < BLOCK_SIZE; k++) {
            temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
        }
        // Synchronize before the tiles are overwritten on the next iteration.
        __syncthreads();
    }
    if ((row < dim) && (col < dim)) {
        // Store accumulated value to res
        res[row * dim + col] = temp;
    }
}
/**
 * Computes C = A * B for dim x dim, row-major matrices entirely on the
 * CPU (naive triple loop). Serves as the reference implementation for
 * validating the GPU result.
 */
__host__
void matrixMulCPU(float* A, float* B, float* C, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            float acc = 0.0;
            // Dot product of row `row` of A with column `col` of B.
            for (int k = 0; k < dim; k++) {
                acc += A[row * dim + k] * B[k * dim + col];
            }
            C[row * dim + col] = acc;
        }
    }
}
/**
 * Compares the GPU result C against the CPU reference C_cpu (both dim x dim,
 * row-major) and prints whether every entry matches within an absolute
 * tolerance of 0.001. A and B are accepted for signature compatibility but
 * are not inspected.
 */
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim) {
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            // Compute |C - C_cpu| explicitly. The original used abs(),
            // which can resolve to the integer overload (guaranteed in C,
            // header-dependent in C++) and truncate any difference smaller
            // than 1.0 to zero, silently hiding real mismatches.
            float diff = C[i * dim + j] - C_cpu[i * dim + j];
            if (diff < 0.0f) {
                diff = -diff;
            }
            if (diff > 0.001) {
                printf("ERROR: Incorrect Results!\n");
                return;
            }
        }
    }
    printf("Everything is OK! :D\n");
}
/**
 * Returns the current wall-clock time in seconds (tv_sec plus the
 * fractional part from tv_usec). Callers compute elapsed time as the
 * difference of two samples.
 */
double cpuTimer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double fractional = (double) now.tv_usec * 1e-6;
    return seconds + fractional;
}
int main(int argc, char** argv) {
    // Seed the host RNG used by init().
    srand(time(0));
    hipError_t error;
    hipDeviceProp_t prop;
    int numDevices = 0;
    error = hipGetDeviceCount(&numDevices);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Total global memory in bytes across all devices.
    // Fix: was `int`, which overflows for any GPU with more than 2 GB.
    size_t totalMemory = 0;
    for (int i = 0; i < numDevices; i++) {
        error = hipGetDeviceProperties(&prop, i);
        if (error != hipSuccess) {
            printf("ERROR: %s\n", hipGetErrorString(error));
            exit(EXIT_FAILURE);
        }
        totalMemory += prop.totalGlobalMem;
    }
    // Matrix size definition and calculation
    const int N = 10;
    size_t size = N * N * sizeof(float);
    // Bytes needed for the three device-resident matrices (A, B, C).
    // Fix: was an element count (N*N*3) compared against a byte count.
    size_t allMatrixSizes = 3 * size;
    if (allMatrixSizes > totalMemory) {
        printf("ERROR");
        exit(EXIT_FAILURE);
    }
    // Matrix allocation (managed memory, visible to host and device).
    float *A, *B, *C, *C_cpu;
    error = hipMallocManaged(&A, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&B, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&C, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = hipMallocManaged(&C_cpu, size);
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Matrix initialization with pseudo-random values in [0, 9].
    init(A, N);
    init(B, N);
    // Launch layout: 16x16 threads per block, enough blocks to cover N x N.
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Time the GPU multiplication; cpuTimer() returns seconds.
    double start = cpuTimer();
    matrixMul<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
    hipDeviceSynchronize();
    double stop = cpuTimer();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("ERROR: %s\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Fix: cpuTimer() yields seconds, so scale by 1000 so the printed
    // value actually matches the "ms" label.
    float gpu_milliseconds = (stop - start) * 1000.0f;
    printf("Matrix Multiplication @ GPU: %f ms\n", gpu_milliseconds);
    // Time the CPU reference multiplication.
    double begin = cpuTimer();
    matrixMulCPU(A, B, C_cpu, N);
    double end = cpuTimer();
    float cpu_milliseconds = (end - begin) * 1000.0f;
    printf("Matrix Multiplication @ CPU: %f ms\n", cpu_milliseconds);
    // Compare GPU result against the CPU reference within tolerance.
    checkResult(A, B, C, C_cpu, N);
    // Free memory
    hipFree(A);
    hipFree(B);
    hipFree(C);
    hipFree(C_cpu);
    return 0;
}
|
.text
.file "matrix_mul_naive.hip"
# ---------------------------------------------------------------------------
# void initWith(float* M, int dim, float n)
# SysV AMD64: rdi = M, esi = dim, xmm0 = n. Fills dim*dim floats with n.
# Outer loop advances the row base by dim elements; inner loop stores xmm0.
# ---------------------------------------------------------------------------
	.globl	_Z8initWithPfif                 # -- Begin function _Z8initWithPfif
	.p2align	4, 0x90
	.type	_Z8initWithPfif,@function
_Z8initWithPfif:                        # @_Z8initWithPfif
	.cfi_startproc
# %bb.0:
	testl	%esi, %esi                      # dim <= 0: nothing to do
	jle	.LBB0_5
# %bb.1:                                # %.preheader.lr.ph
	movl	%esi, %eax                      # rax = dim (loop bound)
	xorl	%ecx, %ecx                      # ecx = row * dim (element offset)
	xorl	%edx, %edx                      # rdx = row counter
	.p2align	4, 0x90
.LBB0_2:                                # %.preheader
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB0_3 Depth 2
	movl	%ecx, %r8d
	leaq	(%rdi,%r8,4), %r8               # r8 = &M[row * dim]
	xorl	%r9d, %r9d
	.p2align	4, 0x90
.LBB0_3:                                #   Parent Loop BB0_2 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	movss	%xmm0, (%r8,%r9,4)              # M[row * dim + col] = n
	incq	%r9
	cmpq	%r9, %rax
	jne	.LBB0_3
# %bb.4:                                # %._crit_edge
                                        #   in Loop: Header=BB0_2 Depth=1
	incq	%rdx
	addl	%esi, %ecx                      # next row base offset
	cmpq	%rax, %rdx
	jne	.LBB0_2
.LBB0_5:                                # %._crit_edge14
	retq
.Lfunc_end0:
	.size	_Z8initWithPfif, .Lfunc_end0-_Z8initWithPfif
	.cfi_endproc
                                        # -- End function
# ---------------------------------------------------------------------------
# void init(float* M, int dim)
# SysV AMD64: rdi = M, esi = dim. Fills dim*dim floats with rand() % 10.
# rand() is called per element, so callee-saved registers carry the loop
# state across the call. The imul-by-0x66666667 / sar-34 sequence is the
# compiler's reciprocal-multiply form of signed division by 10.
# ---------------------------------------------------------------------------
	.globl	_Z4initPfi                      # -- Begin function _Z4initPfi
	.p2align	4, 0x90
	.type	_Z4initPfi,@function
_Z4initPfi:                             # @_Z4initPfi
	.cfi_startproc
# %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	pushq	%r15
	.cfi_def_cfa_offset 24
	pushq	%r14
	.cfi_def_cfa_offset 32
	pushq	%r13
	.cfi_def_cfa_offset 40
	pushq	%r12
	.cfi_def_cfa_offset 48
	pushq	%rbx
	.cfi_def_cfa_offset 56
	pushq	%rax
	.cfi_def_cfa_offset 64
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	movq	%rdi, (%rsp)                    # 8-byte Spill: save M across rand()
	testl	%esi, %esi                      # dim <= 0: nothing to do
	jle	.LBB1_5
# %bb.1:                                # %.preheader.lr.ph
	movl	%esi, %ebx                      # ebx = dim (row stride)
	movl	%esi, %r15d                     # r15 = dim (loop bound)
	xorl	%r12d, %r12d                    # r12d = row * dim offset
	xorl	%r13d, %r13d                    # r13 = row counter
	.p2align	4, 0x90
.LBB1_2:                                # %.preheader
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB1_3 Depth 2
	movl	%r12d, %eax
	movq	(%rsp), %rcx                    # 8-byte Reload: M
	leaq	(%rcx,%rax,4), %rbp             # rbp = &M[row * dim]
	xorl	%r14d, %r14d                    # r14 = column counter
	.p2align	4, 0x90
.LBB1_3:                                #   Parent Loop BB1_2 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	callq	rand
	cltq
	imulq	$1717986919, %rax, %rcx         # imm = 0x66666667; rand() / 10 ...
	movq	%rcx, %rdx
	shrq	$63, %rdx
	sarq	$34, %rcx
	addl	%edx, %ecx
	addl	%ecx, %ecx
	leal	(%rcx,%rcx,4), %ecx             # ecx = (rand()/10) * 10
	subl	%ecx, %eax                      # eax = rand() % 10
	xorps	%xmm0, %xmm0
	cvtsi2ss	%eax, %xmm0             # convert to float
	movss	%xmm0, (%rbp,%r14,4)            # M[row * dim + col] = value
	incq	%r14
	cmpq	%r14, %r15
	jne	.LBB1_3
# %bb.4:                                # %._crit_edge
                                        #   in Loop: Header=BB1_2 Depth=1
	incq	%r13
	addl	%ebx, %r12d                     # next row base offset
	cmpq	%r15, %r13
	jne	.LBB1_2
.LBB1_5:                                # %._crit_edge13
	addq	$8, %rsp
	.cfi_def_cfa_offset 56
	popq	%rbx
	.cfi_def_cfa_offset 48
	popq	%r12
	.cfi_def_cfa_offset 40
	popq	%r13
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end1:
	.size	_Z4initPfi, .Lfunc_end1-_Z4initPfi
	.cfi_endproc
                                        # -- End function
# ---------------------------------------------------------------------------
# Host-side kernel-launch stub for matrixMul<<<...>>>(left, right, res, dim).
# Packs the four kernel arguments into a stack-resident pointer array,
# pops the launch configuration pushed by __hipPushCallConfiguration, and
# forwards everything to hipLaunchKernel.
# ---------------------------------------------------------------------------
	.globl	_Z24__device_stub__matrixMulPfS_S_i # -- Begin function _Z24__device_stub__matrixMulPfS_S_i
	.p2align	4, 0x90
	.type	_Z24__device_stub__matrixMulPfS_S_i,@function
_Z24__device_stub__matrixMulPfS_S_i:    # @_Z24__device_stub__matrixMulPfS_S_i
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 72(%rsp)                  # spill arg values: left
	movq	%rsi, 64(%rsp)                  # right
	movq	%rdx, 56(%rsp)                  # res
	movl	%ecx, 4(%rsp)                   # dim
	leaq	72(%rsp), %rax                  # build void* kernel-arg array
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	40(%rsp), %rdi                  # out: grid dim
	leaq	24(%rsp), %rsi                  # out: block dim
	leaq	16(%rsp), %rdx                  # out: shared mem bytes
	leaq	8(%rsp), %rcx                   # out: stream
	callq	__hipPopCallConfiguration
	movq	40(%rsp), %rsi
	movl	48(%rsp), %edx
	movq	24(%rsp), %rcx
	movl	32(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-arg array
	movl	$_Z9matrixMulPfS_S_i, %edi      # kernel symbol
	pushq	8(%rsp)                         # stream / shared-mem stack args
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end2:
	.size	_Z24__device_stub__matrixMulPfS_S_i, .Lfunc_end2-_Z24__device_stub__matrixMulPfS_S_i
	.cfi_endproc
                                        # -- End function
# ---------------------------------------------------------------------------
# void matrixMulCPU(float* A, float* B, float* C, int dim)
# SysV AMD64: rdi = A, rsi = B, rdx = C, ecx = dim. Naive triple loop:
# for each (i, j), C[i*dim+j] = sum_k A[i*dim+k] * B[k*dim+j]. The inner
# loop walks A linearly and B with a stride of dim*4 bytes (r8).
# ---------------------------------------------------------------------------
	.globl	_Z12matrixMulCPUPfS_S_i         # -- Begin function _Z12matrixMulCPUPfS_S_i
	.p2align	4, 0x90
	.type	_Z12matrixMulCPUPfS_S_i,@function
_Z12matrixMulCPUPfS_S_i:                # @_Z12matrixMulCPUPfS_S_i
	.cfi_startproc
# %bb.0:
	testl	%ecx, %ecx                      # dim <= 0: nothing to do
	jle	.LBB3_8
# %bb.1:                                # %.preheader26.lr.ph
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%r13
	.cfi_def_cfa_offset 32
	pushq	%r12
	.cfi_def_cfa_offset 40
	pushq	%rbx
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -48
	.cfi_offset %r12, -40
	.cfi_offset %r13, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movl	%ecx, %eax                      # rax = dim
	leaq	(,%rax,4), %r8                  # r8 = row stride of B in bytes
	xorl	%r9d, %r9d                      # r9d = i * dim (element offset)
	xorl	%r10d, %r10d                    # r10 = i
	.p2align	4, 0x90
.LBB3_2:                                # %.preheader26
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB3_3 Depth 2
                                        #       Child Loop BB3_4 Depth 3
	movl	%r9d, %r11d
	leaq	(%rdi,%r11,4), %r11             # r11 = &A[i * dim]
	movq	%r10, %rbx
	imulq	%rax, %rbx
	leaq	(%rdx,%rbx,4), %rbx             # rbx = &C[i * dim]
	movq	%rsi, %r14                      # r14 = &B[0][j] column base
	xorl	%r15d, %r15d                    # r15 = j
	.p2align	4, 0x90
.LBB3_3:                                # %.preheader
                                        #   Parent Loop BB3_2 Depth=1
                                        # =>  This Loop Header: Depth=2
                                        #       Child Loop BB3_4 Depth 3
	xorps	%xmm0, %xmm0                    # xmm0 = accumulator tmp = 0
	movq	%r14, %r12                      # r12 = &B[k=0][j]
	xorl	%r13d, %r13d                    # r13 = k
	.p2align	4, 0x90
.LBB3_4:                                #   Parent Loop BB3_2 Depth=1
                                        #     Parent Loop BB3_3 Depth=2
                                        # =>    This Inner Loop Header: Depth=3
	movss	(%r11,%r13,4), %xmm1            # xmm1 = A[i*dim + k]
	mulss	(%r12), %xmm1                   # * B[k*dim + j]
	addss	%xmm1, %xmm0                    # tmp += product
	incq	%r13
	addq	%r8, %r12                       # advance B down one row
	cmpq	%r13, %rax
	jne	.LBB3_4
# %bb.5:                                # %._crit_edge
                                        #   in Loop: Header=BB3_3 Depth=2
	movss	%xmm0, (%rbx,%r15,4)            # C[i*dim + j] = tmp
	incq	%r15
	addq	$4, %r14                        # next column of B
	cmpq	%rax, %r15
	jne	.LBB3_3
# %bb.6:                                # %._crit_edge30
                                        #   in Loop: Header=BB3_2 Depth=1
	incq	%r10
	addl	%ecx, %r9d                      # next row base offset
	cmpq	%rax, %r10
	jne	.LBB3_2
# %bb.7:
	popq	%rbx
	.cfi_def_cfa_offset 40
	popq	%r12
	.cfi_def_cfa_offset 32
	popq	%r13
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	.cfi_restore %rbx
	.cfi_restore %r12
	.cfi_restore %r13
	.cfi_restore %r14
	.cfi_restore %r15
.LBB3_8:                                # %._crit_edge32
	retq
.Lfunc_end3:
	.size	_Z12matrixMulCPUPfS_S_i, .Lfunc_end3-_Z12matrixMulCPUPfS_S_i
	.cfi_endproc
                                        # -- End function
# ---------------------------------------------------------------------------
# void checkResult(float* A, float* B, float* C, float* C_cpu, int dim)
# SysV AMD64: rdx = C, rcx = C_cpu, r8d = dim (A in rdi and B in rsi are
# unused by the body). Computes |C[i] - C_cpu[i]| via an andps with the
# sign-clearing mask (.LCPI4_0), compares against 0.001 (.LCPI4_1), and
# reports via puts() (the printf calls were folded to puts by the compiler).
# ---------------------------------------------------------------------------
	.section	.rodata.cst16,"aM",@progbits,16
	.p2align	4, 0x0                          # -- Begin function _Z11checkResultPfS_S_S_i
.LCPI4_0:
	.long	0x7fffffff                      # float NaN (bit mask clearing the sign bit)
	.long	0x7fffffff                      # float NaN
	.long	0x7fffffff                      # float NaN
	.long	0x7fffffff                      # float NaN
	.section	.rodata.cst8,"aM",@progbits,8
	.p2align	3, 0x0
.LCPI4_1:
	.quad	0x3f50624dd2f1a9fc              # double 0.001 (comparison tolerance)
	.text
	.globl	_Z11checkResultPfS_S_S_i
	.p2align	4, 0x90
	.type	_Z11checkResultPfS_S_S_i,@function
_Z11checkResultPfS_S_S_i:               # @_Z11checkResultPfS_S_S_i
	.cfi_startproc
# %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	pushq	%r15
	.cfi_def_cfa_offset 24
	pushq	%r14
	.cfi_def_cfa_offset 32
	pushq	%r13
	.cfi_def_cfa_offset 40
	pushq	%r12
	.cfi_def_cfa_offset 48
	pushq	%rbx
	.cfi_def_cfa_offset 56
	subq	$40, %rsp
	.cfi_def_cfa_offset 96
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	testl	%r8d, %r8d                      # r15b remembers "rows remain"
	setg	%r15b
	jle	.LBB4_8                         # dim <= 0: everything trivially OK
# %bb.1:                                # %.preheader.lr.ph
	movl	%r8d, %esi                      # rsi = dim
	leaq	4(%rdx), %r12                   # r12 = &C[1] (row cursor)
	leaq	(,%rsi,4), %rdi                 # rdi = row stride in bytes
	leaq	4(%rcx), %rbx                   # rbx = &C_cpu[1] (row cursor)
	leaq	-1(%rsi), %r14                  # r14 = dim - 1 (inner bound)
	xorl	%r13d, %r13d                    # r13 = row index
	movaps	.LCPI4_0(%rip), %xmm1           # xmm1 = [NaN,NaN,NaN,NaN] abs mask
	movsd	.LCPI4_1(%rip), %xmm2           # xmm2 = 0.001
	movq	%rcx, 32(%rsp)                  # 8-byte Spill
	movq	%rsi, 16(%rsp)                  # 8-byte Spill
	movq	%rdi, 24(%rsp)                  # 8-byte Spill
	jmp	.LBB4_2
	.p2align	4, 0x90
.LBB4_7:                                # %.critedge
                                        #   in Loop: Header=BB4_2 Depth=1
	incq	%r13
	movq	16(%rsp), %rsi                  # 8-byte Reload
	cmpq	%rsi, %r13
	setb	%r15b
	addq	%rdi, %r12                      # advance both row cursors
	addq	%rdi, %rbx
	cmpq	%rsi, %r13
	je	.LBB4_8
.LBB4_2:                                # %.preheader
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB4_4 Depth 2
	movq	%r13, %rax
	imulq	%rsi, %rax
	movss	(%rdx,%rax,4), %xmm0            # xmm0 = C[row*dim] (column 0)
	subss	(%rcx,%rax,4), %xmm0            # - C_cpu[row*dim]
	andps	%xmm1, %xmm0                    # absolute value
	cvtss2sd	%xmm0, %xmm0
	ucomisd	%xmm2, %xmm0                    # > 0.001 ?
	ja	.LBB4_9
# %bb.3:                                # %.lr.ph.preheader
                                        #   in Loop: Header=BB4_2 Depth=1
	xorl	%ebp, %ebp                      # rbp = inner (column-1) index
	.p2align	4, 0x90
.LBB4_4:                                # %.lr.ph
                                        #   Parent Loop BB4_2 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	cmpq	%rbp, %r14
	je	.LBB4_7
# %bb.5:                                #   in Loop: Header=BB4_4 Depth=2
	movss	(%r12,%rbp,4), %xmm0            # |C - C_cpu| for columns 1..dim-1
	subss	(%rbx,%rbp,4), %xmm0
	andps	%xmm1, %xmm0
	cvtss2sd	%xmm0, %xmm0
	incq	%rbp
	ucomisd	%xmm2, %xmm0
	jbe	.LBB4_4
# %bb.6:                                # %._crit_edge
                                        #   in Loop: Header=BB4_2 Depth=1
	movl	$.Lstr, %edi                    # mismatch: print error message
	movb	%r15b, 15(%rsp)                 # 1-byte Spill
	movq	%rdx, %r15
	callq	puts@PLT
	movsd	.LCPI4_1(%rip), %xmm2           # reload constants clobbered by call
	movaps	.LCPI4_0(%rip), %xmm1
	movq	24(%rsp), %rdi                  # 8-byte Reload
	movq	%r15, %rdx
	movq	32(%rsp), %rcx                  # 8-byte Reload
	movzbl	15(%rsp), %r15d                 # 1-byte Folded Reload
	cmpq	16(%rsp), %rbp                  # 8-byte Folded Reload
	jae	.LBB4_7
# %bb.10:                               # %.loopexit
	testb	$1, %r15b
	je	.LBB4_8
.LBB4_11:
	addq	$40, %rsp
	.cfi_def_cfa_offset 56
	popq	%rbx
	.cfi_def_cfa_offset 48
	popq	%r12
	.cfi_def_cfa_offset 40
	popq	%r13
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	retq
.LBB4_9:                                # %.critedge41
	.cfi_def_cfa_offset 96
	movl	$.Lstr, %edi                    # mismatch on column 0: print error
	callq	puts@PLT
	testb	$1, %r15b
	jne	.LBB4_11
.LBB4_8:                                # %.critedge21
	movl	$.Lstr.1, %edi                  # all entries matched: success message
	addq	$40, %rsp
	.cfi_def_cfa_offset 56
	popq	%rbx
	.cfi_def_cfa_offset 48
	popq	%r12
	.cfi_def_cfa_offset 40
	popq	%r13
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	jmp	puts@PLT                        # TAILCALL
.Lfunc_end4:
	.size	_Z11checkResultPfS_S_S_i, .Lfunc_end4-_Z11checkResultPfS_S_S_i
	.cfi_endproc
                                        # -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8cpuTimerv
.LCPI5_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8cpuTimerv
.p2align 4, 0x90
.type _Z8cpuTimerv,@function
#-----------------------------------------------------------------------
# _Z8cpuTimerv — double cpuTimer(void)
# ABI: SysV AMD64. Calls gettimeofday(&tv, NULL) into a 16-byte stack
# slot and returns tv.tv_sec + tv.tv_usec * 1e-6 in xmm0.
# .LCPI5_0 = 9.9999999999999995e-7 (~1e-6 usec->sec factor).
# NOTE(review): caller appears to treat the value as seconds despite
# "ms" in printf strings elsewhere — confirm against the HIP source.
#-----------------------------------------------------------------------
_Z8cpuTimerv: # @_Z8cpuTimerv
	.cfi_startproc
# %bb.0:
	subq $24, %rsp                  # struct timeval at 8(%rsp)
	.cfi_def_cfa_offset 32
	leaq 8(%rsp), %rdi
	xorl %esi, %esi                 # tz = NULL
	callq gettimeofday
	cvtsi2sdq 8(%rsp), %xmm1        # (double)tv_sec
	cvtsi2sdq 16(%rsp), %xmm0       # (double)tv_usec
	mulsd .LCPI5_0(%rip), %xmm0     # usec * 1e-6
	addsd %xmm1, %xmm0              # result in xmm0
	addq $24, %rsp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end5:
.size _Z8cpuTimerv, .Lfunc_end5-_Z8cpuTimerv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI6_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI6_2:
.quad 0x3f50624dd2f1a9fc # double 0.001
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI6_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
#-----------------------------------------------------------------------
# main — HIP matrix-multiply demo driver (clang -O output, SysV AMD64).
# Flow:
#   srand(time(0));
#   hipGetDeviceCount; per device, hipGetDevicePropertiesR0600 and
#     accumulate the field at offset 472 of the properties struct
#     (NOTE(review): presumably a compute-capability metric — confirm);
#     require the sum >= 300, else exit(1) with "ERROR";
#   hipMallocManaged four 400-byte buffers (10x10 floats);
#   fill a and b with rand() % 10 (inlined init());
#   launch _Z9matrixMulPfS_S_i via __hipPushCallConfiguration /
#     __hipPopCallConfiguration / hipLaunchKernel, grid 1x1, block 16x16;
#   time GPU and inlined CPU triple-loop multiply with gettimeofday;
#   compare results with 0.001 tolerance (inlined checkResult);
#   hipFree all four buffers; return 0.
# Any failing hip* call jumps to .LBB6_36: printf("ERROR: %s\n",
# hipGetErrorString(err)); exit(1).
# CFI directives track every stack adjustment — do not reorder.
#-----------------------------------------------------------------------
main: # @main
	.cfi_startproc
# %bb.0:
	pushq %rbp
	.cfi_def_cfa_offset 16
	pushq %r15
	.cfi_def_cfa_offset 24
	pushq %r14
	.cfi_def_cfa_offset 32
	pushq %rbx
	.cfi_def_cfa_offset 40
	subq $1656, %rsp # imm = 0x678
	.cfi_def_cfa_offset 1696
	.cfi_offset %rbx, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	xorl %edi, %edi                 # srand(time(NULL))
	callq time
	movl %eax, %edi
	callq srand
	movl $0, 12(%rsp)               # int deviceCount = 0
	leaq 12(%rsp), %rdi
	callq hipGetDeviceCount
	testl %eax, %eax
	jne .LBB6_36                    # hip error -> report + exit
# %bb.1: # %.preheader
	cmpl $0, 12(%rsp)
	jle .LBB6_35                    # no devices -> "ERROR"; exit(1)
# %bb.2: # %.lr.ph
	xorl %r14d, %r14d               # r14d = accumulated property sum
	leaq 184(%rsp), %rbx            # rbx = &props (hipDeviceProp_t)
	xorl %ebp, %ebp                 # ebp = device index
	.p2align 4, 0x90
.LBB6_3: # =>This Inner Loop Header: Depth=1
	movq %rbx, %rdi
	movl %ebp, %esi
	callq hipGetDevicePropertiesR0600
	testl %eax, %eax
	jne .LBB6_36
# %bb.4: # in Loop: Header=BB6_3 Depth=1
	addl 472(%rsp), %r14d           # props field at struct offset 288
	incl %ebp
	cmpl 12(%rsp), %ebp
	jl .LBB6_3
# %bb.5: # %._crit_edge.loopexit
	cmpl $300, %r14d # imm = 0x12C
	jl .LBB6_35                     # capability sum < 300 -> bail out
# %bb.6:
	leaq 24(%rsp), %rdi             # a: hipMallocManaged(&a, 400, 1)
	movl $400, %esi # imm = 0x190
	movl $1, %edx
	callq hipMallocManaged
	testl %eax, %eax
	jne .LBB6_36
# %bb.7:
	leaq 16(%rsp), %rdi             # b
	movl $400, %esi # imm = 0x190
	movl $1, %edx
	callq hipMallocManaged
	testl %eax, %eax
	jne .LBB6_36
# %bb.8:
	leaq 88(%rsp), %rdi             # c (GPU result)
	movl $400, %esi # imm = 0x190
	movl $1, %edx
	callq hipMallocManaged
	testl %eax, %eax
	jne .LBB6_36
# %bb.9:
	leaq 80(%rsp), %rdi             # c_cpu (CPU reference result)
	movl $400, %esi # imm = 0x190
	movl $1, %edx
	callq hipMallocManaged
	testl %eax, %eax
	jne .LBB6_36
# %bb.10:
	movq 24(%rsp), %rbx             # inlined init(a, 10): a[i][j] = rand()%10
	xorl %r14d, %r14d
	.p2align 4, 0x90
.LBB6_11: # %.preheader.i
	# =>This Loop Header: Depth=1
	# Child Loop BB6_12 Depth 2
	xorl %r15d, %r15d
	.p2align 4, 0x90
.LBB6_12: # Parent Loop BB6_11 Depth=1
	# => This Inner Loop Header: Depth=2
	callq rand
	cltq                            # rand() % 10 via 0x66666667 magic
	imulq $1717986919, %rax, %rcx # imm = 0x66666667
	movq %rcx, %rdx
	shrq $63, %rdx
	sarq $34, %rcx
	addl %edx, %ecx
	addl %ecx, %ecx
	leal (%rcx,%rcx,4), %ecx
	subl %ecx, %eax
	xorps %xmm0, %xmm0              # break cvtsi2ss false dependency
	cvtsi2ss %eax, %xmm0
	movss %xmm0, (%rbx,%r15,4)
	incq %r15
	cmpq $10, %r15
	jne .LBB6_12
# %bb.13: # %._crit_edge.i
	# in Loop: Header=BB6_11 Depth=1
	incq %r14
	addq $40, %rbx                  # next row (10 floats)
	cmpq $10, %r14
	jne .LBB6_11
# %bb.14: # %_Z4initPfi.exit
	movq 16(%rsp), %rbx             # inlined init(b, 10)
	xorl %r14d, %r14d
	.p2align 4, 0x90
.LBB6_15: # %.preheader.i50
	# =>This Loop Header: Depth=1
	# Child Loop BB6_16 Depth 2
	xorl %r15d, %r15d
	.p2align 4, 0x90
.LBB6_16: # Parent Loop BB6_15 Depth=1
	# => This Inner Loop Header: Depth=2
	callq rand
	cltq
	imulq $1717986919, %rax, %rcx # imm = 0x66666667
	movq %rcx, %rdx
	shrq $63, %rdx
	sarq $34, %rcx
	addl %edx, %ecx
	addl %ecx, %ecx
	leal (%rcx,%rcx,4), %ecx
	subl %ecx, %eax
	xorps %xmm0, %xmm0
	cvtsi2ss %eax, %xmm0
	movss %xmm0, (%rbx,%r15,4)
	incq %r15
	cmpq $10, %r15
	jne .LBB6_16
# %bb.17: # %._crit_edge.i55
	# in Loop: Header=BB6_15 Depth=1
	incq %r14
	addq $40, %rbx
	cmpq $10, %r14
	jne .LBB6_15
# %bb.18: # %_Z4initPfi.exit58
	leaq 32(%rsp), %rdi             # start GPU timer: gettimeofday
	xorl %esi, %esi
	callq gettimeofday
	xorps %xmm0, %xmm0
	cvtsi2sdq 32(%rsp), %xmm0
	movsd %xmm0, 104(%rsp) # 8-byte Spill
	xorps %xmm0, %xmm0
	cvtsi2sdq 40(%rsp), %xmm0
	movsd %xmm0, 72(%rsp) # 8-byte Spill
	movabsq $4294967297, %rdi # imm = 0x100000001
	movabsq $68719476752, %rdx # imm = 0x1000000010
	movl $1, %esi                   # grid (1,1,1), block (16,16,1)
	movl $1, %ecx
	xorl %r8d, %r8d                 # sharedMem = 0
	xorl %r9d, %r9d                 # stream = 0
	callq __hipPushCallConfiguration
	testl %eax, %eax
	jne .LBB6_20
# %bb.19:
	movq 24(%rsp), %rax             # build kernel-arg pointer array
	movq 16(%rsp), %rcx
	movq 88(%rsp), %rdx
	movq %rax, 176(%rsp)
	movq %rcx, 168(%rsp)
	movq %rdx, 160(%rsp)
	movl $10, 100(%rsp)             # n = 10
	leaq 176(%rsp), %rax
	movq %rax, 32(%rsp)
	leaq 168(%rsp), %rax
	movq %rax, 40(%rsp)
	leaq 160(%rsp), %rax
	movq %rax, 48(%rsp)
	leaq 100(%rsp), %rax
	movq %rax, 56(%rsp)
	leaq 144(%rsp), %rdi
	leaq 128(%rsp), %rsi
	leaq 120(%rsp), %rdx
	leaq 112(%rsp), %rcx
	callq __hipPopCallConfiguration
	movq 144(%rsp), %rsi
	movl 152(%rsp), %edx
	movq 128(%rsp), %rcx
	movl 136(%rsp), %r8d
	leaq 32(%rsp), %r9
	movl $_Z9matrixMulPfS_S_i, %edi # host stub handle for the kernel
	pushq 112(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 128(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $16, %rsp
	.cfi_adjust_cfa_offset -16
.LBB6_20:
	callq hipDeviceSynchronize      # wait for kernel, then stop timer
	leaq 32(%rsp), %rdi
	xorl %esi, %esi
	callq gettimeofday
	movq 32(%rsp), %rbx
	movq 40(%rsp), %r14
	callq hipGetLastError
	testl %eax, %eax
	jne .LBB6_36
# %bb.21:
	movsd 72(%rsp), %xmm2 # 8-byte Reload
	# xmm2 = mem[0],zero
	mulsd .LCPI6_0(%rip), %xmm2     # elapsed = end - start (usec*1e-6+sec)
	addsd 104(%rsp), %xmm2 # 8-byte Folded Reload
	xorps %xmm0, %xmm0
	cvtsi2sd %r14, %xmm0
	mulsd .LCPI6_0(%rip), %xmm0
	cvtsi2sd %rbx, %xmm1
	addsd %xmm0, %xmm1
	subsd %xmm2, %xmm1
	xorps %xmm0, %xmm0
	cvtsd2ss %xmm1, %xmm0
	cvtss2sd %xmm0, %xmm0
	movl $.L.str.4, %edi            # "Matrix Multiplication @ GPU: %f ms\n"
	movb $1, %al                    # variadic call: 1 vector register used
	callq printf
	xorl %ebx, %ebx
	leaq 32(%rsp), %rdi             # start CPU timer
	xorl %esi, %esi
	callq gettimeofday
	movq 32(%rsp), %rax
	xorps %xmm2, %xmm2
	cvtsi2sdq 40(%rsp), %xmm2
	mulsd .LCPI6_0(%rip), %xmm2
	movq 24(%rsp), %rcx             # inlined matrixMulCPU(a, b, c_cpu, 10)
	movq 16(%rsp), %rdx
	movq 80(%rsp), %rsi
	.p2align 4, 0x90
.LBB6_22: # %.preheader26.i
	# =>This Loop Header: Depth=1
	# Child Loop BB6_23 Depth 2
	# Child Loop BB6_24 Depth 3
	leaq (%rbx,%rbx,4), %rdi        # rdi = &c_cpu[row*10]
	leaq (%rsi,%rdi,8), %rdi
	movq %rdx, %r8
	xorl %r9d, %r9d
	.p2align 4, 0x90
.LBB6_23: # %.preheader.i59
	# Parent Loop BB6_22 Depth=1
	# => This Loop Header: Depth=2
	# Child Loop BB6_24 Depth 3
	xorps %xmm0, %xmm0              # dot-product accumulator
	movq %r8, %r10
	xorl %r11d, %r11d
	.p2align 4, 0x90
.LBB6_24: # Parent Loop BB6_22 Depth=1
	# Parent Loop BB6_23 Depth=2
	# => This Inner Loop Header: Depth=3
	movss (%rcx,%r11,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
	mulss (%r10), %xmm1             # a[row][k] * b[k][col]
	addss %xmm1, %xmm0
	incq %r11
	addq $40, %r10                  # b advances by a full row
	cmpq $10, %r11
	jne .LBB6_24
# %bb.25: # %._crit_edge.i63
	# in Loop: Header=BB6_23 Depth=2
	movss %xmm0, (%rdi,%r9,4)
	incq %r9
	addq $4, %r8
	cmpq $10, %r9
	jne .LBB6_23
# %bb.26: # %._crit_edge30.i
	# in Loop: Header=BB6_22 Depth=1
	incq %rbx
	addq $40, %rcx
	cmpq $10, %rbx
	jne .LBB6_22
# %bb.27: # %_Z12matrixMulCPUPfS_S_i.exit
	xorps %xmm0, %xmm0              # finish CPU-timer start value
	cvtsi2sd %rax, %xmm0
	addsd %xmm0, %xmm2
	movsd %xmm2, 72(%rsp) # 8-byte Spill
	xorl %ebx, %ebx
	leaq 32(%rsp), %rdi             # stop CPU timer
	xorl %esi, %esi
	callq gettimeofday
	xorps %xmm0, %xmm0
	cvtsi2sdq 32(%rsp), %xmm0
	xorps %xmm1, %xmm1
	cvtsi2sdq 40(%rsp), %xmm1
	mulsd .LCPI6_0(%rip), %xmm1
	addsd %xmm0, %xmm1
	subsd 72(%rsp), %xmm1 # 8-byte Folded Reload
	xorps %xmm0, %xmm0
	cvtsd2ss %xmm1, %xmm0
	cvtss2sd %xmm0, %xmm0
	movb $1, %bpl                   # bpl = "all rows matched" flag
	movl $.L.str.5, %edi            # "Matrix Multiplication @ CPU: %f ms\n"
	movb $1, %al
	callq printf
	movq 88(%rsp), %rax             # inlined checkResult(c, c_cpu, 10)
	movq 80(%rsp), %rcx
	leaq 4(%rcx), %rdx
	leaq 4(%rax), %rsi
	movaps .LCPI6_1(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
	movsd .LCPI6_2(%rip), %xmm1 # xmm1 = mem[0],zero
.LBB6_28: # %.preheader.i64
	# =>This Loop Header: Depth=1
	# Child Loop BB6_30 Depth 2
	leaq (,%rbx,8), %rdi            # rdi = row*40 (byte offset)
	leaq (%rdi,%rdi,4), %rdi
	movss (%rax,%rdi), %xmm2 # xmm2 = mem[0],zero,zero,zero
	subss (%rcx,%rdi), %xmm2
	andps %xmm0, %xmm2              # fabsf(gpu - cpu)
	cvtss2sd %xmm2, %xmm2
	ucomisd %xmm1, %xmm2            # > 0.001 ?
	ja .LBB6_32
# %bb.29: # %.lr.ph85.preheader
	# in Loop: Header=BB6_28 Depth=1
	xorl %edi, %edi
	.p2align 4, 0x90
.LBB6_30: # %.lr.ph85
	# Parent Loop BB6_28 Depth=1
	# => This Inner Loop Header: Depth=2
	cmpq $9, %rdi
	je .LBB6_37
# %bb.31: # in Loop: Header=BB6_30 Depth=2
	movss (%rsi,%rdi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
	subss (%rdx,%rdi,4), %xmm2
	andps %xmm0, %xmm2
	cvtss2sd %xmm2, %xmm2
	incq %rdi
	ucomisd %xmm1, %xmm2
	jbe .LBB6_30
	jmp .LBB6_32
	.p2align 4, 0x90
.LBB6_37: # %.critedge.i
	# in Loop: Header=BB6_28 Depth=1
	cmpq $9, %rbx
	leaq 1(%rbx), %rdi
	setb %bpl
	addq $40, %rdx
	addq $40, %rsi
	movq %rdi, %rbx
	cmpq $10, %rdi
	jne .LBB6_28
	jmp .LBB6_33
.LBB6_32: # %._crit_edge86
	movl $.Lstr, %edi               # puts("ERROR: Incorrect Results!")
	callq puts@PLT
	testb $1, %bpl
	jne .LBB6_34
.LBB6_33: # %.critedge21.i
	movl $.Lstr.1, %edi             # puts("Everything is OK! :D")
	callq puts@PLT
.LBB6_34: # %_Z11checkResultPfS_S_S_i.exit
	movq 24(%rsp), %rdi             # hipFree all four managed buffers
	callq hipFree
	movq 16(%rsp), %rdi
	callq hipFree
	movq 88(%rsp), %rdi
	callq hipFree
	movq 80(%rsp), %rdi
	callq hipFree
	xorl %eax, %eax                 # return 0
	addq $1656, %rsp # imm = 0x678
	.cfi_def_cfa_offset 40
	popq %rbx
	.cfi_def_cfa_offset 32
	popq %r14
	.cfi_def_cfa_offset 24
	popq %r15
	.cfi_def_cfa_offset 16
	popq %rbp
	.cfi_def_cfa_offset 8
	retq
.LBB6_36:
	.cfi_def_cfa_offset 1696
	movl %eax, %edi                 # printf("ERROR: %s\n", hipGetErrorString(err))
	callq hipGetErrorString
	movl $.L.str.2, %edi
	movq %rax, %rsi
	xorl %eax, %eax
	callq printf
	movl $1, %edi
	callq exit
.LBB6_35: # %.critedge101
	movl $.L.str.3, %edi            # printf("ERROR"); exit(1)
	xorl %eax, %eax
	callq printf
	movl $1, %edi
	callq exit
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
#-----------------------------------------------------------------------
# __hip_module_ctor — HIP runtime registration constructor (.init_array).
# Registers the fat binary once (guarded by __hip_gpubin_handle), then
# registers the device kernel stub under its mangled name and installs
# __hip_module_dtor via atexit (tail call).
#-----------------------------------------------------------------------
__hip_module_ctor: # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq $40, %rsp
	.cfi_def_cfa_offset 48
	cmpq $0, __hip_gpubin_handle(%rip)   # already registered?
	jne .LBB7_2
# %bb.1:
	movl $__hip_fatbin_wrapper, %edi
	callq __hipRegisterFatBinary
	movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
	movq __hip_gpubin_handle(%rip), %rdi
	xorps %xmm0, %xmm0                   # zero the 4 trailing pointer args
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z9matrixMulPfS_S_i, %esi      # host stub address
	movl $.L__unnamed_1, %edx            # device-side name string
	movl $.L__unnamed_1, %ecx
	movl $-1, %r8d                       # thread limit: unlimited
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	movl $__hip_module_dtor, %edi
	addq $40, %rsp
	.cfi_def_cfa_offset 8
	jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
#-----------------------------------------------------------------------
# __hip_module_dtor — atexit handler installed by __hip_module_ctor.
# Unregisters the fat binary if it was registered and clears the
# handle so the teardown is idempotent.
#-----------------------------------------------------------------------
__hip_module_dtor: # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq __hip_gpubin_handle(%rip), %rdi
	testq %rdi, %rdi                # nothing registered -> plain return
	je .LBB8_2
# %bb.1:
	pushq %rax                      # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	callq __hipUnregisterFatBinary
	movq $0, __hip_gpubin_handle(%rip)
	addq $8, %rsp
	.cfi_def_cfa_offset 8
.LBB8_2:
	retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9matrixMulPfS_S_i,@object # @_Z9matrixMulPfS_S_i
.section .rodata,"a",@progbits
.globl _Z9matrixMulPfS_S_i
.p2align 3, 0x0
_Z9matrixMulPfS_S_i:
.quad _Z24__device_stub__matrixMulPfS_S_i
.size _Z9matrixMulPfS_S_i, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "ERROR: %s\n"
.size .L.str.2, 11
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "ERROR"
.size .L.str.3, 6
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Matrix Multiplication @ GPU: %f ms\n"
.size .L.str.4, 36
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Matrix Multiplication @ CPU: %f ms\n"
.size .L.str.5, 36
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9matrixMulPfS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "ERROR: Incorrect Results!"
.size .Lstr, 26
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Everything is OK! :D"
.size .Lstr.1, 21
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__matrixMulPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9matrixMulPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z9matrixMulPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0xc], PT ; /* 0x00000300ff007a0c */
/* 0x000fe20003f25270 */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R17, SR_TID.X ; /* 0x0000000000117919 */
/* 0x000e280000002100 */
/*0060*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0070*/ S2R R16, SR_TID.Y ; /* 0x0000000000107919 */
/* 0x000e620000002200 */
/*0080*/ IMAD R3, R2, c[0x0][0x0], R17 ; /* 0x0000000002037a24 */
/* 0x001fca00078e0211 */
/*0090*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f06270 */
/*00a0*/ IMAD R0, R0, c[0x0][0x4], R16 ; /* 0x0000010000007a24 */
/* 0x002fca00078e0210 */
/*00b0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fe20000706670 */
/*00c0*/ @!P1 BRA 0x4d0 ; /* 0x0000040000009947 */
/* 0x000fd80003800000 */
/*00d0*/ SHF.L.U32 R2, R16.reuse, 0x6, RZ ; /* 0x0000000610027819 */
/* 0x040fe200000006ff */
/*00e0*/ IMAD R16, R16, c[0x0][0x178], R3 ; /* 0x00005e0010107a24 */
/* 0x000fe200078e0203 */
/*00f0*/ MOV R19, RZ ; /* 0x000000ff00137202 */
/* 0x000fe20000000f00 */
/*0100*/ IMAD R18, R0, c[0x0][0x178], R17 ; /* 0x00005e0000127a24 */
/* 0x000fe200078e0211 */
/*0110*/ MOV R9, RZ ; /* 0x000000ff00097202 */
/* 0x000fe20000000f00 */
/*0120*/ ULDC UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe20000000800 */
/*0130*/ LEA R20, R17, R2, 0x2 ; /* 0x0000000211147211 */
/* 0x000fe200078e10ff */
/*0140*/ UIMAD UR4, UR4, UR4, URZ ; /* 0x00000004040472a4 */
/* 0x000fcc000f8e023f */
/*0150*/ ISETP.GE.AND P1, PT, R18, UR4, PT ; /* 0x0000000412007c0c */
/* 0x000fe2000bf26270 */
/*0160*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */
/* 0x000fe200000001ff */
/*0170*/ ISETP.GE.AND P2, PT, R16, UR4, PT ; /* 0x0000000410007c0c */
/* 0x000fc4000bf46270 */
/*0180*/ MOV R27, RZ ; /* 0x000000ff001b7202 */
/* 0x000fd20000000f00 */
/*0190*/ @!P1 MOV R5, 0x4 ; /* 0x0000000400059802 */
/* 0x000fe40000000f00 */
/*01a0*/ @!P2 MOV R11, 0x4 ; /* 0x00000004000ba802 */
/* 0x000fc60000000f00 */
/*01b0*/ @!P1 IMAD.WIDE R4, R18, R5, c[0x0][0x160] ; /* 0x0000580012049625 */
/* 0x000fc800078e0205 */
/*01c0*/ @!P2 IMAD.WIDE R10, R16, R11, c[0x0][0x168] ; /* 0x00005a00100aa625 */
/* 0x000fe200078e020b */
/*01d0*/ @!P1 LDG.E R21, [R4.64] ; /* 0x0000000604159981 */
/* 0x000ea8000c1e1900 */
/*01e0*/ @!P2 LDG.E R27, [R10.64] ; /* 0x000000060a1ba981 */
/* 0x000ee2000c1e1900 */
/*01f0*/ IADD3 R19, R19, 0x1, RZ ; /* 0x0000000113137810 */
/* 0x000fe40007ffe0ff */
/*0200*/ IADD3 R18, R18, 0x10, RZ ; /* 0x0000001012127810 */
/* 0x000fe40007ffe0ff */
/*0210*/ ISETP.GE.U32.AND P1, PT, R19, c[0x0][0xc], PT ; /* 0x0000030013007a0c */
/* 0x000fe20003f26070 */
/*0220*/ STS [R20], R21 ; /* 0x0000001514007388 */
/* 0x004fe80000000800 */
/*0230*/ STS [R20+0x400], R27 ; /* 0x0004001b14007388 */
/* 0x008fe80000000800 */
/*0240*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0250*/ LDS R8, [R17.X4+0x400] ; /* 0x0004000011087984 */
/* 0x000fe80000004800 */
/*0260*/ LDS.128 R12, [R2] ; /* 0x00000000020c7984 */
/* 0x000e280000000c00 */
/*0270*/ LDS R28, [R17.X4+0x440] ; /* 0x00044000111c7984 */
/* 0x000e680000004800 */
/*0280*/ LDS R29, [R17.X4+0x480] ; /* 0x00048000111d7984 */
/* 0x000ea80000004800 */
/*0290*/ LDS R24, [R17.X4+0x4c0] ; /* 0x0004c00011187984 */
/* 0x000ee80000004800 */
/*02a0*/ LDS R25, [R17.X4+0x500] ; /* 0x0005000011197984 */
/* 0x000fe80000004800 */
/*02b0*/ LDS.128 R4, [R2+0x10] ; /* 0x0000100002047984 */
/* 0x000f280000000c00 */
/*02c0*/ LDS R26, [R17.X4+0x540] ; /* 0x00054000111a7984 */
/* 0x000f680000004800 */
/*02d0*/ LDS R23, [R17.X4+0x580] ; /* 0x0005800011177984 */
/* 0x000f680000004800 */
/*02e0*/ LDS R22, [R17.X4+0x5c0] ; /* 0x0005c00011167984 */
/* 0x000f680000004800 */
/*02f0*/ LDS R21, [R17.X4+0x600] ; /* 0x0006000011157984 */
/* 0x000fe20000004800 */
/*0300*/ FFMA R8, R8, R12, R9 ; /* 0x0000000c08087223 */
/* 0x001fc80000000009 */
/*0310*/ FFMA R13, R28, R13, R8 ; /* 0x0000000d1c0d7223 */
/* 0x002fe40000000008 */
/*0320*/ LDS.128 R8, [R2+0x20] ; /* 0x0000200002087984 */
/* 0x000e240000000c00 */
/*0330*/ FFMA R13, R29, R14, R13 ; /* 0x0000000e1d0d7223 */
/* 0x004fc8000000000d */
/*0340*/ FFMA R13, R24, R15, R13 ; /* 0x0000000f180d7223 */
/* 0x008fe4000000000d */
/*0350*/ LDS R24, [R17.X4+0x640] ; /* 0x0006400011187984 */
/* 0x000e640000004800 */
/*0360*/ FFMA R4, R25, R4, R13 ; /* 0x0000000419047223 */
/* 0x010fe4000000000d */
/*0370*/ LDS R25, [R17.X4+0x680] ; /* 0x0006800011197984 */
/* 0x000ea40000004800 */
/*0380*/ FFMA R5, R26, R5, R4 ; /* 0x000000051a057223 */
/* 0x020fe40000000004 */
/*0390*/ LDS R4, [R17.X4+0x6c0] ; /* 0x0006c00011047984 */
/* 0x000ee40000004800 */
/*03a0*/ FFMA R23, R23, R6, R5 ; /* 0x0000000617177223 */
/* 0x000fc40000000005 */
/*03b0*/ LDS R5, [R17.X4+0x700] ; /* 0x0007000011057984 */
/* 0x000fe40000004800 */
/*03c0*/ FFMA R23, R22, R7, R23 ; /* 0x0000000716177223 */
/* 0x000fe40000000017 */
/*03d0*/ LDS.128 R12, [R2+0x30] ; /* 0x00003000020c7984 */
/* 0x000f280000000c00 */
/*03e0*/ LDS R6, [R17.X4+0x740] ; /* 0x0007400011067984 */
/* 0x000f680000004800 */
/*03f0*/ LDS R7, [R17.X4+0x780] ; /* 0x0007800011077984 */
/* 0x000f680000004800 */
/*0400*/ LDS R22, [R17.X4+0x7c0] ; /* 0x0007c00011167984 */
/* 0x000f620000004800 */
/*0410*/ FFMA R8, R21, R8, R23 ; /* 0x0000000815087223 */
/* 0x001fc80000000017 */
/*0420*/ FFMA R8, R24, R9, R8 ; /* 0x0000000918087223 */
/* 0x002fc80000000008 */
/*0430*/ FFMA R8, R25, R10, R8 ; /* 0x0000000a19087223 */
/* 0x004fc80000000008 */
/*0440*/ FFMA R4, R4, R11, R8 ; /* 0x0000000b04047223 */
/* 0x008fc80000000008 */
/*0450*/ FFMA R4, R5, R12, R4 ; /* 0x0000000c05047223 */
/* 0x010fe20000000004 */
/*0460*/ MOV R5, c[0x0][0x178] ; /* 0x00005e0000057a02 */
/* 0x000fc60000000f00 */
/*0470*/ FFMA R4, R6, R13, R4 ; /* 0x0000000d06047223 */
/* 0x020fe20000000004 */
/*0480*/ LEA R16, R5, R16, 0x4 ; /* 0x0000001005107211 */
/* 0x000fc600078e20ff */
/*0490*/ FFMA R4, R7, R14, R4 ; /* 0x0000000e07047223 */
/* 0x000fc80000000004 */
/*04a0*/ FFMA R9, R22, R15, R4 ; /* 0x0000000f16097223 */
/* 0x000fe20000000004 */
/*04b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*04c0*/ @!P1 BRA 0x150 ; /* 0xfffffc8000009947 */
/* 0x000fea000383ffff */
/*04d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*04e0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*04f0*/ IMAD R3, R0, c[0x0][0x178], R3 ; /* 0x00005e0000037a24 */
/* 0x000fd200078e0203 */
/*0500*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0510*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101906 */
/*0520*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0530*/ BRA 0x530; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9matrixMulPfS_S_i
.globl _Z9matrixMulPfS_S_i
.p2align 8
.type _Z9matrixMulPfS_S_i,@function
;-----------------------------------------------------------------------
; _Z9matrixMulPfS_S_i — tiled matrix-multiply kernel, gfx1100 (wave32).
; Kernargs (see .amdgpu_metadata): a* @0x0, b* @0x8, c* @0x10,
;   n (int) @0x18; hidden block-count x @0x20 (s3), hidden group
;   sizes packed @0x2c (s4).
; Uses 2048 B of LDS (two tiles: A at 0, B at +0x400); ds offsets
; stride 64 B = 16 floats per tile row, so the tile is 16x16.
; Out-of-range global loads store 0.0 into LDS instead (exec-mask
; branches .LBB0_4/.LBB0_8). Outer loop .LBB0_2 runs once per tile
; (s8 counts up to s3); inner loop .LBB0_11 accumulates 16 FMAs into
; v2 between s_barrier/buffer_gl0_inv pairs. Result is written to
; c[row*n + col] only when max(row, col) < n (.LBB0_14 guard).
; NOTE: s_delay_alu annotations encode scheduling — do not reorder.
;-----------------------------------------------------------------------
_Z9matrixMulPfS_S_i:
	s_clause 0x2
	s_load_b32 s4, s[0:1], 0x2c          ; packed group_size_x/y
	s_load_b32 s3, s[0:1], 0x20          ; block count x (tile-loop trip)
	s_load_b32 s2, s[0:1], 0x18          ; n
	v_bfe_u32 v3, v0, 10, 10             ; tid.y (bits 10..19 of packed id)
	v_and_b32_e32 v6, 0x3ff, v0          ; tid.x
	s_mov_b32 s8, 0
	s_waitcnt lgkmcnt(0)
	s_lshr_b32 s5, s4, 16                ; group_size_y
	s_and_b32 s4, s4, 0xffff             ; group_size_x
	v_mad_u64_u32 v[0:1], null, s15, s5, v[3:4]  ; row = wg.y*gsY + tid.y
	v_mad_u64_u32 v[1:2], null, s14, s4, v[6:7]  ; col = wg.x*gsX + tid.x
	s_cmp_eq_u32 s3, 0
	s_cbranch_scc1 .LBB0_13              ; zero tiles: acc stays 0
	s_load_b128 s[4:7], s[0:1], 0x0      ; s[4:5]=a, s[6:7]=b
	v_dual_mov_b32 v11, 0 :: v_dual_lshlrev_b32 v2, 2, v6
	v_lshlrev_b32_e32 v7, 6, v3          ; tile-row byte offset (tid.y*64)
	s_mul_i32 s9, s2, s2                 ; n*n element bound
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_nc_u32_e32 v8, 0x400, v2
	v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v9, v7, v2   ; v2 = accumulator
	v_mad_u64_u32 v[4:5], null, v0, s2, v[6:7]   ; row*n + tid.x
	s_delay_alu instid0(VALU_DEP_3)
	v_add_nc_u32_e32 v10, v8, v7
.LBB0_2:                                 ; per-tile loop
	s_lshl_b32 s10, s8, 4                ; tile base = tile_idx*16
	s_mov_b32 s11, exec_lo
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_add_nc_u32_e32 v5, s10, v4
	v_cmpx_le_i32_e64 s9, v5             ; a-index out of range?
	s_xor_b32 s11, exec_lo, s11
	s_cbranch_execz .LBB0_4
	ds_store_b32 v9, v11                 ; pad A tile slot with 0.0
.LBB0_4:
	s_and_not1_saveexec_b32 s11, s11
	s_cbranch_execz .LBB0_6
	v_ashrrev_i32_e32 v6, 31, v5
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[5:6], 2, v[5:6]
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v5, vcc_lo, s4, v5      ; &a[index]
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
	global_load_b32 v5, v[5:6], off
	s_waitcnt vmcnt(0)
	ds_store_b32 v9, v5                  ; A tile element -> LDS
.LBB0_6:
	s_or_b32 exec_lo, exec_lo, s11
	v_add_nc_u32_e32 v12, s10, v3
	s_mov_b32 s10, exec_lo
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[5:6], null, v12, s2, v[1:2]  ; b index = (tile+tid.y)*n+col
	v_cmpx_le_i32_e64 s9, v5
	s_xor_b32 s10, exec_lo, s10
	s_cbranch_execz .LBB0_8
	ds_store_b32 v10, v11                ; pad B tile slot with 0.0
.LBB0_8:
	s_and_not1_saveexec_b32 s10, s10
	s_cbranch_execz .LBB0_10
	v_ashrrev_i32_e32 v6, 31, v5
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[5:6], 2, v[5:6]
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v5, vcc_lo, s6, v5      ; &b[index]
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo
	global_load_b32 v5, v[5:6], off
	s_waitcnt vmcnt(0)
	ds_store_b32 v10, v5                 ; B tile element -> LDS (+0x400)
.LBB0_10:
	s_or_b32 exec_lo, exec_lo, s10
	v_mov_b32_e32 v5, v8
	s_mov_b32 s10, 0
	s_waitcnt lgkmcnt(0)
	s_barrier                            ; tiles fully populated
	buffer_gl0_inv
.LBB0_11:                                ; 16-step dot product (4*16 = 64)
	v_add_nc_u32_e32 v6, s10, v7
	s_add_i32 s10, s10, 4
	ds_load_b32 v12, v5                  ; B tile, stride 64 B per step
	ds_load_b32 v6, v6                   ; A tile row, 4 B per step
	v_add_nc_u32_e32 v5, 64, v5
	s_cmp_eq_u32 s10, 64
	s_waitcnt lgkmcnt(0)
	v_fmac_f32_e32 v2, v6, v12           ; acc += A*B
	s_cbranch_scc0 .LBB0_11
	s_add_i32 s8, s8, 1
	s_delay_alu instid0(SALU_CYCLE_1)
	s_cmp_eq_u32 s8, s3
	s_barrier                            ; tiles consumed before reuse
	buffer_gl0_inv
	s_cbranch_scc0 .LBB0_2
	s_branch .LBB0_14
.LBB0_13:
	v_mov_b32_e32 v2, 0
.LBB0_14:
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_max_i32_e32 v3, v0, v1             ; bounds guard: max(row, col)
	s_mov_b32 s3, exec_lo
	v_cmpx_gt_i32_e64 s2, v3             ; only lanes with row,col < n store
	s_cbranch_execz .LBB0_16
	s_load_b64 s[0:1], s[0:1], 0x10      ; c*
	v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]   ; row*n + col
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_ashrrev_i32_e32 v4, 31, v3
	v_lshlrev_b64 v[0:1], 2, v[3:4]
	s_waitcnt lgkmcnt(0)
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v0, vcc_lo, s0, v0
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	global_store_b32 v[0:1], v2, off
.LBB0_16:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9matrixMulPfS_S_i
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9matrixMulPfS_S_i, .Lfunc_end0-_Z9matrixMulPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9matrixMulPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9matrixMulPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00093563_00000000-6_matrix_mul_naive.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8initWithPfif
.type _Z8initWithPfif, @function
_Z8initWithPfif:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L3
movslq %esi, %rax
leaq 0(,%rax,4), %r8
leaq (%rdi,%r8), %rdx
negq %rax
leaq 0(,%rax,4), %rdi
movl $0, %ecx
.L5:
leaq (%rdx,%rdi), %rax
.L6:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L6
addl $1, %ecx
addq %r8, %rdx
cmpl %ecx, %esi
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z8initWithPfif, .-_Z8initWithPfif
.globl _Z4initPfi
.type _Z4initPfi, @function
_Z4initPfi:
.LFB2058:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L14
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %esi, %r14d
movslq %esi, %r13
leaq 0(,%r13,4), %r15
leaq (%rdi,%r15), %rbp
negq %r13
salq $2, %r13
movl $0, %r12d
.L10:
leaq 0(%rbp,%r13), %rbx
.L11:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L11
addl $1, %r12d
addq %r15, %rbp
cmpl %r12d, %r14d
jne .L10
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2058:
.size _Z4initPfi, .-_Z4initPfi
.globl _Z12matrixMulCPUPfS_S_i
.type _Z12matrixMulCPUPfS_S_i, @function
_Z12matrixMulCPUPfS_S_i:
.LFB2059:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L25
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rsi, %rbx
movq %rdx, %r9
movl %ecx, %r12d
movslq %ecx, %r11
leaq 0(,%r11,4), %rcx
movq %rdi, %r10
leaq (%rdi,%rcx), %rsi
movl $0, %ebp
.L19:
movq %rbx, %r8
movl $0, %edi
.L22:
movq %r8, %rdx
movq %r10, %rax
pxor %xmm1, %xmm1
.L20:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rcx, %rdx
cmpq %rsi, %rax
jne .L20
movss %xmm1, (%r9,%rdi,4)
addq $1, %rdi
addq $4, %r8
cmpq %r11, %rdi
jne .L22
addl $1, %ebp
addq %rcx, %r9
addq %rcx, %r10
addq %rcx, %rsi
cmpl %ebp, %r12d
jne .L19
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE2059:
.size _Z12matrixMulCPUPfS_S_i, .-_Z12matrixMulCPUPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "ERROR: Incorrect Results!\n"
.LC4:
.string "Everything is OK! :D\n"
.text
.globl _Z11checkResultPfS_S_S_i
.type _Z11checkResultPfS_S_S_i, @function
_Z11checkResultPfS_S_S_i:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
testl %r8d, %r8d
jle .L29
movq %rdx, %rsi
movslq %r8d, %rax
leaq 0(,%rax,4), %r10
negq %rax
leaq 0(,%rax,4), %rdi
movq %r10, %rdx
movl $0, %r9d
movss .LC1(%rip), %xmm2
movsd .LC2(%rip), %xmm1
.L30:
leaq (%rdx,%rdi), %rax
.L34:
movss (%rsi,%rax), %xmm0
subss (%rcx,%rax), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
ja .L39
addq $4, %rax
cmpq %rdx, %rax
jne .L34
addl $1, %r9d
addq %r10, %rdx
cmpl %r9d, %r8d
jne .L30
.L29:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L28
.L39:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L28:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z11checkResultPfS_S_S_i, .-_Z11checkResultPfS_S_S_i
.globl _Z8cpuTimerv
.type _Z8cpuTimerv, @function
_Z8cpuTimerv:
.LFB2061:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC5(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L43
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z8cpuTimerv, .-_Z8cpuTimerv
.globl _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
.type _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, @function
_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L48
.L44:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L49
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9matrixMulPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L44
.L49:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
.globl _Z9matrixMulPfS_S_i
.type _Z9matrixMulPfS_S_i, @function
_Z9matrixMulPfS_S_i:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z9matrixMulPfS_S_i, .-_Z9matrixMulPfS_S_i
.section .rodata.str1.1
.LC6:
.string "ERROR: %s\n"
.LC7:
.string "ERROR"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC8:
.string "Matrix Multiplication @ GPU: %f ms\n"
.align 8
.LC9:
.string "Matrix Multiplication @ CPU: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $1120, %rsp
.cfi_def_cfa_offset 1152
movq %fs:40, %rax
movq %rax, 1112(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl $0, 20(%rsp)
leaq 20(%rsp), %rdi
call cudaGetDeviceCount@PLT
testl %eax, %eax
jne .L53
movl $0, %ebx
movl $0, %ebp
leaq 80(%rsp), %r12
cmpl $0, 20(%rsp)
jle .L55
.L54:
movl %ebx, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L69
movl %ebp, %eax
addl 368(%rsp), %eax
movl %eax, %ebp
addl $1, %ebx
cmpl %ebx, 20(%rsp)
jg .L54
cmpl $299, %eax
jle .L55
leaq 24(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L70
leaq 32(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L71
leaq 40(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L72
leaq 48(%rsp), %rdi
movl $1, %edx
movl $400, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L73
movl $10, %esi
movq 24(%rsp), %rdi
call _Z4initPfi
movl $10, %esi
movq 32(%rsp), %rdi
call _Z4initPfi
movl $16, 56(%rsp)
movl $16, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
call _Z8cpuTimerv
movsd %xmm0, (%rsp)
movl 64(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 56(%rsp), %rdx
movq 68(%rsp), %rdi
movl 76(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L62:
call cudaDeviceSynchronize@PLT
call _Z8cpuTimerv
movsd %xmm0, 8(%rsp)
call cudaGetLastError@PLT
testl %eax, %eax
jne .L75
movsd 8(%rsp), %xmm0
subsd (%rsp), %xmm0
cvtsd2ss %xmm0, %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
call _Z8cpuTimerv
movsd %xmm0, (%rsp)
movl $10, %ecx
movq 48(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z12matrixMulCPUPfS_S_i
call _Z8cpuTimerv
subsd (%rsp), %xmm0
cvtsd2ss %xmm0, %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $10, %r8d
movq 48(%rsp), %rcx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z11checkResultPfS_S_S_i
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 1112(%rsp), %rax
subq %fs:40, %rax
jne .L76
movl $0, %eax
addq $1120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L69:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L55:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L70:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L71:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L72:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L73:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L74:
movl $10, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
jmp .L62
.L75:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L76:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z9matrixMulPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z9matrixMulPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long -755914244
.long 1062232653
.align 8
.LC5:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "matrix_mul_naive.hip"
.globl _Z8initWithPfif # -- Begin function _Z8initWithPfif
.p2align 4, 0x90
.type _Z8initWithPfif,@function
_Z8initWithPfif: # @_Z8initWithPfif
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_5
# %bb.1: # %.preheader.lr.ph
movl %esi, %eax
xorl %ecx, %ecx
xorl %edx, %edx
.p2align 4, 0x90
.LBB0_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_3 Depth 2
movl %ecx, %r8d
leaq (%rdi,%r8,4), %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB0_3: # Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
movss %xmm0, (%r8,%r9,4)
incq %r9
cmpq %r9, %rax
jne .LBB0_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
incq %rdx
addl %esi, %ecx
cmpq %rax, %rdx
jne .LBB0_2
.LBB0_5: # %._crit_edge14
retq
.Lfunc_end0:
.size _Z8initWithPfif, .Lfunc_end0-_Z8initWithPfif
.cfi_endproc
# -- End function
.globl _Z4initPfi # -- Begin function _Z4initPfi
.p2align 4, 0x90
.type _Z4initPfi,@function
_Z4initPfi: # @_Z4initPfi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, (%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB1_5
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebx
movl %esi, %r15d
xorl %r12d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
movl %r12d, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbp,%r14,4)
incq %r14
cmpq %r14, %r15
jne .LBB1_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
incq %r13
addl %ebx, %r12d
cmpq %r15, %r13
jne .LBB1_2
.LBB1_5: # %._crit_edge13
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z4initPfi, .Lfunc_end1-_Z4initPfi
.cfi_endproc
# -- End function
.globl _Z24__device_stub__matrixMulPfS_S_i # -- Begin function _Z24__device_stub__matrixMulPfS_S_i
.p2align 4, 0x90
.type _Z24__device_stub__matrixMulPfS_S_i,@function
_Z24__device_stub__matrixMulPfS_S_i: # @_Z24__device_stub__matrixMulPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9matrixMulPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z24__device_stub__matrixMulPfS_S_i, .Lfunc_end2-_Z24__device_stub__matrixMulPfS_S_i
.cfi_endproc
# -- End function
.globl _Z12matrixMulCPUPfS_S_i # -- Begin function _Z12matrixMulCPUPfS_S_i
.p2align 4, 0x90
.type _Z12matrixMulCPUPfS_S_i,@function
_Z12matrixMulCPUPfS_S_i: # @_Z12matrixMulCPUPfS_S_i
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB3_8
# %bb.1: # %.preheader26.lr.ph
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %eax
leaq (,%rax,4), %r8
xorl %r9d, %r9d
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB3_2: # %.preheader26
# =>This Loop Header: Depth=1
# Child Loop BB3_3 Depth 2
# Child Loop BB3_4 Depth 3
movl %r9d, %r11d
leaq (%rdi,%r11,4), %r11
movq %r10, %rbx
imulq %rax, %rbx
leaq (%rdx,%rbx,4), %rbx
movq %rsi, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_3: # %.preheader
# Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_4 Depth 3
xorps %xmm0, %xmm0
movq %r14, %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_4: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_3 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r12), %xmm1
addss %xmm1, %xmm0
incq %r13
addq %r8, %r12
cmpq %r13, %rax
jne .LBB3_4
# %bb.5: # %._crit_edge
# in Loop: Header=BB3_3 Depth=2
movss %xmm0, (%rbx,%r15,4)
incq %r15
addq $4, %r14
cmpq %rax, %r15
jne .LBB3_3
# %bb.6: # %._crit_edge30
# in Loop: Header=BB3_2 Depth=1
incq %r10
addl %ecx, %r9d
cmpq %rax, %r10
jne .LBB3_2
# %bb.7:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.LBB3_8: # %._crit_edge32
retq
.Lfunc_end3:
.size _Z12matrixMulCPUPfS_S_i, .Lfunc_end3-_Z12matrixMulCPUPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z11checkResultPfS_S_S_i
.LCPI4_0:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI4_1:
.quad 0x3f50624dd2f1a9fc # double 0.001
.text
.globl _Z11checkResultPfS_S_S_i
.p2align 4, 0x90
.type _Z11checkResultPfS_S_S_i,@function
_Z11checkResultPfS_S_S_i: # @_Z11checkResultPfS_S_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
testl %r8d, %r8d
setg %r15b
jle .LBB4_8
# %bb.1: # %.preheader.lr.ph
movl %r8d, %esi
leaq 4(%rdx), %r12
leaq (,%rsi,4), %rdi
leaq 4(%rcx), %rbx
leaq -1(%rsi), %r14
xorl %r13d, %r13d
movaps .LCPI4_0(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movsd .LCPI4_1(%rip), %xmm2 # xmm2 = mem[0],zero
movq %rcx, 32(%rsp) # 8-byte Spill
movq %rsi, 16(%rsp) # 8-byte Spill
movq %rdi, 24(%rsp) # 8-byte Spill
jmp .LBB4_2
.p2align 4, 0x90
.LBB4_7: # %.critedge
# in Loop: Header=BB4_2 Depth=1
incq %r13
movq 16(%rsp), %rsi # 8-byte Reload
cmpq %rsi, %r13
setb %r15b
addq %rdi, %r12
addq %rdi, %rbx
cmpq %rsi, %r13
je .LBB4_8
.LBB4_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_4 Depth 2
movq %r13, %rax
imulq %rsi, %rax
movss (%rdx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
subss (%rcx,%rax,4), %xmm0
andps %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
ucomisd %xmm2, %xmm0
ja .LBB4_9
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB4_2 Depth=1
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_4: # %.lr.ph
# Parent Loop BB4_2 Depth=1
# => This Inner Loop Header: Depth=2
cmpq %rbp, %r14
je .LBB4_7
# %bb.5: # in Loop: Header=BB4_4 Depth=2
movss (%r12,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
subss (%rbx,%rbp,4), %xmm0
andps %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
incq %rbp
ucomisd %xmm2, %xmm0
jbe .LBB4_4
# %bb.6: # %._crit_edge
# in Loop: Header=BB4_2 Depth=1
movl $.Lstr, %edi
movb %r15b, 15(%rsp) # 1-byte Spill
movq %rdx, %r15
callq puts@PLT
movsd .LCPI4_1(%rip), %xmm2 # xmm2 = mem[0],zero
movaps .LCPI4_0(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movq 24(%rsp), %rdi # 8-byte Reload
movq %r15, %rdx
movq 32(%rsp), %rcx # 8-byte Reload
movzbl 15(%rsp), %r15d # 1-byte Folded Reload
cmpq 16(%rsp), %rbp # 8-byte Folded Reload
jae .LBB4_7
# %bb.10: # %.loopexit
testb $1, %r15b
je .LBB4_8
.LBB4_11:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB4_9: # %.critedge41
.cfi_def_cfa_offset 96
movl $.Lstr, %edi
callq puts@PLT
testb $1, %r15b
jne .LBB4_11
.LBB4_8: # %.critedge21
movl $.Lstr.1, %edi
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end4:
.size _Z11checkResultPfS_S_S_i, .Lfunc_end4-_Z11checkResultPfS_S_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8cpuTimerv
.LCPI5_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8cpuTimerv
.p2align 4, 0x90
.type _Z8cpuTimerv,@function
_Z8cpuTimerv: # @_Z8cpuTimerv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI5_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z8cpuTimerv, .Lfunc_end5-_Z8cpuTimerv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI6_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI6_2:
.quad 0x3f50624dd2f1a9fc # double 0.001
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI6_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $1656, %rsp # imm = 0x678
.cfi_def_cfa_offset 1696
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
testl %eax, %eax
jne .LBB6_36
# %bb.1: # %.preheader
cmpl $0, 12(%rsp)
jle .LBB6_35
# %bb.2: # %.lr.ph
xorl %r14d, %r14d
leaq 184(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB6_3: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
testl %eax, %eax
jne .LBB6_36
# %bb.4: # in Loop: Header=BB6_3 Depth=1
addl 472(%rsp), %r14d
incl %ebp
cmpl 12(%rsp), %ebp
jl .LBB6_3
# %bb.5: # %._crit_edge.loopexit
cmpl $300, %r14d # imm = 0x12C
jl .LBB6_35
# %bb.6:
leaq 24(%rsp), %rdi
movl $400, %esi # imm = 0x190
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
jne .LBB6_36
# %bb.7:
leaq 16(%rsp), %rdi
movl $400, %esi # imm = 0x190
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
jne .LBB6_36
# %bb.8:
leaq 88(%rsp), %rdi
movl $400, %esi # imm = 0x190
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
jne .LBB6_36
# %bb.9:
leaq 80(%rsp), %rdi
movl $400, %esi # imm = 0x190
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
jne .LBB6_36
# %bb.10:
movq 24(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB6_11: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB6_12 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_12: # Parent Loop BB6_11 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq $10, %r15
jne .LBB6_12
# %bb.13: # %._crit_edge.i
# in Loop: Header=BB6_11 Depth=1
incq %r14
addq $40, %rbx
cmpq $10, %r14
jne .LBB6_11
# %bb.14: # %_Z4initPfi.exit
movq 16(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB6_15: # %.preheader.i50
# =>This Loop Header: Depth=1
# Child Loop BB6_16 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_16: # Parent Loop BB6_15 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq $10, %r15
jne .LBB6_16
# %bb.17: # %._crit_edge.i55
# in Loop: Header=BB6_15 Depth=1
incq %r14
addq $40, %rbx
cmpq $10, %r14
jne .LBB6_15
# %bb.18: # %_Z4initPfi.exit58
leaq 32(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 32(%rsp), %xmm0
movsd %xmm0, 104(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
cvtsi2sdq 40(%rsp), %xmm0
movsd %xmm0, 72(%rsp) # 8-byte Spill
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_20
# %bb.19:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 88(%rsp), %rdx
movq %rax, 176(%rsp)
movq %rcx, 168(%rsp)
movq %rdx, 160(%rsp)
movl $10, 100(%rsp)
leaq 176(%rsp), %rax
movq %rax, 32(%rsp)
leaq 168(%rsp), %rax
movq %rax, 40(%rsp)
leaq 160(%rsp), %rax
movq %rax, 48(%rsp)
leaq 100(%rsp), %rax
movq %rax, 56(%rsp)
leaq 144(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 120(%rsp), %rdx
leaq 112(%rsp), %rcx
callq __hipPopCallConfiguration
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
movq 128(%rsp), %rcx
movl 136(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z9matrixMulPfS_S_i, %edi
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_20:
callq hipDeviceSynchronize
leaq 32(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 32(%rsp), %rbx
movq 40(%rsp), %r14
callq hipGetLastError
testl %eax, %eax
jne .LBB6_36
# %bb.21:
movsd 72(%rsp), %xmm2 # 8-byte Reload
# xmm2 = mem[0],zero
mulsd .LCPI6_0(%rip), %xmm2
addsd 104(%rsp), %xmm2 # 8-byte Folded Reload
xorps %xmm0, %xmm0
cvtsi2sd %r14, %xmm0
mulsd .LCPI6_0(%rip), %xmm0
cvtsi2sd %rbx, %xmm1
addsd %xmm0, %xmm1
subsd %xmm2, %xmm1
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
xorl %ebx, %ebx
leaq 32(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 32(%rsp), %rax
xorps %xmm2, %xmm2
cvtsi2sdq 40(%rsp), %xmm2
mulsd .LCPI6_0(%rip), %xmm2
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq 80(%rsp), %rsi
.p2align 4, 0x90
.LBB6_22: # %.preheader26.i
# =>This Loop Header: Depth=1
# Child Loop BB6_23 Depth 2
# Child Loop BB6_24 Depth 3
leaq (%rbx,%rbx,4), %rdi
leaq (%rsi,%rdi,8), %rdi
movq %rdx, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB6_23: # %.preheader.i59
# Parent Loop BB6_22 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB6_24 Depth 3
xorps %xmm0, %xmm0
movq %r8, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB6_24: # Parent Loop BB6_22 Depth=1
# Parent Loop BB6_23 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rcx,%r11,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r10), %xmm1
addss %xmm1, %xmm0
incq %r11
addq $40, %r10
cmpq $10, %r11
jne .LBB6_24
# %bb.25: # %._crit_edge.i63
# in Loop: Header=BB6_23 Depth=2
movss %xmm0, (%rdi,%r9,4)
incq %r9
addq $4, %r8
cmpq $10, %r9
jne .LBB6_23
# %bb.26: # %._crit_edge30.i
# in Loop: Header=BB6_22 Depth=1
incq %rbx
addq $40, %rcx
cmpq $10, %rbx
jne .LBB6_22
# %bb.27: # %_Z12matrixMulCPUPfS_S_i.exit
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
addsd %xmm0, %xmm2
movsd %xmm2, 72(%rsp) # 8-byte Spill
xorl %ebx, %ebx
leaq 32(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 32(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 40(%rsp), %xmm1
mulsd .LCPI6_0(%rip), %xmm1
addsd %xmm0, %xmm1
subsd 72(%rsp), %xmm1 # 8-byte Folded Reload
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
movb $1, %bpl
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 88(%rsp), %rax
movq 80(%rsp), %rcx
leaq 4(%rcx), %rdx
leaq 4(%rax), %rsi
movaps .LCPI6_1(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI6_2(%rip), %xmm1 # xmm1 = mem[0],zero
.LBB6_28: # %.preheader.i64
# =>This Loop Header: Depth=1
# Child Loop BB6_30 Depth 2
leaq (,%rbx,8), %rdi
leaq (%rdi,%rdi,4), %rdi
movss (%rax,%rdi), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%rcx,%rdi), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm1, %xmm2
ja .LBB6_32
# %bb.29: # %.lr.ph85.preheader
# in Loop: Header=BB6_28 Depth=1
xorl %edi, %edi
.p2align 4, 0x90
.LBB6_30: # %.lr.ph85
# Parent Loop BB6_28 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $9, %rdi
je .LBB6_37
# %bb.31: # in Loop: Header=BB6_30 Depth=2
movss (%rsi,%rdi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%rdx,%rdi,4), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
incq %rdi
ucomisd %xmm1, %xmm2
jbe .LBB6_30
jmp .LBB6_32
.p2align 4, 0x90
.LBB6_37: # %.critedge.i
# in Loop: Header=BB6_28 Depth=1
cmpq $9, %rbx
leaq 1(%rbx), %rdi
setb %bpl
addq $40, %rdx
addq $40, %rsi
movq %rdi, %rbx
cmpq $10, %rdi
jne .LBB6_28
jmp .LBB6_33
.LBB6_32: # %._crit_edge86
movl $.Lstr, %edi
callq puts@PLT
testb $1, %bpl
jne .LBB6_34
.LBB6_33: # %.critedge21.i
movl $.Lstr.1, %edi
callq puts@PLT
.LBB6_34: # %_Z11checkResultPfS_S_S_i.exit
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq 80(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $1656, %rsp # imm = 0x678
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB6_36:
.cfi_def_cfa_offset 1696
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.LBB6_35: # %.critedge101
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9matrixMulPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9matrixMulPfS_S_i,@object # @_Z9matrixMulPfS_S_i
.section .rodata,"a",@progbits
.globl _Z9matrixMulPfS_S_i
.p2align 3, 0x0
_Z9matrixMulPfS_S_i:
.quad _Z24__device_stub__matrixMulPfS_S_i
.size _Z9matrixMulPfS_S_i, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "ERROR: %s\n"
.size .L.str.2, 11
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "ERROR"
.size .L.str.3, 6
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Matrix Multiplication @ GPU: %f ms\n"
.size .L.str.4, 36
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Matrix Multiplication @ CPU: %f ms\n"
.size .L.str.5, 36
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9matrixMulPfS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "ERROR: Incorrect Results!"
.size .Lstr, 26
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Everything is OK! :D"
.size .Lstr.1, 21
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__matrixMulPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9matrixMulPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#ifndef __CUDA_KERNELHEADER__
#define __CUDA_KERNELHEADER__
/********************************************/
/* Added codes for OpenACC2CUDA translation */
/********************************************/
#ifdef __cplusplus
#define restrict __restrict__
#endif
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38
#endif
#ifndef FLT_MIN
#define FLT_MIN 1.175494351e-38
#endif
#ifndef DBL_MAX
#define DBL_MAX 1.7976931348623158e+308
#endif
#ifndef DBL_MIN
#define DBL_MIN 2.2250738585072014e-308
#endif
#endif
extern "C" __global__ void accLog_kernel_kernel0(float * in, float * out, float k_val, float n_val, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=((n_val*log10(in[lwpriv__i]))+k_val);
}
}
/* Plain-C single-precision complex sample, stored interleaved:
   real part first, imaginary part second. */
struct FComplexStruct
{
float real;
float imag;
};
typedef struct FComplexStruct FComplex;
extern "C" __global__ void accComplexToArg_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=atan2(in[lwpriv__i].imag, in[lwpriv__i].real);
}
}
extern "C" __global__ void accComplexToMag_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
}
}
extern "C" __global__ void accComplexToMagPhase_kernel_kernel0(FComplex * in, float * out0, float * out1, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out0[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
out1[lwpriv__i]=atan2(aval, bval);
}
}
extern "C" __global__ void accComplexToMagSquared_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=((aval*aval)+(bval*bval));
}
}
extern "C" __global__ void accMagPhaseToComplex_kernel_kernel0(float * a, float * b, FComplex * c, int noutput_items)
{
int lwpriv__i;
float mag;
float phase;
float real;
float imag;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
mag=a[lwpriv__i];
phase=b[lwpriv__i];
real=mag*cos(phase);
imag=mag*sin(phase);
c[lwpriv__i].real=real;
c[lwpriv__i].imag=imag;
}
}
|
.file "tmpxft_000094d1_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
# static void __cudaUnregisterBinaryUtil(void)
# atexit handler: hands the fatbin handle cached at registration back to the
# CUDA runtime. SysV AMD64; sub $8 keeps %rsp 16-byte aligned at the call.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg1 = cached fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Device stub for accLog_kernel_kernel0(float* in, float* out, float k_val,
# float n_val, int noutput_items). Spills the five arguments to the stack,
# builds the void* kernel-argument array at 96(%rsp), pops the <<<...>>>
# launch configuration pushed earlier by the runtime, then launches via
# cudaLaunchKernel. Guarded by the glibc stack-protector canary (%fs:40).
.globl _Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi
.type _Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi, @function
_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movss %xmm1, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq accLog_kernel_kernel0(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi, .-_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi
# accLog_kernel_kernel0 (host-side entry): the symbol registered with the CUDA
# runtime; forwards to the device stub, which performs the actual launch.
.globl accLog_kernel_kernel0
.type accLog_kernel_kernel0, @function
accLog_kernel_kernel0:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size accLog_kernel_kernel0, .-accLog_kernel_kernel0
.globl _Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
.type _Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, @function
_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi:
.LFB2053:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq accComplexToArg_kernel_kernel0(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, .-_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
.globl accComplexToArg_kernel_kernel0
.type accComplexToArg_kernel_kernel0, @function
accComplexToArg_kernel_kernel0:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size accComplexToArg_kernel_kernel0, .-accComplexToArg_kernel_kernel0
.globl _Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
.type _Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, @function
_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi:
.LFB2055:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq accComplexToMag_kernel_kernel0(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, .-_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
.globl accComplexToMag_kernel_kernel0
.type accComplexToMag_kernel_kernel0, @function
accComplexToMag_kernel_kernel0:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size accComplexToMag_kernel_kernel0, .-accComplexToMag_kernel_kernel0
.globl _Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i
.type _Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i, @function
_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i:
.LFB2057:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq accComplexToMagPhase_kernel_kernel0(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i, .-_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i
.globl accComplexToMagPhase_kernel_kernel0
.type accComplexToMagPhase_kernel_kernel0, @function
accComplexToMagPhase_kernel_kernel0:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size accComplexToMagPhase_kernel_kernel0, .-accComplexToMagPhase_kernel_kernel0
.globl _Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
.type _Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, @function
_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi:
.LFB2059:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq accComplexToMagSquared_kernel_kernel0(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, .-_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
.globl accComplexToMagSquared_kernel_kernel0
.type accComplexToMagSquared_kernel_kernel0, @function
accComplexToMagSquared_kernel_kernel0:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size accComplexToMagSquared_kernel_kernel0, .-accComplexToMagSquared_kernel_kernel0
.globl _Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi
.type _Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi, @function
_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi:
.LFB2061:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq accMagPhaseToComplex_kernel_kernel0(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi, .-_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi
.globl accMagPhaseToComplex_kernel_kernel0
.type accMagPhaseToComplex_kernel_kernel0, @function
accMagPhaseToComplex_kernel_kernel0:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size accMagPhaseToComplex_kernel_kernel0, .-accMagPhaseToComplex_kernel_kernel0
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "accMagPhaseToComplex_kernel_kernel0"
.align 8
.LC1:
.string "accComplexToMagSquared_kernel_kernel0"
.align 8
.LC2:
.string "accComplexToMagPhase_kernel_kernel0"
.align 8
.LC3:
.string "accComplexToMag_kernel_kernel0"
.align 8
.LC4:
.string "accComplexToArg_kernel_kernel0"
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "accLog_kernel_kernel0"
.text
# static void __sti____cudaRegisterAll(void)
# init_array constructor: registers the embedded fat binary with the CUDA
# runtime, then registers each __global__ entry (host wrapper <-> device name),
# and finally installs the unregister handler via atexit(). %rbx (callee-saved)
# holds the fatbin handle across the calls; each __cudaRegisterFunction call
# passes four NULL stack arguments (unused dim/size slots), -1 thread limit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2064:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# register accMagPhaseToComplex_kernel_kernel0
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq accMagPhaseToComplex_kernel_kernel0(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register accComplexToMagSquared_kernel_kernel0
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq accComplexToMagSquared_kernel_kernel0(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register accComplexToMagPhase_kernel_kernel0
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq accComplexToMagPhase_kernel_kernel0(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register accComplexToMag_kernel_kernel0
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq accComplexToMag_kernel_kernel0(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register accComplexToArg_kernel_kernel0
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq accComplexToArg_kernel_kernel0(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register accLog_kernel_kernel0
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq accLog_kernel_kernel0(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# finalize registration and arrange cleanup at exit
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Startup registration hook + embedded fatbin descriptor.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv # run kernel registration before main()
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977 # fatbin wrapper magic
.long 1 # wrapper version
.quad fatbinData # device-code blob (defined in a sibling object)
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#ifndef __CUDA_KERNELHEADER__
#define __CUDA_KERNELHEADER__
/********************************************/
/* Added codes for OpenACC2CUDA translation */
/********************************************/
#ifdef __cplusplus
#define restrict __restrict__
#endif
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38
#endif
#ifndef FLT_MIN
#define FLT_MIN 1.175494351e-38
#endif
#ifndef DBL_MAX
#define DBL_MAX 1.7976931348623158e+308
#endif
#ifndef DBL_MIN
#define DBL_MIN 2.2250738585072014e-308
#endif
#endif
extern "C" __global__ void accLog_kernel_kernel0(float * in, float * out, float k_val, float n_val, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=((n_val*log10(in[lwpriv__i]))+k_val);
}
}
struct FComplexStruct
{
float real;
float imag;
};
typedef struct FComplexStruct FComplex;
extern "C" __global__ void accComplexToArg_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=atan2(in[lwpriv__i].imag, in[lwpriv__i].real);
}
}
extern "C" __global__ void accComplexToMag_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
}
}
extern "C" __global__ void accComplexToMagPhase_kernel_kernel0(FComplex * in, float * out0, float * out1, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out0[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
out1[lwpriv__i]=atan2(aval, bval);
}
}
extern "C" __global__ void accComplexToMagSquared_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=((aval*aval)+(bval*bval));
}
}
extern "C" __global__ void accMagPhaseToComplex_kernel_kernel0(float * a, float * b, FComplex * c, int noutput_items)
{
int lwpriv__i;
float mag;
float phase;
float real;
float imag;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
mag=a[lwpriv__i];
phase=b[lwpriv__i];
real=mag*cos(phase);
imag=mag*sin(phase);
c[lwpriv__i].real=real;
c[lwpriv__i].imag=imag;
}
}
|
#include <hip/hip_runtime.h>
#ifndef __CUDA_KERNELHEADER__
#define __CUDA_KERNELHEADER__
/********************************************/
/* Added codes for OpenACC2CUDA translation */
/********************************************/
#ifdef __cplusplus
#define restrict __restrict__
#endif
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38
#endif
#ifndef FLT_MIN
#define FLT_MIN 1.175494351e-38
#endif
#ifndef DBL_MAX
#define DBL_MAX 1.7976931348623158e+308
#endif
#ifndef DBL_MIN
#define DBL_MIN 2.2250738585072014e-308
#endif
#endif
extern "C" __global__ void accLog_kernel_kernel0(float * in, float * out, float k_val, float n_val, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=((n_val*log10(in[lwpriv__i]))+k_val);
}
}
struct FComplexStruct
{
float real;
float imag;
};
typedef struct FComplexStruct FComplex;
extern "C" __global__ void accComplexToArg_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=atan2(in[lwpriv__i].imag, in[lwpriv__i].real);
}
}
extern "C" __global__ void accComplexToMag_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
}
}
extern "C" __global__ void accComplexToMagPhase_kernel_kernel0(FComplex * in, float * out0, float * out1, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out0[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
out1[lwpriv__i]=atan2(aval, bval);
}
}
extern "C" __global__ void accComplexToMagSquared_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=((aval*aval)+(bval*bval));
}
}
extern "C" __global__ void accMagPhaseToComplex_kernel_kernel0(float * a, float * b, FComplex * c, int noutput_items)
{
int lwpriv__i;
float mag;
float phase;
float real;
float imag;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
mag=a[lwpriv__i];
phase=b[lwpriv__i];
real=mag*cos(phase);
imag=mag*sin(phase);
c[lwpriv__i].real=real;
c[lwpriv__i].imag=imag;
}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#ifndef __CUDA_KERNELHEADER__
#define __CUDA_KERNELHEADER__
/********************************************/
/* Added codes for OpenACC2CUDA translation */
/********************************************/
#ifdef __cplusplus
#define restrict __restrict__
#endif
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38
#endif
#ifndef FLT_MIN
#define FLT_MIN 1.175494351e-38
#endif
#ifndef DBL_MAX
#define DBL_MAX 1.7976931348623158e+308
#endif
#ifndef DBL_MIN
#define DBL_MIN 2.2250738585072014e-308
#endif
#endif
extern "C" __global__ void accLog_kernel_kernel0(float * in, float * out, float k_val, float n_val, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=((n_val*log10(in[lwpriv__i]))+k_val);
}
}
struct FComplexStruct
{
float real;
float imag;
};
typedef struct FComplexStruct FComplex;
extern "C" __global__ void accComplexToArg_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=atan2(in[lwpriv__i].imag, in[lwpriv__i].real);
}
}
extern "C" __global__ void accComplexToMag_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
}
}
extern "C" __global__ void accComplexToMagPhase_kernel_kernel0(FComplex * in, float * out0, float * out1, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out0[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
out1[lwpriv__i]=atan2(aval, bval);
}
}
extern "C" __global__ void accComplexToMagSquared_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*blockDim.x));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=((aval*aval)+(bval*bval));
}
}
// (magnitude, phase) -> complex: c[i] = mag * (cos(phase) + j*sin(phase)).
// a[] carries magnitudes, b[] carries phases (radians).
extern "C" __global__ void accMagPhaseToComplex_kernel_kernel0(float * a, float * b, FComplex * c, int noutput_items)
{
    const int idx = threadIdx.x + (blockIdx.x * blockDim.x);  // global sample index
    if (idx >= noutput_items)
        return;
    const float mag   = a[idx];
    const float phase = b[idx];
    c[idx].real = mag * cos(phase);
    c[idx].imag = mag * sin(phase);
}
/* ---- end of HIP device source; AMD host-side x86-64 assembly follows ---- */
.text
.file "kernel.hip"
#-----------------------------------------------------------------------
# __device_stub__accLog_kernel_kernel0(float*, float*, float, float, int)
# HIP host launch stub, SysV AMD64 (GAS/AT&T syntax).
# Spills the five kernel arguments to the stack, builds the kernel-arg
# pointer array at 80(%rsp), pops the launch configuration recorded by
# the <<<...>>> expansion via __hipPopCallConfiguration, then forwards
# grid/block dims, the arg array, shared-mem size and stream to
# hipLaunchKernel.  Uses only volatile registers; no callee-saved spills.
# On entry %rsp%16 == 8; subq $120 restores 16-byte alignment, and the
# two pushes (stream, shared-mem) keep the hipLaunchKernel call aligned.
#-----------------------------------------------------------------------
	.globl	__device_stub__accLog_kernel_kernel0 # -- Begin function __device_stub__accLog_kernel_kernel0
	.p2align	4, 0x90
	.type	__device_stub__accLog_kernel_kernel0,@function
__device_stub__accLog_kernel_kernel0: # @__device_stub__accLog_kernel_kernel0
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	# Spill incoming arguments (rdi=in, rsi=out, xmm0=k_val, xmm1=n_val, edx=noutput_items).
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movss	%xmm0, 12(%rsp)
	movss	%xmm1, 8(%rsp)
	movl	%edx, 4(%rsp)
	# Kernel-argument pointer array at 80(%rsp): one pointer per spilled arg.
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 112(%rsp)
	# Out-params for __hipPopCallConfiguration: grid dim, block dim, shmem, stream.
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$accLog_kernel_kernel0, %edi    # host-side handle identifying the kernel
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end0:
	.size	__device_stub__accLog_kernel_kernel0, .Lfunc_end0-__device_stub__accLog_kernel_kernel0
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __device_stub__accComplexToArg_kernel_kernel0(FComplex*, float*, int)
# HIP launch stub for the 3-argument kernel; same pattern as the accLog
# stub but with a 3-entry kernel-arg array at 80(%rsp).
#-----------------------------------------------------------------------
	.globl	__device_stub__accComplexToArg_kernel_kernel0 # -- Begin function __device_stub__accComplexToArg_kernel_kernel0
	.p2align	4, 0x90
	.type	__device_stub__accComplexToArg_kernel_kernel0,@function
__device_stub__accComplexToArg_kernel_kernel0: # @__device_stub__accComplexToArg_kernel_kernel0
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$accComplexToArg_kernel_kernel0, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end1:
	.size	__device_stub__accComplexToArg_kernel_kernel0, .Lfunc_end1-__device_stub__accComplexToArg_kernel_kernel0
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __device_stub__accComplexToMag_kernel_kernel0(FComplex*, float*, int)
# Identical launch-stub shape; only the kernel handle differs.
#-----------------------------------------------------------------------
	.globl	__device_stub__accComplexToMag_kernel_kernel0 # -- Begin function __device_stub__accComplexToMag_kernel_kernel0
	.p2align	4, 0x90
	.type	__device_stub__accComplexToMag_kernel_kernel0,@function
__device_stub__accComplexToMag_kernel_kernel0: # @__device_stub__accComplexToMag_kernel_kernel0
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$accComplexToMag_kernel_kernel0, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end2:
	.size	__device_stub__accComplexToMag_kernel_kernel0, .Lfunc_end2-__device_stub__accComplexToMag_kernel_kernel0
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __device_stub__accComplexToMagPhase_kernel_kernel0(FComplex*, float*, float*, int)
# HIP launch stub: four kernel args, so a 4-entry arg array at 80(%rsp)
# and a slightly different local-frame layout than the 3-arg stubs.
#-----------------------------------------------------------------------
	.globl	__device_stub__accComplexToMagPhase_kernel_kernel0 # -- Begin function __device_stub__accComplexToMagPhase_kernel_kernel0
	.p2align	4, 0x90
	.type	__device_stub__accComplexToMagPhase_kernel_kernel0,@function
__device_stub__accComplexToMagPhase_kernel_kernel0: # @__device_stub__accComplexToMagPhase_kernel_kernel0
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movq	%rdx, 56(%rsp)
	movl	%ecx, 4(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	40(%rsp), %rdi
	leaq	24(%rsp), %rsi
	leaq	16(%rsp), %rdx
	leaq	8(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	40(%rsp), %rsi
	movl	48(%rsp), %edx
	movq	24(%rsp), %rcx
	movl	32(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$accComplexToMagPhase_kernel_kernel0, %edi
	pushq	8(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end3:
	.size	__device_stub__accComplexToMagPhase_kernel_kernel0, .Lfunc_end3-__device_stub__accComplexToMagPhase_kernel_kernel0
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __device_stub__accComplexToMagSquared_kernel_kernel0(FComplex*, float*, int)
# 3-arg launch stub; same shape as the ToArg/ToMag stubs above.
#-----------------------------------------------------------------------
	.globl	__device_stub__accComplexToMagSquared_kernel_kernel0 # -- Begin function __device_stub__accComplexToMagSquared_kernel_kernel0
	.p2align	4, 0x90
	.type	__device_stub__accComplexToMagSquared_kernel_kernel0,@function
__device_stub__accComplexToMagSquared_kernel_kernel0: # @__device_stub__accComplexToMagSquared_kernel_kernel0
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$accComplexToMagSquared_kernel_kernel0, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end4:
	.size	__device_stub__accComplexToMagSquared_kernel_kernel0, .Lfunc_end4-__device_stub__accComplexToMagSquared_kernel_kernel0
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __device_stub__accMagPhaseToComplex_kernel_kernel0(float*, float*, FComplex*, int)
# 4-arg HIP launch stub; frame layout mirrors the MagPhase stub.
#-----------------------------------------------------------------------
	.globl	__device_stub__accMagPhaseToComplex_kernel_kernel0 # -- Begin function __device_stub__accMagPhaseToComplex_kernel_kernel0
	.p2align	4, 0x90
	.type	__device_stub__accMagPhaseToComplex_kernel_kernel0,@function
__device_stub__accMagPhaseToComplex_kernel_kernel0: # @__device_stub__accMagPhaseToComplex_kernel_kernel0
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movq	%rdx, 56(%rsp)
	movl	%ecx, 4(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	40(%rsp), %rdi
	leaq	24(%rsp), %rsi
	leaq	16(%rsp), %rdx
	leaq	8(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	40(%rsp), %rsi
	movl	48(%rsp), %edx
	movq	24(%rsp), %rcx
	movl	32(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$accMagPhaseToComplex_kernel_kernel0, %edi
	pushq	8(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end5:
	.size	__device_stub__accMagPhaseToComplex_kernel_kernel0, .Lfunc_end5-__device_stub__accMagPhaseToComplex_kernel_kernel0
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __hip_module_ctor — runs from .init_array at load time.
# Registers the embedded fat binary once (guarded by the cached handle in
# __hip_gpubin_handle), then registers each of the six kernels with the
# HIP runtime, mapping the host stub address to the device symbol name.
# rbx (callee-saved, pushed) keeps the fatbin handle across the six calls.
# The 32-byte stack area holds the four zeroed dim3*/size_t* trailing
# stack arguments __hipRegisterFunction expects.
# Ends by tail-calling atexit(__hip_module_dtor) for teardown.
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB6_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB6_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the 4 trailing stack args
	movups	%xmm0, (%rsp)
	movl	$accLog_kernel_kernel0, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$accComplexToArg_kernel_kernel0, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$accComplexToMag_kernel_kernel0, %esi
	movl	$.L__unnamed_3, %edx
	movl	$.L__unnamed_3, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$accComplexToMagPhase_kernel_kernel0, %esi
	movl	$.L__unnamed_4, %edx
	movl	$.L__unnamed_4, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$accComplexToMagSquared_kernel_kernel0, %esi
	movl	$.L__unnamed_5, %edx
	movl	$.L__unnamed_5, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$accMagPhaseToComplex_kernel_kernel0, %esi
	movl	$.L__unnamed_6, %edx
	movl	$.L__unnamed_6, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end6:
	.size	__hip_module_ctor, .Lfunc_end6-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __hip_module_dtor — atexit handler.  Unregisters the fat binary if it
# was registered and clears the cached handle (idempotent).
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB7_2                         # never registered: nothing to do
# %bb.1:
	pushq	%rax                            # realign stack for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB7_2:
	retq
.Lfunc_end7:
	.size	__hip_module_dtor, .Lfunc_end7-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# ----------------------------------------------------------------------
# Module data.  Each kernel symbol below is a host-side OBJECT (not code)
# holding the address of its launch stub; the HIP runtime uses the object's
# address as the kernel handle passed to hipLaunchKernel/__hipRegisterFunction.
# ----------------------------------------------------------------------
	.type	accLog_kernel_kernel0,@object   # @accLog_kernel_kernel0
	.section	.rodata,"a",@progbits
	.globl	accLog_kernel_kernel0
	.p2align	3, 0x0
accLog_kernel_kernel0:
	.quad	__device_stub__accLog_kernel_kernel0
	.size	accLog_kernel_kernel0, 8
	.type	accComplexToArg_kernel_kernel0,@object # @accComplexToArg_kernel_kernel0
	.globl	accComplexToArg_kernel_kernel0
	.p2align	3, 0x0
accComplexToArg_kernel_kernel0:
	.quad	__device_stub__accComplexToArg_kernel_kernel0
	.size	accComplexToArg_kernel_kernel0, 8
	.type	accComplexToMag_kernel_kernel0,@object # @accComplexToMag_kernel_kernel0
	.globl	accComplexToMag_kernel_kernel0
	.p2align	3, 0x0
accComplexToMag_kernel_kernel0:
	.quad	__device_stub__accComplexToMag_kernel_kernel0
	.size	accComplexToMag_kernel_kernel0, 8
	.type	accComplexToMagPhase_kernel_kernel0,@object # @accComplexToMagPhase_kernel_kernel0
	.globl	accComplexToMagPhase_kernel_kernel0
	.p2align	3, 0x0
accComplexToMagPhase_kernel_kernel0:
	.quad	__device_stub__accComplexToMagPhase_kernel_kernel0
	.size	accComplexToMagPhase_kernel_kernel0, 8
	.type	accComplexToMagSquared_kernel_kernel0,@object # @accComplexToMagSquared_kernel_kernel0
	.globl	accComplexToMagSquared_kernel_kernel0
	.p2align	3, 0x0
accComplexToMagSquared_kernel_kernel0:
	.quad	__device_stub__accComplexToMagSquared_kernel_kernel0
	.size	accComplexToMagSquared_kernel_kernel0, 8
	.type	accMagPhaseToComplex_kernel_kernel0,@object # @accMagPhaseToComplex_kernel_kernel0
	.globl	accMagPhaseToComplex_kernel_kernel0
	.p2align	3, 0x0
accMagPhaseToComplex_kernel_kernel0:
	.quad	__device_stub__accMagPhaseToComplex_kernel_kernel0
	.size	accMagPhaseToComplex_kernel_kernel0, 8
# NUL-terminated device-symbol names passed to __hipRegisterFunction.
	.type	.L__unnamed_1,@object           # @0
	.section	.rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
	.asciz	"accLog_kernel_kernel0"
	.size	.L__unnamed_1, 22
	.type	.L__unnamed_2,@object           # @1
.L__unnamed_2:
	.asciz	"accComplexToArg_kernel_kernel0"
	.size	.L__unnamed_2, 31
	.type	.L__unnamed_3,@object           # @2
.L__unnamed_3:
	.asciz	"accComplexToMag_kernel_kernel0"
	.size	.L__unnamed_3, 31
	.type	.L__unnamed_4,@object           # @3
.L__unnamed_4:
	.asciz	"accComplexToMagPhase_kernel_kernel0"
	.size	.L__unnamed_4, 36
	.type	.L__unnamed_5,@object           # @4
.L__unnamed_5:
	.asciz	"accComplexToMagSquared_kernel_kernel0"
	.size	.L__unnamed_5, 38
	.type	.L__unnamed_6,@object           # @5
.L__unnamed_6:
	.asciz	"accMagPhaseToComplex_kernel_kernel0"
	.size	.L__unnamed_6, 36
# Fat-binary wrapper: {magic, version, fatbin ptr, reserved}.  The magic
# 0x48495046 is the HIP fatbin wrapper tag; __hip_fatbin is linked in
# separately from the device-code object.
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
# Cached handle returned by __hipRegisterFatBinary (0 = not registered).
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor               # run ctor at program load
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym __device_stub__accLog_kernel_kernel0
	.addrsig_sym __device_stub__accComplexToArg_kernel_kernel0
	.addrsig_sym __device_stub__accComplexToMag_kernel_kernel0
	.addrsig_sym __device_stub__accComplexToMagPhase_kernel_kernel0
	.addrsig_sym __device_stub__accComplexToMagSquared_kernel_kernel0
	.addrsig_sym __device_stub__accMagPhaseToComplex_kernel_kernel0
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym accLog_kernel_kernel0
	.addrsig_sym accComplexToArg_kernel_kernel0
	.addrsig_sym accComplexToMag_kernel_kernel0
	.addrsig_sym accComplexToMagPhase_kernel_kernel0
	.addrsig_sym accComplexToMagSquared_kernel_kernel0
	.addrsig_sym accMagPhaseToComplex_kernel_kernel0
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
/* ---- extraction artifact removed: stray separator and leaked prompt text.
   The NVCC-generated CUDA host assembly for the same kernels follows. ---- */
.file "tmpxft_000094d1_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
#-----------------------------------------------------------------------
# static void __cudaUnregisterBinaryUtil(void) — atexit teardown helper.
# Unregisters the module's fat binary handle with the CUDA runtime.
# The subq $8 exists only to keep %rsp 16-byte aligned at the call.
#-----------------------------------------------------------------------
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2029:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# CUDA launch stub for accLog_kernel_kernel0(float*, float*, float, float, int).
# GCC-generated (note endbr64 + stack-protector canary at %fs:40).
# Spills args, builds the 5-entry kernel-arg array at 96(%rsp), seeds the
# grid/block dim3 locals with 1s, pops the <<<...>>> configuration, and
# calls cudaLaunchKernel only when __cudaPopCallConfiguration returns 0.
#-----------------------------------------------------------------------
	.globl	_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi
	.type	_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi, @function
_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi:
.LFB2051:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movss	%xmm0, 12(%rsp)
	movss	%xmm1, 8(%rsp)
	movl	%edx, 4(%rsp)
	movq	%fs:40, %rax                    # stack-protector canary in
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 128(%rsp)
	movl	$1, 48(%rsp)                    # default dim3 fields = 1
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7
.L3:
	movq	136(%rsp), %rax                 # canary check
	subq	%fs:40, %rax
	jne	.L8
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	pushq	40(%rsp)                        # stream
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)                        # shared-mem bytes
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	accLog_kernel_kernel0(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2051:
	.size	_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi, .-_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi
#-----------------------------------------------------------------------
# accLog_kernel_kernel0 — extern "C" entry the host code calls; simply
# forwards to the mangled stub above (subq $8 keeps alignment).
#-----------------------------------------------------------------------
	.globl	accLog_kernel_kernel0
	.type	accLog_kernel_kernel0, @function
accLog_kernel_kernel0:
.LFB2052:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z46__device_stub__Z21accLog_kernel_kernel0PfS_ffiPfS_ffi
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2052:
	.size	accLog_kernel_kernel0, .-accLog_kernel_kernel0
#-----------------------------------------------------------------------
# CUDA launch stub + extern "C" wrapper for
# accComplexToArg_kernel_kernel0(FComplex*, float*, int).
# Same pattern as the accLog stub: spill args, 3-entry arg array at
# 96(%rsp), default dim3 = (1,1,1), pop launch config, cudaLaunchKernel,
# stack-protector canary verified on every exit path.
#-----------------------------------------------------------------------
	.globl	_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
	.type	_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, @function
_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi:
.LFB2053:
	.cfi_startproc
	endbr64
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L15
.L11:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L16
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L15:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	accComplexToArg_kernel_kernel0(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L11
.L16:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2053:
	.size	_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, .-_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
# extern "C" wrapper forwarding to the mangled stub above.
	.globl	accComplexToArg_kernel_kernel0
	.type	accComplexToArg_kernel_kernel0, @function
accComplexToArg_kernel_kernel0:
.LFB2054:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z68__device_stub__Z30accComplexToArg_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2054:
	.size	accComplexToArg_kernel_kernel0, .-accComplexToArg_kernel_kernel0
#-----------------------------------------------------------------------
# CUDA launch stub + wrapper for
# accComplexToMag_kernel_kernel0(FComplex*, float*, int) — same shape.
#-----------------------------------------------------------------------
	.globl	_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
	.type	_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, @function
_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi:
.LFB2055:
	.cfi_startproc
	endbr64
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L23
.L19:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L24
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L23:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	accComplexToMag_kernel_kernel0(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L19
.L24:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2055:
	.size	_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, .-_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
# extern "C" wrapper forwarding to the mangled stub above.
	.globl	accComplexToMag_kernel_kernel0
	.type	accComplexToMag_kernel_kernel0, @function
accComplexToMag_kernel_kernel0:
.LFB2056:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z68__device_stub__Z30accComplexToMag_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2056:
	.size	accComplexToMag_kernel_kernel0, .-accComplexToMag_kernel_kernel0
#-----------------------------------------------------------------------
# CUDA launch stub + wrapper for
# accComplexToMagPhase_kernel_kernel0(FComplex*, float*, float*, int):
# four kernel args, so a 4-entry arg array and a larger (152-byte) frame.
#-----------------------------------------------------------------------
	.globl	_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i
	.type	_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i, @function
_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i:
.LFB2057:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movq	%rdx, 8(%rsp)
	movl	%ecx, 4(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 120(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L31
.L27:
	movq	136(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L32
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L31:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	accComplexToMagPhase_kernel_kernel0(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L27
.L32:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2057:
	.size	_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i, .-_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i
# extern "C" wrapper forwarding to the mangled stub above.
	.globl	accComplexToMagPhase_kernel_kernel0
	.type	accComplexToMagPhase_kernel_kernel0, @function
accComplexToMagPhase_kernel_kernel0:
.LFB2058:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z76__device_stub__Z35accComplexToMagPhase_kernel_kernel0P14FComplexStructPfS1_iP14FComplexStructPfS1_i
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2058:
	.size	accComplexToMagPhase_kernel_kernel0, .-accComplexToMagPhase_kernel_kernel0
#-----------------------------------------------------------------------
# CUDA launch stub + wrapper for
# accComplexToMagSquared_kernel_kernel0(FComplex*, float*, int) — 3-arg shape.
#-----------------------------------------------------------------------
	.globl	_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
	.type	_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, @function
_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi:
.LFB2059:
	.cfi_startproc
	endbr64
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L39
.L35:
	movq	120(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L40
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L39:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	accComplexToMagSquared_kernel_kernel0(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L35
.L40:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2059:
	.size	_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi, .-_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
# extern "C" wrapper forwarding to the mangled stub above.
	.globl	accComplexToMagSquared_kernel_kernel0
	.type	accComplexToMagSquared_kernel_kernel0, @function
accComplexToMagSquared_kernel_kernel0:
.LFB2060:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z75__device_stub__Z37accComplexToMagSquared_kernel_kernel0P14FComplexStructPfiP14FComplexStructPfi
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	accComplexToMagSquared_kernel_kernel0, .-accComplexToMagSquared_kernel_kernel0
#-----------------------------------------------------------------------
# CUDA launch stub + wrapper for
# accMagPhaseToComplex_kernel_kernel0(float*, float*, FComplex*, int):
# 4 args, 4-entry arg array, 152-byte frame, canary-protected.
#-----------------------------------------------------------------------
	.globl	_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi
	.type	_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi, @function
_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi:
.LFB2061:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movq	%rdx, 8(%rsp)
	movl	%ecx, 4(%rsp)
	movq	%fs:40, %rax
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 120(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L47
.L43:
	movq	136(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L48
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L47:
	.cfi_restore_state
	pushq	40(%rsp)
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	accMagPhaseToComplex_kernel_kernel0(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L43
.L48:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2061:
	.size	_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi, .-_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi
# extern "C" wrapper forwarding to the mangled stub above.
	.globl	accMagPhaseToComplex_kernel_kernel0
	.type	accMagPhaseToComplex_kernel_kernel0, @function
accMagPhaseToComplex_kernel_kernel0:
.LFB2062:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z75__device_stub__Z35accMagPhaseToComplex_kernel_kernel0PfS_P14FComplexStructiPfS_P14FComplexStructi
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2062:
	.size	accMagPhaseToComplex_kernel_kernel0, .-accMagPhaseToComplex_kernel_kernel0
# Device-symbol name strings used by the registration calls below.
	.section	.rodata.str1.8,"aMS",@progbits,1
	.align 8
.LC0:
	.string	"accMagPhaseToComplex_kernel_kernel0"
	.align 8
.LC1:
	.string	"accComplexToMagSquared_kernel_kernel0"
	.align 8
.LC2:
	.string	"accComplexToMagPhase_kernel_kernel0"
	.align 8
.LC3:
	.string	"accComplexToMag_kernel_kernel0"
	.align 8
.LC4:
	.string	"accComplexToArg_kernel_kernel0"
	.section	.rodata.str1.1,"aMS",@progbits,1
.LC5:
	.string	"accLog_kernel_kernel0"
	.text
#-----------------------------------------------------------------------
# static void __sti____cudaRegisterAll(void) — .init_array constructor.
# Registers the embedded fatbin, then each of the six kernels (host entry
# address -> device symbol name) via __cudaRegisterFunction; the four
# pushed zeros are the trailing thread-limit/dim pointer arguments.
# rbx (callee-saved) holds the fatbin handle across the calls.  Finishes
# with __cudaRegisterFatBinaryEnd and installs the atexit unregister hook.
#-----------------------------------------------------------------------
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2064:
	.cfi_startproc
	endbr64
	pushq	%rbx
	.cfi_def_cfa_offset 16
	.cfi_offset 3, -16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rbx
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC0(%rip), %rdx
	movq	%rdx, %rcx
	leaq	accMagPhaseToComplex_kernel_kernel0(%rip), %rsi
	movq	%rax, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC1(%rip), %rdx
	movq	%rdx, %rcx
	leaq	accComplexToMagSquared_kernel_kernel0(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC2(%rip), %rdx
	movq	%rdx, %rcx
	leaq	accComplexToMagPhase_kernel_kernel0(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC3(%rip), %rdx
	movq	%rdx, %rcx
	leaq	accComplexToMag_kernel_kernel0(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC4(%rip), %rdx
	movq	%rdx, %rcx
	leaq	accComplexToArg_kernel_kernel0(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC5(%rip), %rdx
	movq	%rdx, %rcx
	leaq	accLog_kernel_kernel0(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	popq	%rbx
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2064:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Constructor registration: run __sti____cudaRegisterAll at program load.
	.section	.init_array,"aw"
	.align 8
	.quad	_ZL24__sti____cudaRegisterAllv
# CUDA fatbin wrapper {magic, version, data ptr, reserved}; fatbinData is
# emitted in a separate object by nvcc.
	.section	.nvFatBinSegment,"aw"
	.align 8
	.type	_ZL15__fatDeviceText, @object
	.size	_ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
	.long	1180844977
	.long	1
	.quad	fatbinData
	.quad	0
# Cached handle from __cudaRegisterFatBinary (0 = not registered).
	.local	_ZL20__cudaFatCubinHandle
	.comm	_ZL20__cudaFatCubinHandle,8,8
	.ident	"GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
	.section	.note.GNU-stack,"",@progbits
# GNU property note advertising CET (IBT + SHSTK) support.
	.section	.note.gnu.property,"a"
	.align 8
	.long	1f - 0f
	.long	4f - 1f
	.long	5
0:
	.string	"GNU"
1:
	.align 8
	.long	0xc0000002
	.long	3f - 2f
2:
	.long	0x3
3:
	.align 8
4:
/* ---- extraction artifact removed: second AMD host-assembly rendition of the
   same HIP stubs follows (truncated at end of chunk). ---- */
.text
.file "kernel.hip"
.globl __device_stub__accLog_kernel_kernel0 # -- Begin function __device_stub__accLog_kernel_kernel0
.p2align 4, 0x90
.type __device_stub__accLog_kernel_kernel0,@function
__device_stub__accLog_kernel_kernel0: # @__device_stub__accLog_kernel_kernel0
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movss %xmm1, 8(%rsp)
movl %edx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $accLog_kernel_kernel0, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__accLog_kernel_kernel0, .Lfunc_end0-__device_stub__accLog_kernel_kernel0
.cfi_endproc
# -- End function
# Host launch stub for accComplexToArg_kernel_kernel0: two pointers
# (rdi, rsi) and one int (edx).  Same pattern as the stub above: spill
# the args, build the void* array at 80(%rsp), pop the recorded launch
# configuration and call hipLaunchKernel (sharedMem and stream are
# passed on the stack via the two pushes).
.globl __device_stub__accComplexToArg_kernel_kernel0 # -- Begin function __device_stub__accComplexToArg_kernel_kernel0
.p2align 4, 0x90
.type __device_stub__accComplexToArg_kernel_kernel0,@function
__device_stub__accComplexToArg_kernel_kernel0: # @__device_stub__accComplexToArg_kernel_kernel0
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
# Build the kernel-argument pointer array.
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $accComplexToArg_kernel_kernel0, %edi
# Stack args for hipLaunchKernel: stream, then sharedMem on top.
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size __device_stub__accComplexToArg_kernel_kernel0, .Lfunc_end1-__device_stub__accComplexToArg_kernel_kernel0
.cfi_endproc
# -- End function
# Host launch stub for accComplexToMag_kernel_kernel0: two pointers
# (rdi, rsi) and one int (edx); identical layout to the previous stub.
.globl __device_stub__accComplexToMag_kernel_kernel0 # -- Begin function __device_stub__accComplexToMag_kernel_kernel0
.p2align 4, 0x90
.type __device_stub__accComplexToMag_kernel_kernel0,@function
__device_stub__accComplexToMag_kernel_kernel0: # @__device_stub__accComplexToMag_kernel_kernel0
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
# Build the kernel-argument pointer array at 80(%rsp).
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $accComplexToMag_kernel_kernel0, %edi
# Stack args for hipLaunchKernel: stream, then sharedMem on top.
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size __device_stub__accComplexToMag_kernel_kernel0, .Lfunc_end2-__device_stub__accComplexToMag_kernel_kernel0
.cfi_endproc
# -- End function
# Host launch stub for accComplexToMagPhase_kernel_kernel0: three
# pointers (rdi, rsi, rdx) and one int (ecx).  Same scheme as the
# stubs above with a four-entry kernel-argument array.
.globl __device_stub__accComplexToMagPhase_kernel_kernel0 # -- Begin function __device_stub__accComplexToMagPhase_kernel_kernel0
.p2align 4, 0x90
.type __device_stub__accComplexToMagPhase_kernel_kernel0,@function
__device_stub__accComplexToMagPhase_kernel_kernel0: # @__device_stub__accComplexToMagPhase_kernel_kernel0
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
# Build the kernel-argument pointer array at 80(%rsp).
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $accComplexToMagPhase_kernel_kernel0, %edi
# Stack args for hipLaunchKernel: stream, then sharedMem on top.
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size __device_stub__accComplexToMagPhase_kernel_kernel0, .Lfunc_end3-__device_stub__accComplexToMagPhase_kernel_kernel0
.cfi_endproc
# -- End function
# Host launch stub for accComplexToMagSquared_kernel_kernel0: two
# pointers (rdi, rsi) and one int (edx); identical layout to the
# accComplexToMag stub.
.globl __device_stub__accComplexToMagSquared_kernel_kernel0 # -- Begin function __device_stub__accComplexToMagSquared_kernel_kernel0
.p2align 4, 0x90
.type __device_stub__accComplexToMagSquared_kernel_kernel0,@function
__device_stub__accComplexToMagSquared_kernel_kernel0: # @__device_stub__accComplexToMagSquared_kernel_kernel0
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
# Build the kernel-argument pointer array at 80(%rsp).
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $accComplexToMagSquared_kernel_kernel0, %edi
# Stack args for hipLaunchKernel: stream, then sharedMem on top.
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size __device_stub__accComplexToMagSquared_kernel_kernel0, .Lfunc_end4-__device_stub__accComplexToMagSquared_kernel_kernel0
.cfi_endproc
# -- End function
# Host launch stub for accMagPhaseToComplex_kernel_kernel0: three
# pointers (rdi, rsi, rdx) and one int (ecx); identical layout to the
# accComplexToMagPhase stub.
.globl __device_stub__accMagPhaseToComplex_kernel_kernel0 # -- Begin function __device_stub__accMagPhaseToComplex_kernel_kernel0
.p2align 4, 0x90
.type __device_stub__accMagPhaseToComplex_kernel_kernel0,@function
__device_stub__accMagPhaseToComplex_kernel_kernel0: # @__device_stub__accMagPhaseToComplex_kernel_kernel0
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
# Build the kernel-argument pointer array at 80(%rsp).
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $accMagPhaseToComplex_kernel_kernel0, %edi
# Stack args for hipLaunchKernel: stream, then sharedMem on top.
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end5:
.size __device_stub__accMagPhaseToComplex_kernel_kernel0, .Lfunc_end5-__device_stub__accMagPhaseToComplex_kernel_kernel0
.cfi_endproc
# -- End function
# ----------------------------------------------------------------------
# Module constructor (run from .init_array): registers the embedded HIP
# fat binary once -- the handle is cached in __hip_gpubin_handle; the
# unsynchronized null check relies on static-initialization context --
# then registers all six kernel stubs under their string names and
# finally schedules __hip_module_dtor via a tail call to atexit.
# ----------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
# First call: register the fat binary and cache the handle.
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
# The 32 zeroed stack bytes below are the four trailing pointer
# arguments of __hipRegisterFunction (all NULL); r8d = -1 is the
# thread-limit argument.  This pattern repeats once per kernel.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $accLog_kernel_kernel0, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $accComplexToArg_kernel_kernel0, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $accComplexToMag_kernel_kernel0, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $accComplexToMagPhase_kernel_kernel0, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $accComplexToMagSquared_kernel_kernel0, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $accMagPhaseToComplex_kernel_kernel0, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register the destructor; tail call keeps the stack balanced.
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (registered with atexit by __hip_module_ctor):
# unregisters the fat binary if it was registered and clears the
# cached handle so the teardown is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
# The push realigns %rsp to 16 before the call.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
# ----------------------------------------------------------------------
# Host-side kernel handles: each public kernel symbol is an 8-byte
# object pointing at its launch stub; the runtime maps this handle to
# the device function registered in __hip_module_ctor.
# ----------------------------------------------------------------------
.type accLog_kernel_kernel0,@object # @accLog_kernel_kernel0
.section .rodata,"a",@progbits
.globl accLog_kernel_kernel0
.p2align 3, 0x0
accLog_kernel_kernel0:
.quad __device_stub__accLog_kernel_kernel0
.size accLog_kernel_kernel0, 8
.type accComplexToArg_kernel_kernel0,@object # @accComplexToArg_kernel_kernel0
.globl accComplexToArg_kernel_kernel0
.p2align 3, 0x0
accComplexToArg_kernel_kernel0:
.quad __device_stub__accComplexToArg_kernel_kernel0
.size accComplexToArg_kernel_kernel0, 8
.type accComplexToMag_kernel_kernel0,@object # @accComplexToMag_kernel_kernel0
.globl accComplexToMag_kernel_kernel0
.p2align 3, 0x0
accComplexToMag_kernel_kernel0:
.quad __device_stub__accComplexToMag_kernel_kernel0
.size accComplexToMag_kernel_kernel0, 8
.type accComplexToMagPhase_kernel_kernel0,@object # @accComplexToMagPhase_kernel_kernel0
.globl accComplexToMagPhase_kernel_kernel0
.p2align 3, 0x0
accComplexToMagPhase_kernel_kernel0:
.quad __device_stub__accComplexToMagPhase_kernel_kernel0
.size accComplexToMagPhase_kernel_kernel0, 8
.type accComplexToMagSquared_kernel_kernel0,@object # @accComplexToMagSquared_kernel_kernel0
.globl accComplexToMagSquared_kernel_kernel0
.p2align 3, 0x0
accComplexToMagSquared_kernel_kernel0:
.quad __device_stub__accComplexToMagSquared_kernel_kernel0
.size accComplexToMagSquared_kernel_kernel0, 8
.type accMagPhaseToComplex_kernel_kernel0,@object # @accMagPhaseToComplex_kernel_kernel0
.globl accMagPhaseToComplex_kernel_kernel0
.p2align 3, 0x0
accMagPhaseToComplex_kernel_kernel0:
.quad __device_stub__accMagPhaseToComplex_kernel_kernel0
.size accMagPhaseToComplex_kernel_kernel0, 8
# NUL-terminated kernel-name strings passed to __hipRegisterFunction.
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "accLog_kernel_kernel0"
.size .L__unnamed_1, 22
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "accComplexToArg_kernel_kernel0"
.size .L__unnamed_2, 31
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "accComplexToMag_kernel_kernel0"
.size .L__unnamed_3, 31
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "accComplexToMagPhase_kernel_kernel0"
.size .L__unnamed_4, 36
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "accComplexToMagSquared_kernel_kernel0"
.size .L__unnamed_5, 38
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "accMagPhaseToComplex_kernel_kernel0"
.size .L__unnamed_6, 36
# Fat-binary wrapper: { magic 0x48495046, version 1, &__hip_fatbin, 0 }.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Zero-initialized cache slot for the fat-binary handle.
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Run __hip_module_ctor at program start.
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__accLog_kernel_kernel0
.addrsig_sym __device_stub__accComplexToArg_kernel_kernel0
.addrsig_sym __device_stub__accComplexToMag_kernel_kernel0
.addrsig_sym __device_stub__accComplexToMagPhase_kernel_kernel0
.addrsig_sym __device_stub__accComplexToMagSquared_kernel_kernel0
.addrsig_sym __device_stub__accMagPhaseToComplex_kernel_kernel0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym accLog_kernel_kernel0
.addrsig_sym accComplexToArg_kernel_kernel0
.addrsig_sym accComplexToMag_kernel_kernel0
.addrsig_sym accComplexToMagPhase_kernel_kernel0
.addrsig_sym accComplexToMagSquared_kernel_kernel0
.addrsig_sym accMagPhaseToComplex_kernel_kernel0
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
// Clamp the linear coefficient of a per-batch 2-term polynomial fit and
// refit the constant term.  One thread handles one batch element.
//
// For element i the coefficients live at CA[2*i] (constant term) and
// CA[2*i+1] (linear term).  If CA[2*i+1] falls outside
// [-maxCoeff, maxCoeff] it is clamped to the nearest bound and the
// constant term is recomputed as
//   (sum over RA window - CA[2*i+1] * sum over DA window) / (rbs*rScale).
// Layout assumption (inferred from the indexing -- confirm with caller):
// DA stores nCoeff = 2 planes of rbs*rScale floats per element and the
// refit sums the second plane (daOffset + rbs*rScale + j).
// NOTE(review): the int products i*rbs*rScale*nCoeff can overflow for
// large batches -- TODO confirm the expected value ranges.
extern "C"
__global__ void limitCoeff
(int nBatch, int rbs, int rScale, float maxCoeff,
float *DA, float *RA, float *CA)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch)
{
int i = taskIdx; // % (nBatch / 2);
// support only 2 coefficients
int nCoeff = 2;
// locate arrays pointer
int daOffset = i * rbs * rScale * nCoeff;
int raOffset = i * rbs * rScale;
int caOffset = i * nCoeff; // support only 2 coefficients
// check if need to refit coefficients
if (CA[caOffset + 1] > maxCoeff || CA[caOffset + 1] < -maxCoeff) {
// set to maximum or minimum depend on sign
if (CA[caOffset + 1] > maxCoeff) {
CA[caOffset + 1] = maxCoeff;
} else if (CA[caOffset + 1] < -maxCoeff) {
CA[caOffset + 1] = -maxCoeff;
}
// refit coefficients
float suma = 0.0f; // power 1 coeff
float sumb = 0.0f; // power 0 coeff
for(int j = 0; j<rbs * rScale ;j++){
suma += DA[daOffset + rbs * rScale + j];
sumb += RA[raOffset + j];
}
CA[caOffset] = (sumb - CA[caOffset + 1] * suma) / (rbs * rScale);
}
}
}
|
.file "tmpxft_001b794c_00000000-6_limitCoeff.cudafe1.cpp"
.text
#APP
#NO_APP
# Atexit hook: unregisters the CUDA fat binary whose handle was stored
# in _ZL20__cudaFatCubinHandle by __sti____cudaRegisterAllv below.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# ----------------------------------------------------------------------
# CUDA host launch stub for limitCoeff (GCC-generated, SysV AMD64).
# Arguments: three ints (edi, esi, edx), one float (xmm0) and three
# pointers (rcx, r8, r9).  Spills them, builds the void* argument
# array at 112(%rsp), pops the launch configuration and, when
# __cudaPopCallConfiguration returns 0, forwards it to
# cudaLaunchKernel.  Protected by a stack-smashing canary (%fs:40).
# ----------------------------------------------------------------------
.globl _Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_
.type _Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_, @function
_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# Spill the seven kernel arguments.
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movl %edx, 36(%rsp)
movss %xmm0, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
# Install the stack-protector canary.
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# Kernel-argument pointer array at 112(%rsp).
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
# Default-initialize grid/block dim3s to (1,1,1) before popping the
# recorded launch configuration over them.
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration
# Zero return means a configuration was popped: launch the kernel.
testl %eax, %eax
je .L7
.L3:
# Verify the canary before returning.
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
# Stack args for cudaLaunchKernel: stream pushed first, then
# sharedMem (offsets shift by 8 after each push).
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq limitCoeff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_, .-_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_
# Host-side entry point bound to the limitCoeff kernel symbol; simply
# forwards to the device stub above (registers are left untouched).
.globl limitCoeff
.type limitCoeff, @function
limitCoeff:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size limitCoeff, .-limitCoeff
# Kernel name string used by __cudaRegisterFunction below.
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "limitCoeff"
.text
# ----------------------------------------------------------------------
# Static initializer (run from .init_array): registers the fat binary,
# registers limitCoeff under its name, finalizes registration with
# __cudaRegisterFatBinaryEnd and schedules the unregister hook above
# via atexit.
# ----------------------------------------------------------------------
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# Four NULL stack arguments -- the trailing registration slots of
# __cudaRegisterFunction; r8d = -1 is the thread-limit argument.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq limitCoeff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Run the static registration initializer at program start.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
# Fat-binary descriptor: { magic 0x466243B1, version 1, &fatbinData, 0 }.
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
# Zero-initialized slot for the fat-binary handle.
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
# GNU property note advertising x86 CET features (0x3 = IBT | SHSTK).
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
// Verbatim duplicate of the limitCoeff CUDA kernel that appears
// earlier in this file: clamps the linear fit coefficient CA[2*i+1]
// to [-maxCoeff, maxCoeff] and, when clamping occurred, refits the
// constant term CA[2*i] from sums over the RA window and the second
// DA plane, each of length rbs*rScale.
extern "C"
__global__ void limitCoeff
(int nBatch, int rbs, int rScale, float maxCoeff,
float *DA, float *RA, float *CA)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch)
{
int i = taskIdx; // % (nBatch / 2);
// support only 2 coefficients
int nCoeff = 2;
// locate arrays pointer
int daOffset = i * rbs * rScale * nCoeff;
int raOffset = i * rbs * rScale;
int caOffset = i * nCoeff; // support only 2 coefficients
// check if need to refit coefficients
if (CA[caOffset + 1] > maxCoeff || CA[caOffset + 1] < -maxCoeff) {
// set to maximum or minimum depend on sign
if (CA[caOffset + 1] > maxCoeff) {
CA[caOffset + 1] = maxCoeff;
} else if (CA[caOffset + 1] < -maxCoeff) {
CA[caOffset + 1] = -maxCoeff;
}
// refit coefficients
float suma = 0.0f; // power 1 coeff
float sumb = 0.0f; // power 0 coeff
for(int j = 0; j<rbs * rScale ;j++){
suma += DA[daOffset + rbs * rScale + j];
sumb += RA[raOffset + j];
}
CA[caOffset] = (sumb - CA[caOffset + 1] * suma) / (rbs * rScale);
}
}
}
|
// HIP port of the limitCoeff CUDA kernel: identical source with the
// hip_runtime header added.  Clamps the linear fit coefficient
// CA[2*i+1] of each batch element to [-maxCoeff, maxCoeff]; when a
// clamp happened, refits the constant term CA[2*i] as
//   (sum(RA window) - CA[2*i+1] * sum(DA second plane)) / (rbs*rScale).
// Layout assumption (inferred from the indexing -- confirm with
// caller): DA holds nCoeff = 2 planes of rbs*rScale floats per element.
#include <hip/hip_runtime.h>
extern "C"
__global__ void limitCoeff
(int nBatch, int rbs, int rScale, float maxCoeff,
float *DA, float *RA, float *CA)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch)
{
int i = taskIdx; // % (nBatch / 2);
// support only 2 coefficients
int nCoeff = 2;
// locate arrays pointer
int daOffset = i * rbs * rScale * nCoeff;
int raOffset = i * rbs * rScale;
int caOffset = i * nCoeff; // support only 2 coefficients
// check if need to refit coefficients
if (CA[caOffset + 1] > maxCoeff || CA[caOffset + 1] < -maxCoeff) {
// set to maximum or minimum depend on sign
if (CA[caOffset + 1] > maxCoeff) {
CA[caOffset + 1] = maxCoeff;
} else if (CA[caOffset + 1] < -maxCoeff) {
CA[caOffset + 1] = -maxCoeff;
}
// refit coefficients
float suma = 0.0f; // power 1 coeff
float sumb = 0.0f; // power 0 coeff
for(int j = 0; j<rbs * rScale ;j++){
suma += DA[daOffset + rbs * rScale + j];
sumb += RA[raOffset + j];
}
CA[caOffset] = (sumb - CA[caOffset + 1] * suma) / (rbs * rScale);
}
}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
// Verbatim duplicate of the HIP limitCoeff kernel defined earlier in
// this file: per-element clamp of the linear coefficient to
// [-maxCoeff, maxCoeff] followed by a refit of the constant term.
#include <hip/hip_runtime.h>
extern "C"
__global__ void limitCoeff
(int nBatch, int rbs, int rScale, float maxCoeff,
float *DA, float *RA, float *CA)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch)
{
int i = taskIdx; // % (nBatch / 2);
// support only 2 coefficients
int nCoeff = 2;
// locate arrays pointer
int daOffset = i * rbs * rScale * nCoeff;
int raOffset = i * rbs * rScale;
int caOffset = i * nCoeff; // support only 2 coefficients
// check if need to refit coefficients
if (CA[caOffset + 1] > maxCoeff || CA[caOffset + 1] < -maxCoeff) {
// set to maximum or minimum depend on sign
if (CA[caOffset + 1] > maxCoeff) {
CA[caOffset + 1] = maxCoeff;
} else if (CA[caOffset + 1] < -maxCoeff) {
CA[caOffset + 1] = -maxCoeff;
}
// refit coefficients
float suma = 0.0f; // power 1 coeff
float sumb = 0.0f; // power 0 coeff
for(int j = 0; j<rbs * rScale ;j++){
suma += DA[daOffset + rbs * rScale + j];
sumb += RA[raOffset + j];
}
CA[caOffset] = (sumb - CA[caOffset + 1] * suma) / (rbs * rScale);
}
}
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; gfx1100 (wave32) device code for the HIP limitCoeff kernel.
; Register conventions here (see the .amdhsa_kernel descriptor and the
; kernarg metadata later in the file): s[0:1] = kernarg segment
; pointer, s15 = workgroup id x, v0 = workitem id x.
.protected limitCoeff
.globl limitCoeff
.p2align 8
.type limitCoeff,@function
limitCoeff:
; kernarg +0x34 = hidden_group_size_x (blockDim.x, low 16 bits);
; kernarg +0x0 = nBatch.
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; v3 = taskIdx = blockIdx.x * blockDim.x + threadIdx.x
v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
; Narrow exec to lanes with taskIdx < nBatch; skip everything if none.
v_cmpx_gt_i32_e64 s3, v3
s_cbranch_execz .LBB0_8
; v0 = caOffset = 2*taskIdx.  Load CA (kernarg +0x20) and maxCoeff
; (kernarg +0xc); v[1:2] = &CA[caOffset + 1].
v_lshlrev_b32_e32 v0, 1, v3
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x20
s_load_b32 s4, s[0:1], 0xc
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v1, 1, v0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s2, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
global_load_b32 v4, v[1:2], off
s_waitcnt vmcnt(0)
; s5 = (CA[caOffset+1] > maxCoeff), s6 = (CA[caOffset+1] < -maxCoeff);
; keep only lanes that need the clamp-and-refit path.
v_cmp_lt_f32_e64 s5, s4, v4
v_cmp_lt_f32_e64 s6, v4, -s4
s_or_b32 s7, s5, s6
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s7
s_cbranch_execz .LBB0_8
s_or_b32 s7, s5, s6
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s6, s7
s_cbranch_execz .LBB0_4
; Clamp: CA[caOffset+1] = s5 ? maxCoeff : -maxCoeff.
v_cndmask_b32_e64 v4, -s4, s4, s5
global_store_b32 v[1:2], v4, off
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s6
; s4 = rbs * rScale (kernarg +0x4 and +0x8); v7/v8 are the sumb/suma
; accumulators.  Skip the loop if the window is empty.
s_load_b64 s[4:5], s[0:1], 0x4
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v8, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB0_7
; Set up the loop pointers: v[3:4] = &RA[raOffset] (RA base at kernarg
; +0x18 in s[10:11]), v[5:6] = &DA[daOffset + rbs*rScale] (DA base at
; kernarg +0x10 in s[8:9]); s0 counts iterations down from rbs*rScale.
v_mul_lo_u32 v3, s4, v3
s_load_b128 s[8:11], s[0:1], 0x10
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v8, 0
s_mov_b32 s0, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshl_add_u32 v5, v3, 1, s4
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s10, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s11, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v5, vcc_lo, s8, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s9, v6, vcc_lo
.LBB0_6:
; Reduction loop: v8 (suma) += DA[...], v7 (sumb) += RA[...];
; both pointers advance by 4 bytes per iteration.
global_load_b32 v9, v[5:6], off
global_load_b32 v10, v[3:4], off
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
v_add_co_u32 v5, vcc_lo, v5, 4
v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
s_add_i32 s0, s0, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s0, 0
s_waitcnt vmcnt(0)
v_dual_add_f32 v8, v8, v9 :: v_dual_add_f32 v7, v7, v10
s_cbranch_scc0 .LBB0_6
.LBB0_7:
; CA[caOffset] = (sumb - CA[caOffset+1]*suma) / float(rbs*rScale).
; The v_div_scale / v_rcp / v_fma chain below is the standard
; IEEE-accurate f32 division expansion (Newton-Raphson refinement
; followed by v_div_fmas / v_div_fixup).
global_load_b32 v1, v[1:2], off
v_cvt_f32_i32_e32 v2, s4
s_waitcnt vmcnt(0)
v_fma_f32 v3, -v8, v1, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v1, null, v2, v2, v3
v_div_scale_f32 v6, vcc_lo, v3, v2, v3
v_rcp_f32_e32 v4, v1
s_waitcnt_depctr 0xfff
v_fma_f32 v5, -v1, v4, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v5, v4
v_mul_f32_e32 v5, v6, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v7, -v1, v5, v6
v_fmac_f32_e32 v5, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f32 v6, -v1, v5, v6
v_ashrrev_i32_e32 v1, 31, v0
v_div_fmas_f32 v4, v6, v4, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_div_fixup_f32 v2, v4, v2, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_8:
; All lanes done (or masked off): release VGPRs and end the program.
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel limitCoeff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size limitCoeff, .Lfunc_end0-limitCoeff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: limitCoeff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: limitCoeff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
// Verbatim duplicate of the HIP limitCoeff kernel defined earlier in
// this file: per-element clamp of the linear coefficient to
// [-maxCoeff, maxCoeff] followed by a refit of the constant term.
#include <hip/hip_runtime.h>
extern "C"
__global__ void limitCoeff
(int nBatch, int rbs, int rScale, float maxCoeff,
float *DA, float *RA, float *CA)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch)
{
int i = taskIdx; // % (nBatch / 2);
// support only 2 coefficients
int nCoeff = 2;
// locate arrays pointer
int daOffset = i * rbs * rScale * nCoeff;
int raOffset = i * rbs * rScale;
int caOffset = i * nCoeff; // support only 2 coefficients
// check if need to refit coefficients
if (CA[caOffset + 1] > maxCoeff || CA[caOffset + 1] < -maxCoeff) {
// set to maximum or minimum depend on sign
if (CA[caOffset + 1] > maxCoeff) {
CA[caOffset + 1] = maxCoeff;
} else if (CA[caOffset + 1] < -maxCoeff) {
CA[caOffset + 1] = -maxCoeff;
}
// refit coefficients
float suma = 0.0f; // power 1 coeff
float sumb = 0.0f; // power 0 coeff
for(int j = 0; j<rbs * rScale ;j++){
suma += DA[daOffset + rbs * rScale + j];
sumb += RA[raOffset + j];
}
CA[caOffset] = (sumb - CA[caOffset + 1] * suma) / (rbs * rScale);
}
}
}
|
.text
.file "limitCoeff.hip"
# ----------------------------------------------------------------------
# HIP host launch stub for limitCoeff (clang-generated, SysV AMD64).
# Arguments: three ints (edi, esi, edx), one float (xmm0) and three
# pointers (rcx, r8, r9).  Spills them, builds the void* argument
# array at 96(%rsp), pops the launch configuration recorded at the
# call site and forwards everything to hipLaunchKernel.
# ----------------------------------------------------------------------
.globl __device_stub__limitCoeff # -- Begin function __device_stub__limitCoeff
.p2align 4, 0x90
.type __device_stub__limitCoeff,@function
__device_stub__limitCoeff: # @__device_stub__limitCoeff
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill the seven kernel arguments.
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
# Kernel-argument pointer array at 96(%rsp).
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rax
movq %rax, 144(%rsp)
# __hipPopCallConfiguration(&gridDim, &blockDim, &sharedMem, &stream)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $limitCoeff, %edi
# Stack args for hipLaunchKernel: stream pushed first, then sharedMem
# on top (offsets shift by 8 after each push).
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size __device_stub__limitCoeff, .Lfunc_end0-__device_stub__limitCoeff
.cfi_endproc
# -- End function
# Module constructor (run from .init_array): registers the embedded
# fat binary once (handle cached in __hip_gpubin_handle), registers
# the single limitCoeff kernel under its name, then schedules
# __hip_module_dtor via a tail call to atexit.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
# The 32 zeroed stack bytes are the four trailing NULL pointer
# arguments of __hipRegisterFunction; r8d = -1 is the thread limit.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $limitCoeff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (registered with atexit): unregisters the fat
# binary if present and clears the handle so teardown is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
# The push realigns %rsp to 16 before the call.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
# Host-side kernel handle: the public limitCoeff symbol is an 8-byte
# pointer to the launch stub above.
.type limitCoeff,@object # @limitCoeff
.section .rodata,"a",@progbits
.globl limitCoeff
.p2align 3, 0x0
limitCoeff:
.quad __device_stub__limitCoeff
.size limitCoeff, 8
# Kernel name string passed to __hipRegisterFunction.
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "limitCoeff"
.size .L__unnamed_1, 11
# Fat-binary wrapper: { magic 0x48495046, version 1, &__hip_fatbin, 0 }.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Zero-initialized cache slot for the fat-binary handle.
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Run __hip_module_ctor at program start.
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__limitCoeff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym limitCoeff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_001b794c_00000000-6_limitCoeff.cudafe1.cpp"
.text
#APP
#NO_APP
	# static void __cudaUnregisterBinaryUtil(void)
	# atexit handler: unregisters the CUDA fat binary captured at startup.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2029:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	# Launch stub for __global__ limitCoeff(int,int,int,float,float*,float*,float*)
	# (signature per the mangled name _Z10limitCoeffiiifPfS_S_): pops the
	# <<<grid,block,shmem,stream>>> configuration recorded by the runtime and
	# forwards everything to cudaLaunchKernel.
	.globl	_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_
	.type	_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_, @function
_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_:
.LFB2051:
	.cfi_startproc
	endbr64
	subq	$184, %rsp
	.cfi_def_cfa_offset 192
	# Spill the seven kernel arguments (3 x int, 1 x float, 3 x pointer).
	movl	%edi, 44(%rsp)
	movl	%esi, 40(%rsp)
	movl	%edx, 36(%rsp)
	movss	%xmm0, 32(%rsp)
	movq	%rcx, 24(%rsp)
	movq	%r8, 16(%rsp)
	movq	%r9, 8(%rsp)
	# Stack-protector canary.
	movq	%fs:40, %rax
	movq	%rax, 168(%rsp)
	xorl	%eax, %eax
	# Build the void* kernel-argument array at 112(%rsp), one entry per
	# spilled argument, in declaration order.
	leaq	44(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	40(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	36(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	32(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	24(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 160(%rsp)
	# Default-initialize the grid/block dim3 values to (1,1,1).
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	movl	$1, 72(%rsp)
	movl	$1, 76(%rsp)
	movl	$1, 80(%rsp)
	movl	$1, 84(%rsp)
	# Pop the launch configuration pushed by the <<<...>>> call site.
	leaq	56(%rsp), %rcx
	leaq	48(%rsp), %rdx
	leaq	76(%rsp), %rsi
	leaq	64(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7                     # 0 => configuration popped; go launch
.L3:
	# Verify the stack canary, then return.
	movq	168(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L8
	addq	$184, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	# Push stream and shared-memory size as cudaLaunchKernel's stack
	# arguments; remaining offsets below are shifted by these two pushes.
	pushq	56(%rsp)
	.cfi_def_cfa_offset 200
	pushq	56(%rsp)
	.cfi_def_cfa_offset 208
	leaq	128(%rsp), %r9          # kernel-argument array
	movq	92(%rsp), %rcx
	movl	100(%rsp), %r8d
	movq	80(%rsp), %rsi
	movl	88(%rsp), %edx
	leaq	limitCoeff(%rip), %rdi  # host function handle
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 192
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2051:
	.size	_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_, .-_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_
	# Host entry point for the limitCoeff kernel: arguments arrive in the
	# C ABI registers and are forwarded unchanged to the launch stub.
	.globl	limitCoeff
	.type	limitCoeff, @function
limitCoeff:
.LFB2052:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	call	_Z38__device_stub__Z10limitCoeffiiifPfS_S_iiifPfS_S_
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2052:
	.size	limitCoeff, .-limitCoeff
	# Kernel name used by __cudaRegisterFunction.
	.section	.rodata.str1.1,"aMS",@progbits,1
.LC0:
	.string	"limitCoeff"
	.text
	# static void __sti____cudaRegisterAll(void)
	# init_array constructor: registers the embedded fat binary and the
	# limitCoeff kernel with the CUDA runtime, then queues the unregister
	# helper with atexit().
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	# Four NULL stack args for the trailing out-parameters of the
	# (undocumented, internal) registration API.
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d               # -1: presumably "no thread limit" (internal API)
	leaq	.LC0(%rip), %rdx        # device-side name
	movq	%rdx, %rcx              # host-side name (same string)
	leaq	limitCoeff(%rip), %rsi  # host wrapper address used as the key
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2054:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
	# Run the registration constructor before main().
	.section	.init_array,"aw"
	.align 8
	.quad	_ZL24__sti____cudaRegisterAllv
	# Fat-binary wrapper: magic 0x466243B1, version 1, pointer to the
	# embedded device-code blob (fatbinData), reserved field.
	.section	.nvFatBinSegment,"aw"
	.align 8
	.type	_ZL15__fatDeviceText, @object
	.size	_ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
	.long	1180844977
	.long	1
	.quad	fatbinData
	.quad	0
	# Module-local handle returned by __cudaRegisterFatBinary.
	.local	_ZL20__cudaFatCubinHandle
	.comm	_ZL20__cudaFatCubinHandle,8,8
	.ident	"GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
	.section	.note.GNU-stack,"",@progbits
	# CET note: marks the object as IBT/SHSTK compatible (endbr64 present).
	.section	.note.gnu.property,"a"
	.align 8
	.long	1f - 0f
	.long	4f - 1f
	.long	5
0:
	.string	"GNU"
1:
	.align 8
	.long	0xc0000002
	.long	3f - 2f
2:
	.long	0x3
3:
	.align 8
4:
|
.text
.file "limitCoeff.hip"
	# HIP launch stub for __global__ limitCoeff(int,int,int,float,
	# float*,float*,float*): pops the <<<...>>> configuration recorded at
	# the call site and forwards it to hipLaunchKernel.
	.globl	__device_stub__limitCoeff       # -- Begin function __device_stub__limitCoeff
	.p2align	4, 0x90
	.type	__device_stub__limitCoeff,@function
__device_stub__limitCoeff:              # @__device_stub__limitCoeff
	.cfi_startproc
# %bb.0:
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	# Spill the seven kernel arguments (3 x int, 1 x float, 3 x pointer).
	movl	%edi, 20(%rsp)
	movl	%esi, 16(%rsp)
	movl	%edx, 12(%rsp)
	movss	%xmm0, 8(%rsp)
	movq	%rcx, 88(%rsp)
	movq	%r8, 80(%rsp)
	movq	%r9, 72(%rsp)
	# Build the void* kernel-argument array at 96(%rsp).
	leaq	20(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 144(%rsp)
	# Pop grid dim, block dim, shared-memory size and stream recorded by
	# the <<<...>>> call site.
	leaq	56(%rsp), %rdi
	leaq	40(%rsp), %rsi
	leaq	32(%rsp), %rdx
	leaq	24(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	56(%rsp), %rsi
	movl	64(%rsp), %edx
	movq	40(%rsp), %rcx
	movl	48(%rsp), %r8d
	leaq	96(%rsp), %r9           # kernel-argument array
	movl	$limitCoeff, %edi       # host function handle
	# Stack args: stream, then shared-memory size (offsets shift per push).
	pushq	24(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	40(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$168, %rsp              # 152 locals + the two 8-byte pushes
	.cfi_adjust_cfa_offset -168
	retq
.Lfunc_end0:
	.size	__device_stub__limitCoeff, .Lfunc_end0-__device_stub__limitCoeff
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	# Register the embedded fat binary exactly once.
	# NOTE(review): plain load/compare/store — not thread-safe; relies on
	# init_array constructors running single-threaded.
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	# Zero the 32 bytes of trailing stack arguments passed to
	# __hipRegisterFunction.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$limitCoeff, %esi       # host-side key: address of the stub pointer
	movl	$.L__unnamed_1, %edx    # device function name
	movl	$.L__unnamed_1, %ecx    # host function name (same string)
	movl	$-1, %r8d               # presumably "no thread limit" (internal API)
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                  # TAILCALL: register the unload handler
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	# atexit handler: unregister the HIP fat binary exactly once.
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi              # handle == NULL => nothing was registered
	je	.LBB2_2
# %bb.1:
	pushq	%rax                    # 8-byte dummy push keeps rsp 16-aligned at the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # clear handle so a repeat call is a no-op
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
	# limitCoeff host symbol: an 8-byte pointer to the launch stub; this
	# object's address is the key passed to __hipRegisterFunction.
	.type	limitCoeff,@object              # @limitCoeff
	.section	.rodata,"a",@progbits
	.globl	limitCoeff
	.p2align	3, 0x0
limitCoeff:
	.quad	__device_stub__limitCoeff
	.size	limitCoeff, 8
	# NUL-terminated kernel name used during registration.
	.type	.L__unnamed_1,@object           # @0
	.section	.rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
	.asciz	"limitCoeff"
	.size	.L__unnamed_1, 11
	# Fat-binary wrapper: magic 0x48495046 ("FPIH" in memory), version 1,
	# pointer to the embedded device-code blob, reserved field.
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
	# Module-local cache of the handle returned by __hipRegisterFatBinary.
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	# Run __hip_module_ctor before main() to register the fat binary.
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym __device_stub__limitCoeff
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym limitCoeff
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
//
// Average extreme spread of five-shot group assuming impact coordinates follow standard normal distribution
//
// Building:
// nvcc -std=c++11 es_cuda.cu -o es_cuda -lcurand
//
// Running:
// for run in {1..10}; do ./es_cuda 15 | tee -a es_cuda.csv; done
//
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <cooperative_groups.h>
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <cuda_runtime.h>
#include <math.h>
#include <chrono>
#include <curand.h>
namespace cg = cooperative_groups;
using std::string;
using std::vector;
// First level of reduction: block-wide sum through dynamic shared memory.
//
// in:  this thread's partial sum.
// cta: the enclosing thread block (used only for synchronization).
// Returns the sum of `in` over all threads of the block. The caller must
// launch with blockDim.x * sizeof(double) bytes of dynamic shared memory
// to back sdata[] (es_cuda() does).
// NOTE(review): the halving loop assumes blockDim.x is a power of two
// (es_cuda launches with 128); other sizes would drop elements — confirm
// if ever changing the block size.
__device__ double reduce_sum(double in, cg::thread_block cta)
{
    extern __shared__ double sdata[];
    // Write to shared memory
    unsigned ltid = threadIdx.x;
    sdata[ltid] = in;
    cg::sync(cta);
    // Do reduction in shared memory: pairwise tree sum, halving the active
    // range each pass; every thread must reach the sync, so the sync sits
    // outside the if.
    for (unsigned s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
        if (ltid < s) {
            sdata[ltid] += sdata[ltid + s];
        }
        cg::sync(cta);
    }
    // All threads return the block total accumulated in slot 0.
    return sdata[0];
}
// Estimator kernel: each thread computes the extreme spread (largest
// pairwise distance) of the five-shot groups assigned to it, the block
// reduces its partial sums, and thread 0 stores one partial result per
// block into results[].
//
// results:   one double per block (gridDim.x entries).
// points:    10 * numGroups doubles; the first 5 * numGroups are X
//            coordinates, the next 5 * numGroups are Y coordinates,
//            five consecutive values per group.
// numGroups: number of five-shot groups to process.
__global__ void computeValue(double* const results,
                             const double* const points,
                             const unsigned numGroups)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    // Determine thread ID
    unsigned bid = blockIdx.x;
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned step = gridDim.x * blockDim.x;
    // Shift the input/output pointers to this thread's first group.
    // Fix: group i occupies coordinates [5*i, 5*i + 5), so the starting
    // offset must be 5 * tid — the previous `points + tid` disagreed with
    // the per-iteration advance of step * 5 below and made threads read
    // overlapping, mispartitioned windows.
    const double* pointx = points + 5 * tid;
    const double* pointy = pointx + 5 * numGroups;
    // Sum up extreme spread of all groups owned by this thread
    double sum = 0;
    for (unsigned i = tid ; i < numGroups; i += step, pointx += step * 5, pointy += step * 5) {
        // Pairwise distances between the five shots (C(5,2) = 10 pairs)
        double dx[10], dy[10];
        // Unroll nested comparison loops
        dx[0] = pointx[0] - pointx[1]; dy[0] = pointy[0] - pointy[1];
        dx[1] = pointx[0] - pointx[2]; dy[1] = pointy[0] - pointy[2];
        dx[2] = pointx[0] - pointx[3]; dy[2] = pointy[0] - pointy[3];
        dx[3] = pointx[0] - pointx[4]; dy[3] = pointy[0] - pointy[4];
        dx[4] = pointx[1] - pointx[2]; dy[4] = pointy[1] - pointy[2];
        dx[5] = pointx[1] - pointx[3]; dy[5] = pointy[1] - pointy[3];
        dx[6] = pointx[1] - pointx[4]; dy[6] = pointy[1] - pointy[4];
        dx[7] = pointx[2] - pointx[3]; dy[7] = pointy[2] - pointy[3];
        dx[8] = pointx[2] - pointx[4]; dy[8] = pointy[2] - pointy[4];
        dx[9] = pointx[3] - pointx[4]; dy[9] = pointy[3] - pointy[4];
        // Track the largest squared distance; take one sqrt at the end.
        double max_d2 = 0;
        for (unsigned j = 0; j < 10; j++) {
            auto candidate_d2 = dx[j] * dx[j] + dy[j] * dy[j];
            max_d2 = max(max_d2, candidate_d2);
        }
        double es = sqrt(max_d2);
        sum += es;
    }
    // Reduce within the block
    sum = reduce_sum(sum, cta);
    // Store the result
    if (threadIdx.x == 0) {
        results[bid] = sum;
    }
}
double es_cuda(unsigned power_of_4, unsigned seed)
{
// Get device properties
struct cudaDeviceProp deviceProperties;
cudaError_t cudaResult = cudaGetDeviceProperties(&deviceProperties, 0);
if (cudaResult != cudaSuccess) {
string msg("Could not get device properties: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Check precision is valid
if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
throw std::runtime_error("Device does not have double precision support");
}
// Check requested size is valid
const unsigned threadBlockSize = 128;
if (threadBlockSize > (deviceProperties.maxThreadsPerBlock)) {
throw std::runtime_error("Thread block size is greater than maxThreadsPerBlock");
}
dim3 block;
block.x = threadBlockSize;
// Attach to GPU
cudaResult = cudaSetDevice(0);
if (cudaResult != cudaSuccess) {
string msg("Could not set CUDA device: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Aim to launch around ten or more times as many blocks as there
// are multiprocessors on the target device.
dim3 grid;
const unsigned numGroups = 1 << (2 * power_of_4);
grid.x = numGroups / threadBlockSize;
while (grid.x > 20 * deviceProperties.multiProcessorCount) {
grid.x >>= 1;
}
// Get computeValue function properties and check the maximum block size
struct cudaFuncAttributes funcAttributes;
cudaResult = cudaFuncGetAttributes(&funcAttributes, computeValue);
if (cudaResult != cudaSuccess) {
string msg("Could not get function attributes: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
if (block.x > (unsigned)funcAttributes.maxThreadsPerBlock) {
throw std::runtime_error("Block X dimension is too large for computeValue kernel");
}
// Check the dimensions are valid
if (block.x > (unsigned)deviceProperties.maxThreadsDim[0]) {
throw std::runtime_error("Block X dimension is too large for device");
}
if (grid.x > (unsigned)deviceProperties.maxGridSize[0]) {
throw std::runtime_error("Grid X dimension is too large for device");
}
// Allocate memory for points
// Each simulation has ten random numbers to give five pairs of X and Y coordinates
double* d_points = 0;
cudaResult = cudaMalloc((void **)&d_points, 10 * numGroups * sizeof(double));
if (cudaResult != cudaSuccess) {
string msg("Could not allocate memory on device for random numbers: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Allocate memory for result
// Each thread block will produce one result
double* d_results = 0;
cudaResult = cudaMalloc((void**)&d_results, grid.x * sizeof(double));
if (cudaResult != cudaSuccess) {
string msg("Could not allocate memory on device for partial results: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Generate random points
curandStatus_t curandResult;
curandGenerator_t prng;
curandResult = curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not create pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = curandSetPseudoRandomGeneratorSeed(prng, seed);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not set seed for pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = curandGenerateNormalDouble(prng, (double*)d_points, 10 * numGroups, 0, 1);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not generate pseudo-random numbers: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = curandDestroyGenerator(prng);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not destroy pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
// Calculate and average group size
computeValue<<<grid, block, block.x * sizeof(double)>>>(d_results, d_points, numGroups);
// Copy the results back to host
vector<double> results(grid.x);
cudaResult = cudaMemcpy(&results[0], d_results, grid.x * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaResult != cudaSuccess) {
string msg("Could not copy results to host: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Complete sum-reduction
double sum = std::accumulate(results.begin(), results.end(), double(0));
// Cleanup
if (d_points) {
cudaFree(d_points);
}
if (d_results) {
cudaFree(d_results);
}
// Divide sum by count to get the average
return sum / numGroups;
}
int main(int argc, char **argv)
{
    // 4^exponent five-shot groups are simulated per call to es_cuda().
    unsigned exponent = 12;
    if (argc == 2) {
        exponent = atoi(argv[1]);
    }

    // Number of repetitions; beyond 4^12 groups, fold the excess into runs.
    unsigned runs = 12;
    if (exponent > 12) {
        runs <<= 2 * (exponent - 12);
        exponent = 12;
    }

    try {
        const auto t0 = std::chrono::system_clock::now();

        double total = 0;
        double lo = 100;
        double hi = 0;

        // 128-bit MCG seed stream; any odd starting state is valid.
        __uint128_t state = time(NULL) | 1;

        for (unsigned run = 0; run < runs; ++run) {
            const double r = es_cuda(exponent, (unsigned)(state >> 64));
            total += r;
            lo = fmin(r, lo);
            hi = fmax(r, hi);
            state *= 0xda942042e4dd58b5ULL;
        }
        total /= runs;

        const auto t1 = std::chrono::system_clock::now();
        const std::chrono::duration<double> elapsed = t1 - t0;

        // CSV row: min / mean / max extreme spread and wall-clock seconds.
        std::cout.precision(14);
        std::cout << "code,threads,power_of_4,min,avg,max,time\n";
        std::cout << "CUDA," << runs << "," << exponent << "," << lo << ","
                  << total << "," << hi << "," << elapsed.count() << "\n";
    } catch (std::runtime_error &e) { // es_cuda() can throw runtime exceptions
        fprintf(stderr, "runtime error (%s)\n", e.what());
        return(EXIT_FAILURE);
    }
    return(EXIT_SUCCESS);
}
|
code for sm_80
Function : _Z12computeValuePdPKdj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0xae0 ; /* 0x00000aa000007945 */
/* 0x000fe20003800000 */
/*0040*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R2, R0, c[0x0][0x0], R3 ; /* 0x0000000000027a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06070 */
/*0080*/ @P0 BRA 0xad0 ; /* 0x00000a4000000947 */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff057624 */
/* 0x000fe400078e00ff */
/*00a0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */
/* 0x000fe400078e00ff */
/*00b0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff097624 */
/* 0x000fe400078e00ff */
/*00c0*/ IMAD R5, R5, c[0x0][0x0], RZ ; /* 0x0000000005057a24 */
/* 0x000fe400078e02ff */
/*00d0*/ IMAD.WIDE.U32 R6, R2, R7, c[0x0][0x168] ; /* 0x00005a0002067625 */
/* 0x000fc800078e0007 */
/*00e0*/ IMAD R9, R9, 0x5, RZ ; /* 0x0000000509097824 */
/* 0x000fe200078e02ff */
/*00f0*/ MOV R11, R7 ; /* 0x00000007000b7202 */
/* 0x000fe20000000f00 */
/*0100*/ IMAD R37, R5, 0x5, RZ ; /* 0x0000000505257824 */
/* 0x000fe400078e02ff */
/*0110*/ IMAD.WIDE.U32 R8, R9, 0x8, R6 ; /* 0x0000000809087825 */
/* 0x000fc800078e0006 */
/*0120*/ IMAD.MOV.U32 R10, RZ, RZ, R6 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e0006 */
/*0130*/ IMAD.SHL.U32 R35, R37, 0x8, RZ ; /* 0x0000000825237824 */
/* 0x000fe200078e00ff */
/*0140*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0150*/ SHF.R.U32.HI R37, RZ, 0x1d, R37 ; /* 0x0000001dff257819 */
/* 0x000fe40000011625 */
/*0160*/ LDG.E.64 R12, [R8.64+0x8] ; /* 0x00000806080c7981 */
/* 0x000ea8000c1e1b00 */
/*0170*/ LDG.E.64 R14, [R8.64] ; /* 0x00000006080e7981 */
/* 0x000ea8000c1e1b00 */
/*0180*/ LDG.E.64 R16, [R10.64+0x8] ; /* 0x000008060a107981 */
/* 0x001ee8000c1e1b00 */
/*0190*/ LDG.E.64 R18, [R10.64] ; /* 0x000000060a127981 */
/* 0x000ee8000c1e1b00 */
/*01a0*/ LDG.E.64 R20, [R8.64+0x10] ; /* 0x0000100608147981 */
/* 0x000f28000c1e1b00 */
/*01b0*/ LDG.E.64 R22, [R10.64+0x10] ; /* 0x000010060a167981 */
/* 0x000f68000c1e1b00 */
/*01c0*/ LDG.E.64 R24, [R8.64+0x18] ; /* 0x0000180608187981 */
/* 0x000f28000c1e1b00 */
/*01d0*/ LDG.E.64 R26, [R10.64+0x18] ; /* 0x000018060a1a7981 */
/* 0x000f62000c1e1b00 */
/*01e0*/ DADD R30, -R12, R14 ; /* 0x000000000c1e7229 */
/* 0x004e0c000000010e */
/*01f0*/ DMUL R30, R30, R30 ; /* 0x0000001e1e1e7228 */
/* 0x001fc80000000000 */
/*0200*/ DADD R28, -R16, R18 ; /* 0x00000000101c7229 */
/* 0x008e0c0000000112 */
/*0210*/ DFMA R30, R28, R28, R30 ; /* 0x0000001c1c1e722b */
/* 0x001e08000000001e */
/*0220*/ DADD R28, R14, -R20 ; /* 0x000000000e1c7229 */
/* 0x010e480000000814 */
/*0230*/ DSETP.MAX.AND P0, P1, RZ, R30, PT ; /* 0x0000001eff00722a */
/* 0x0010a4000390f000 */
/*0240*/ IMAD.MOV.U32 R4, RZ, RZ, R30 ; /* 0x000000ffff047224 */
/* 0x000fe400078e001e */
/*0250*/ DMUL R28, R28, R28 ; /* 0x0000001c1c1c7228 */
/* 0x002fe20000000000 */
/*0260*/ IMAD.MOV.U32 R30, RZ, RZ, RZ ; /* 0x000000ffff1e7224 */
/* 0x001fc600078e00ff */
/*0270*/ DADD R32, R18, -R22 ; /* 0x0000000012207229 */
/* 0x020e240000000816 */
/*0280*/ SEL R30, R30, R4, P0 ; /* 0x000000041e1e7207 */
/* 0x004fc80000000000 */
/*0290*/ DFMA R28, R32, R32, R28 ; /* 0x00000020201c722b */
/* 0x001064000000001c */
/*02a0*/ IMAD.MOV.U32 R33, RZ, RZ, RZ ; /* 0x000000ffff217224 */
/* 0x001fe400078e00ff */
/*02b0*/ IMAD.MOV.U32 R32, RZ, RZ, R31 ; /* 0x000000ffff207224 */
/* 0x000fca00078e001f */
/*02c0*/ FSEL R33, R33, R32, P0 ; /* 0x0000002021217208 */
/* 0x000fe20000000000 */
/*02d0*/ IMAD.MOV.U32 R4, RZ, RZ, R29 ; /* 0x000000ffff047224 */
/* 0x002fe200078e001d */
/*02e0*/ @P1 LOP3.LUT R33, R32, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000020211812 */
/* 0x000fe200078efcff */
/*02f0*/ IMAD.MOV.U32 R32, RZ, RZ, R30 ; /* 0x000000ffff207224 */
/* 0x000fc600078e001e */
/*0300*/ MOV R31, R33 ; /* 0x00000021001f7202 */
/* 0x000fe20000000f00 */
/*0310*/ IMAD.MOV.U32 R33, RZ, RZ, R28 ; /* 0x000000ffff217224 */
/* 0x000fca00078e001c */
/*0320*/ DSETP.MAX.AND P0, P1, R30, R28, PT ; /* 0x0000001c1e00722a */
/* 0x000e08000390f000 */
/*0330*/ DADD R28, R14, -R24 ; /* 0x000000000e1c7229 */
/* 0x000e640000000818 */
/*0340*/ SEL R30, R32, R33, P0 ; /* 0x00000021201e7207 */
/* 0x001fe40000000000 */
/*0350*/ DADD R32, R18, -R26 ; /* 0x0000000012207229 */
/* 0x000fe2000000081a */
/*0360*/ FSEL R31, R31, R4, P0 ; /* 0x000000041f1f7208 */
/* 0x000fe40000000000 */
/*0370*/ IMAD.MOV.U32 R34, RZ, RZ, R30 ; /* 0x000000ffff227224 */
/* 0x000fe200078e001e */
/*0380*/ DMUL R28, R28, R28 ; /* 0x0000001c1c1c7228 */
/* 0x002e240000000000 */
/*0390*/ @P1 LOP3.LUT R31, R4, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000041f1812 */
/* 0x000fc800078efcff */
/*03a0*/ DFMA R28, R32, R32, R28 ; /* 0x00000020201c722b */
/* 0x001062000000001c */
/*03b0*/ IMAD.MOV.U32 R4, RZ, RZ, R31 ; /* 0x000000ffff047224 */
/* 0x000fe200078e001f */
/*03c0*/ LDG.E.64 R32, [R10.64+0x20] ; /* 0x000020060a207981 */
/* 0x001ea8000c1e1b00 */
/*03d0*/ DSETP.MAX.AND P0, P1, R30, R28, PT ; /* 0x0000001c1e00722a */
/* 0x002064000390f000 */
/*03e0*/ LDG.E.64 R30, [R8.64+0x20] ; /* 0x00002006081e7981 */
/* 0x001ee4000c1e1b00 */
/*03f0*/ MOV R36, R28 ; /* 0x0000001c00247202 */
/* 0x000fc40000000f00 */
/*0400*/ FSEL R4, R4, R29, P0 ; /* 0x0000001d04047208 */
/* 0x002fe40000000000 */
/*0410*/ SEL R28, R34, R36, P0 ; /* 0x00000024221c7207 */
/* 0x000fcc0000000000 */
/*0420*/ @P1 LOP3.LUT R4, R29, 0x80000, RZ, 0xfc, !PT ; /* 0x000800001d041812 */
/* 0x000fca00078efcff */
/*0430*/ IMAD.MOV.U32 R29, RZ, RZ, R4 ; /* 0x000000ffff1d7224 */
/* 0x000fe400078e0004 */
/*0440*/ IMAD.IADD R2, R5, 0x1, R2 ; /* 0x0000000105027824 */
/* 0x000fe200078e0202 */
/*0450*/ BSSY B1, 0xa70 ; /* 0x0000061000017945 */
/* 0x000fe20003800000 */
/*0460*/ DADD R18, R18, -R32 ; /* 0x0000000012127229 */
/* 0x004fc80000000820 */
/*0470*/ DADD R14, R14, -R30 ; /* 0x000000000e0e7229 */
/* 0x008e0c000000081e */
/*0480*/ DMUL R14, R14, R14 ; /* 0x0000000e0e0e7228 */
/* 0x001e0c0000000000 */
/*0490*/ DFMA R14, R18, R18, R14 ; /* 0x00000012120e722b */
/* 0x001064000000000e */
/*04a0*/ IMAD.MOV.U32 R18, RZ, RZ, R28 ; /* 0x000000ffff127224 */
/* 0x001fc800078e001c */
/*04b0*/ DSETP.MAX.AND P0, P1, R28, R14, PT ; /* 0x0000000e1c00722a */
/* 0x002e08000390f000 */
/*04c0*/ IMAD.MOV.U32 R19, RZ, RZ, R14 ; /* 0x000000ffff137224 */
/* 0x000fe400078e000e */
/*04d0*/ IMAD.MOV.U32 R4, RZ, RZ, R15 ; /* 0x000000ffff047224 */
/* 0x000fc600078e000f */
/*04e0*/ SEL R14, R18, R19, P0 ; /* 0x00000013120e7207 */
/* 0x001fe20000000000 */
/*04f0*/ DADD R18, R12, -R20 ; /* 0x000000000c127229 */
/* 0x000e220000000814 */
/*0500*/ FSEL R15, R29, R4, P0 ; /* 0x000000041d0f7208 */
/* 0x000fc60000000000 */
/*0510*/ DADD R28, R16, -R22 ; /* 0x00000000101c7229 */
/* 0x000fc80000000816 */
/*0520*/ DMUL R18, R18, R18 ; /* 0x0000001212127228 */
/* 0x001e220000000000 */
/*0530*/ @P1 LOP3.LUT R15, R4, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000040f1812 */
/* 0x000fca00078efcff */
/*0540*/ DFMA R28, R28, R28, R18 ; /* 0x0000001c1c1c722b */
/* 0x001e0c0000000012 */
/*0550*/ DSETP.MAX.AND P0, P1, R14, R28, PT ; /* 0x0000001c0e00722a */
/* 0x001e22000390f000 */
/*0560*/ MOV R18, R14 ; /* 0x0000000e00127202 */
/* 0x000fe20000000f00 */
/*0570*/ IMAD.MOV.U32 R4, RZ, RZ, R15 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000f */
/*0580*/ IMAD.MOV.U32 R19, RZ, RZ, R28 ; /* 0x000000ffff137224 */
/* 0x000fe200078e001c */
/*0590*/ DADD R14, R12, -R24 ; /* 0x000000000c0e7229 */
/* 0x000e480000000818 */
/*05a0*/ SEL R18, R18, R19, P0 ; /* 0x0000001312127207 */
/* 0x001fe40000000000 */
/*05b0*/ FSEL R19, R4, R29, P0 ; /* 0x0000001d04137208 */
/* 0x000fe20000000000 */
/*05c0*/ DMUL R14, R14, R14 ; /* 0x0000000e0e0e7228 */
/* 0x002fe40000000000 */
/*05d0*/ @P1 LOP3.LUT R19, R29, 0x80000, RZ, 0xfc, !PT ; /* 0x000800001d131812 */
/* 0x000fe400078efcff */
/*05e0*/ DADD R28, R16, -R26 ; /* 0x00000000101c7229 */
/* 0x000e0c000000081a */
/*05f0*/ DFMA R14, R28, R28, R14 ; /* 0x0000001c1c0e722b */
/* 0x001e08000000000e */
/*0600*/ DADD R12, R12, -R30 ; /* 0x000000000c0c7229 */
/* 0x000e48000000081e */
/*0610*/ DSETP.MAX.AND P0, P1, R18, R14, PT ; /* 0x0000000e1200722a */
/* 0x001e08000390f000 */
/*0620*/ DADD R16, R16, -R32 ; /* 0x0000000010107229 */
/* 0x000fc80000000820 */
/*0630*/ DMUL R12, R12, R12 ; /* 0x0000000c0c0c7228 */
/* 0x002e620000000000 */
/*0640*/ FSEL R19, R19, R15, P0 ; /* 0x0000000f13137208 */
/* 0x001fca0000000000 */
/*0650*/ DFMA R12, R16, R16, R12 ; /* 0x00000010100c722b */
/* 0x002062000000000c */
/*0660*/ @P1 LOP3.LUT R19, R15, 0x80000, RZ, 0xfc, !PT ; /* 0x000800000f131812 */
/* 0x000fe200078efcff */
/*0670*/ IMAD.MOV.U32 R17, RZ, RZ, R14 ; /* 0x000000ffff117224 */
/* 0x001fe400078e000e */
/*0680*/ IMAD.MOV.U32 R14, RZ, RZ, R18 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0012 */
/*0690*/ IMAD.MOV.U32 R15, RZ, RZ, R19 ; /* 0x000000ffff0f7224 */
/* 0x000fc600078e0013 */
/*06a0*/ SEL R14, R14, R17, P0 ; /* 0x000000110e0e7207 */
/* 0x000fe20000000000 */
/*06b0*/ DADD R16, R20, -R24 ; /* 0x0000000014107229 */
/* 0x000e0a0000000818 */
/*06c0*/ DSETP.MAX.AND P0, P1, R14, R12, PT ; /* 0x0000000c0e00722a */
/* 0x002e62000390f000 */
/*06d0*/ MOV R4, R15 ; /* 0x0000000f00047202 */
/* 0x000fc60000000f00 */
/*06e0*/ DMUL R16, R16, R16 ; /* 0x0000001010107228 */
/* 0x001fc80000000000 */
/*06f0*/ DADD R18, R22, -R26 ; /* 0x0000000016127229 */
/* 0x000e22000000081a */
/*0700*/ FSEL R29, R4, R13, P0 ; /* 0x0000000d041d7208 */
/* 0x002fca0000000000 */
/*0710*/ DFMA R16, R18, R18, R16 ; /* 0x000000121210722b */
/* 0x0010620000000010 */
/*0720*/ @P1 LOP3.LUT R29, R13, 0x80000, RZ, 0xfc, !PT ; /* 0x000800000d1d1812 */
/* 0x000fe200078efcff */
/*0730*/ IMAD.MOV.U32 R19, RZ, RZ, R12 ; /* 0x000000ffff137224 */
/* 0x001fc800078e000c */
/*0740*/ IMAD.MOV.U32 R13, RZ, RZ, R29 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e001d */
/*0750*/ SEL R12, R14, R19, P0 ; /* 0x000000130e0c7207 */
/* 0x000fcc0000000000 */
/*0760*/ DSETP.MAX.AND P0, P1, R12, R16, PT ; /* 0x000000100c00722a */
/* 0x002e22000390f000 */
/*0770*/ IMAD.MOV.U32 R4, RZ, RZ, R13 ; /* 0x000000ffff047224 */
/* 0x000fc600078e000d */
/*0780*/ DADD R20, R20, -R30 ; /* 0x0000000014147229 */
/* 0x000e62000000081e */
/*0790*/ IMAD.MOV.U32 R15, RZ, RZ, R16 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0010 */
/*07a0*/ FSEL R19, R4, R17, P0 ; /* 0x0000001104137208 */
/* 0x001fe40000000000 */
/*07b0*/ DADD R22, R22, -R32 ; /* 0x0000000016167229 */
/* 0x000fc80000000820 */
/*07c0*/ DMUL R20, R20, R20 ; /* 0x0000001414147228 */
/* 0x002e240000000000 */
/*07d0*/ @P1 LOP3.LUT R19, R17, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000011131812 */
/* 0x000fe400078efcff */
/*07e0*/ SEL R12, R12, R15, P0 ; /* 0x0000000f0c0c7207 */
/* 0x000fe40000000000 */
/*07f0*/ DFMA R22, R22, R22, R20 ; /* 0x000000161616722b */
/* 0x001e220000000014 */
/*0800*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fcc00078e0013 */
/*0810*/ DSETP.MAX.AND P0, P1, R12, R22, PT ; /* 0x000000160c00722a */
/* 0x001e22000390f000 */
/*0820*/ MOV R4, R13 ; /* 0x0000000d00047202 */
/* 0x000fe40000000f00 */
/*0830*/ IMAD.MOV.U32 R17, RZ, RZ, R23 ; /* 0x000000ffff117224 */
/* 0x000fe200078e0017 */
/*0840*/ DADD R24, R24, -R30 ; /* 0x0000000018187229 */
/* 0x000e48000000081e */
/*0850*/ DADD R14, R26, -R32 ; /* 0x000000001a0e7229 */
/* 0x000fe20000000820 */
/*0860*/ FSEL R19, R4, R17, P0 ; /* 0x0000001104137208 */
/* 0x001fc60000000000 */
/*0870*/ DMUL R24, R24, R24 ; /* 0x0000001818187228 */
/* 0x002e240000000000 */
/*0880*/ @P1 LOP3.LUT R19, R17, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000011131812 */
/* 0x000fe400078efcff */
/*0890*/ SEL R12, R12, R22, P0 ; /* 0x000000160c0c7207 */
/* 0x000fe40000000000 */
/*08a0*/ DFMA R14, R14, R14, R24 ; /* 0x0000000e0e0e722b */
/* 0x001e220000000018 */
/*08b0*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fcc00078e0013 */
/*08c0*/ DSETP.MAX.AND P0, P1, R12, R14, PT ; /* 0x0000000e0c00722a */
/* 0x001e22000390f000 */
/*08d0*/ IMAD.MOV.U32 R4, RZ, RZ, R13 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000d */
/*08e0*/ IMAD.MOV.U32 R17, RZ, RZ, R15 ; /* 0x000000ffff117224 */
/* 0x000fca00078e000f */
/*08f0*/ FSEL R19, R4, R17, P0 ; /* 0x0000001104137208 */
/* 0x001fcc0000000000 */
/*0900*/ @P1 LOP3.LUT R19, R17, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000011131812 */
/* 0x000fca00078efcff */
/*0910*/ IMAD.MOV.U32 R15, RZ, RZ, R19 ; /* 0x000000ffff0f7224 */
/* 0x000fc800078e0013 */
/*0920*/ MUFU.RSQ64H R13, R15 ; /* 0x0000000f000d7308 */
/* 0x000e220000001c00 */
/*0930*/ SEL R14, R12, R14, P0 ; /* 0x0000000e0c0e7207 */
/* 0x000fe40000000000 */
/*0940*/ IADD3 R12, R15, -0x3500000, RZ ; /* 0xfcb000000f0c7810 */
/* 0x000fe20007ffe0ff */
/*0950*/ IMAD.MOV.U32 R18, RZ, RZ, 0x0 ; /* 0x00000000ff127424 */
/* 0x000fe200078e00ff */
/*0960*/ MOV R19, 0x3fd80000 ; /* 0x3fd8000000137802 */
/* 0x000fc80000000f00 */
/*0970*/ DMUL R16, R12, R12 ; /* 0x0000000c0c107228 */
/* 0x001e0c0000000000 */
/*0980*/ DFMA R16, R14, -R16, 1 ; /* 0x3ff000000e10742b */
/* 0x001e0c0000000810 */
/*0990*/ DFMA R18, R16, R18, 0.5 ; /* 0x3fe000001012742b */
/* 0x001fc80000000012 */
/*09a0*/ DMUL R16, R12, R16 ; /* 0x000000100c107228 */
/* 0x000e220000000000 */
/*09b0*/ ISETP.GE.U32.AND P1, PT, R12, 0x7ca00000, PT ; /* 0x7ca000000c00780c */
/* 0x000fca0003f26070 */
/*09c0*/ DFMA R24, R18, R16, R12 ; /* 0x000000101218722b */
/* 0x001e0c000000000c */
/*09d0*/ DMUL R16, R14, R24 ; /* 0x000000180e107228 */
/* 0x001e080000000000 */
/*09e0*/ IADD3 R23, R25, -0x100000, RZ ; /* 0xfff0000019177810 */
/* 0x000fe20007ffe0ff */
/*09f0*/ IMAD.MOV.U32 R22, RZ, RZ, R24 ; /* 0x000000ffff167224 */
/* 0x000fe200078e0018 */
/*0a00*/ DFMA R18, R16, -R16, R14 ; /* 0x800000101012722b */
/* 0x001e22000000000e */
/*0a10*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fca0003f06070 */
/*0a20*/ DFMA R20, R18, R22, R16 ; /* 0x000000161214722b */
/* 0x0010620000000010 */
/*0a30*/ @!P1 BRA 0xa60 ; /* 0x0000002000009947 */
/* 0x000fea0003800000 */
/*0a40*/ MOV R4, 0xa60 ; /* 0x00000a6000047802 */
/* 0x000fca0000000f00 */
/*0a50*/ CALL.REL.NOINC 0xc70 ; /* 0x0000021000007944 */
/* 0x003fea0003c00000 */
/*0a60*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0a70*/ IADD3 R10, P1, R10, R35.reuse, RZ ; /* 0x000000230a0a7210 */
/* 0x080fe20007f3e0ff */
/*0a80*/ DADD R6, R20, R6 ; /* 0x0000000014067229 */
/* 0x0022a20000000006 */
/*0a90*/ IADD3 R8, P2, R8, R35, RZ ; /* 0x0000002308087210 */
/* 0x000fc60007f5e0ff */
/*0aa0*/ IMAD.X R11, R11, 0x1, R37.reuse, P1 ; /* 0x000000010b0b7824 */
/* 0x100fe400008e0625 */
/*0ab0*/ IMAD.X R9, R9, 0x1, R37, P2 ; /* 0x0000000109097824 */
/* 0x000fe200010e0625 */
/*0ac0*/ @!P0 BRA 0x160 ; /* 0xfffff69000008947 */
/* 0x006fea000383ffff */
/*0ad0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0ae0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0af0*/ STS.64 [R3.X8], R6 ; /* 0x0000000603007388 */
/* 0x000fe20000008a00 */
/*0b00*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fc60008011604 */
/*0b10*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*0b20*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe4000bf25270 */
/*0b30*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fd60003f05270 */
/*0b40*/ @!P1 BRA 0xc10 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0b50*/ SHF.L.U32 R2, R3, 0x3, RZ ; /* 0x0000000303027819 */
/* 0x000fe200000006ff */
/*0b60*/ IMAD.U32 R8, RZ, RZ, UR4 ; /* 0x00000004ff087e24 */
/* 0x000fca000f8e00ff */
/*0b70*/ ISETP.GE.U32.AND P1, PT, R3, R8, PT ; /* 0x000000080300720c */
/* 0x000fda0003f26070 */
/*0b80*/ @!P1 IMAD R6, R8, 0x8, R2 ; /* 0x0000000808069824 */
/* 0x000fe200078e0202 */
/*0b90*/ @!P1 LDS.64 R4, [R3.X8] ; /* 0x0000000003049984 */
/* 0x000fe20000008a00 */
/*0ba0*/ SHF.R.U32.HI R8, RZ, 0x1, R8 ; /* 0x00000001ff087819 */
/* 0x000fc80000011608 */
/*0bb0*/ @!P1 LDS.64 R6, [R6] ; /* 0x0000000006069984 */
/* 0x000e640000000a00 */
/*0bc0*/ @!P1 DADD R4, R4, R6 ; /* 0x0000000004049229 */
/* 0x002e4e0000000006 */
/*0bd0*/ @!P1 STS.64 [R3.X8], R4 ; /* 0x0000000403009388 */
/* 0x002fe80000008a00 */
/*0be0*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*0bf0*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fda0003f25270 */
/*0c00*/ @P1 BRA 0xb70 ; /* 0xffffff6000001947 */
/* 0x000fea000383ffff */
/*0c10*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0c20*/ LDS.64 R2, [RZ] ; /* 0x00000000ff027984 */
/* 0x000e620000000a00 */
/*0c30*/ IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff057424 */
/* 0x000fc800078e00ff */
/*0c40*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e0005 */
/*0c50*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x002fe2000c101b06 */
/*0c60*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c70*/ ISETP.GE.U32.AND P1, PT, R12, -0x3400000, PT ; /* 0xfcc000000c00780c */
/* 0x000fe20003f26070 */
/*0c80*/ BSSY B2, 0xf10 ; /* 0x0000028000027945 */
/* 0x000fe20003800000 */
/*0c90*/ MOV R12, R14 ; /* 0x0000000e000c7202 */
/* 0x000fe20000000f00 */
/*0ca0*/ IMAD.MOV.U32 R13, RZ, RZ, R15 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000f */
/*0cb0*/ IMAD.MOV.U32 R22, RZ, RZ, R24 ; /* 0x000000ffff167224 */
/* 0x000fe400078e0018 */
/*0cc0*/ IMAD.MOV.U32 R14, RZ, RZ, R18 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0012 */
/*0cd0*/ IMAD.MOV.U32 R15, RZ, RZ, R19 ; /* 0x000000ffff0f7224 */
/* 0x000fc800078e0013 */
/*0ce0*/ @!P1 BRA 0xd70 ; /* 0x0000008000009947 */
/* 0x000fea0003800000 */
/*0cf0*/ DFMA.RM R14, R14, R22, R16 ; /* 0x000000160e0e722b */
/* 0x000e140000004010 */
/*0d00*/ IADD3 R16, P1, R14, 0x1, RZ ; /* 0x000000010e107810 */
/* 0x001fca0007f3e0ff */
/*0d10*/ IMAD.X R17, RZ, RZ, R15, P1 ; /* 0x000000ffff117224 */
/* 0x000fcc00008e060f */
/*0d20*/ DFMA.RP R12, -R14, R16, R12 ; /* 0x000000100e0c722b */
/* 0x000e0c000000810c */
/*0d30*/ DSETP.GT.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00722a */
/* 0x001e0c0003f24000 */
/*0d40*/ FSEL R14, R16, R14, P1 ; /* 0x0000000e100e7208 */
/* 0x001fe40000800000 */
/*0d50*/ FSEL R15, R17, R15, P1 ; /* 0x0000000f110f7208 */
/* 0x000fe20000800000 */
/*0d60*/ BRA 0xf00 ; /* 0x0000019000007947 */
/* 0x000fea0003800000 */
/*0d70*/ DSETP.NE.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00722a */
/* 0x000e1c0003f25000 */
/*0d80*/ @!P1 BRA 0xef0 ; /* 0x0000016000009947 */
/* 0x001fea0003800000 */
/*0d90*/ ISETP.GE.AND P1, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fda0003f26270 */
/*0da0*/ @!P1 MOV R14, 0x0 ; /* 0x00000000000e9802 */
/* 0x000fe20000000f00 */
/*0db0*/ @!P1 IMAD.MOV.U32 R15, RZ, RZ, -0x80000 ; /* 0xfff80000ff0f9424 */
/* 0x000fe200078e00ff */
/*0dc0*/ @!P1 BRA 0xf00 ; /* 0x0000013000009947 */
/* 0x000fea0003800000 */
/*0dd0*/ ISETP.GT.AND P1, PT, R13, 0x7fefffff, PT ; /* 0x7fefffff0d00780c */
/* 0x000fda0003f24270 */
/*0de0*/ @P1 BRA 0xef0 ; /* 0x0000010000001947 */
/* 0x000fea0003800000 */
/*0df0*/ DMUL R12, R12, 8.11296384146066816958e+31 ; /* 0x469000000c0c7828 */
/* 0x000e220000000000 */
/*0e00*/ IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e00ff */
/*0e10*/ IMAD.MOV.U32 R18, RZ, RZ, 0x0 ; /* 0x00000000ff127424 */
/* 0x000fe400078e00ff */
/*0e20*/ IMAD.MOV.U32 R19, RZ, RZ, 0x3fd80000 ; /* 0x3fd80000ff137424 */
/* 0x000fe200078e00ff */
/*0e30*/ MUFU.RSQ64H R15, R13 ; /* 0x0000000d000f7308 */
/* 0x001e240000001c00 */
/*0e40*/ DMUL R16, R14, R14 ; /* 0x0000000e0e107228 */
/* 0x001e0c0000000000 */
/*0e50*/ DFMA R16, R12, -R16, 1 ; /* 0x3ff000000c10742b */
/* 0x001e0c0000000810 */
/*0e60*/ DFMA R18, R16, R18, 0.5 ; /* 0x3fe000001012742b */
/* 0x001fc80000000012 */
/*0e70*/ DMUL R16, R14, R16 ; /* 0x000000100e107228 */
/* 0x000e0c0000000000 */
/*0e80*/ DFMA R16, R18, R16, R14 ; /* 0x000000101210722b */
/* 0x001e0c000000000e */
/*0e90*/ DMUL R14, R12, R16 ; /* 0x000000100c0e7228 */
/* 0x0010480000000000 */
/*0ea0*/ IADD3 R17, R17, -0x100000, RZ ; /* 0xfff0000011117810 */
/* 0x001fe40007ffe0ff */
/*0eb0*/ DFMA R18, R14, -R14, R12 ; /* 0x8000000e0e12722b */
/* 0x002e0c000000000c */
/*0ec0*/ DFMA R14, R16, R18, R14 ; /* 0x00000012100e722b */
/* 0x001e14000000000e */
/*0ed0*/ IADD3 R15, R15, -0x3500000, RZ ; /* 0xfcb000000f0f7810 */
/* 0x001fe20007ffe0ff */
/*0ee0*/ BRA 0xf00 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0ef0*/ DADD R14, R12, R12 ; /* 0x000000000c0e7229 */
/* 0x00004c000000000c */
/*0f00*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0f10*/ IMAD.MOV.U32 R12, RZ, RZ, R4 ; /* 0x000000ffff0c7224 */
/* 0x001fe200078e0004 */
/*0f20*/ MOV R20, R14 ; /* 0x0000000e00147202 */
/* 0x002fe20000000f00 */
/*0f30*/ IMAD.MOV.U32 R13, RZ, RZ, 0x0 ; /* 0x00000000ff0d7424 */
/* 0x000fe400078e00ff */
/*0f40*/ IMAD.MOV.U32 R21, RZ, RZ, R15 ; /* 0x000000ffff157224 */
/* 0x000fe400078e000f */
/*0f50*/ RET.REL.NODEC R12 0x0 ; /* 0xfffff0a00c007950 */
/* 0x000fea0003c3ffff */
/*0f60*/ BRA 0xf60; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0f70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fe0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ff0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
//
// Average extreme spread of five-shot group assuming impact coordinates follow standard normal distribution
//
// Building:
// nvcc -std=c++11 es_cuda.cu -o es_cuda -lcurand
//
// Running:
// for run in {1..10}; do ./es_cuda 15 | tee -a es_cuda.csv; done
//
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <cooperative_groups.h>
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <cuda_runtime.h>
#include <math.h>
#include <chrono>
#include <curand.h>
namespace cg = cooperative_groups;
using std::string;
using std::vector;
// First level of reduction: block-wide tree sum over dynamic shared memory.
// Each thread contributes `in`; after the final barrier, slot 0 holds the
// block total, which every thread returns.
__device__ double reduce_sum(double in, cg::thread_block cta)
{
    // Dynamically-sized shared buffer: one double per thread in the block.
    extern __shared__ double sdata[];
    const unsigned lane = threadIdx.x;

    // Publish this thread's value, then wait for the whole block.
    sdata[lane] = in;
    cg::sync(cta);

    // Halve the number of active threads each round; each active thread
    // folds in its partner's partial sum.
    unsigned stride = blockDim.x / 2;
    while (stride != 0) {
        if (lane < stride) {
            sdata[lane] += sdata[lane + stride];
        }
        cg::sync(cta);
        stride >>= 1;
    }

    // Thread 0's slot now holds the block-wide sum.
    return sdata[0];
}
// Estimator kernel.
// Threads stride over the numGroups five-shot groups; each thread sums the
// extreme spread (largest pairwise distance) of its groups, then the block
// reduces the partial sums into results[blockIdx.x].
__global__ void computeValue(double* const results,
                             const double* const points,
                             const unsigned numGroups)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    // Determine thread ID
    unsigned bid = blockIdx.x;
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned step = gridDim.x * blockDim.x;
    // Shift the input/output pointers.  Group i occupies five consecutive
    // doubles starting at offset 5*i (the loop below advances the pointers by
    // step*5 as i advances by step), so the starting offset must be 5*tid.
    // Bug fix: the previous `points + tid` made neighbouring threads read
    // overlapping, correlated five-shot windows.
    const double* pointx = points + 5 * tid;
    const double* pointy = pointx + 5 * numGroups;   // Y block follows all X values
    // Sum up extreme spread of all groups handled by this thread
    double sum = 0;
    for (unsigned i = tid ; i < numGroups; i += step, pointx += step * 5, pointy += step * 5) {
        // Coordinate differences for the 10 unordered shot pairs
        double dx[10], dy[10];
        // Unroll nested comparison loops
        dx[0] = pointx[0] - pointx[1]; dy[0] = pointy[0] - pointy[1];
        dx[1] = pointx[0] - pointx[2]; dy[1] = pointy[0] - pointy[2];
        dx[2] = pointx[0] - pointx[3]; dy[2] = pointy[0] - pointy[3];
        dx[3] = pointx[0] - pointx[4]; dy[3] = pointy[0] - pointy[4];
        dx[4] = pointx[1] - pointx[2]; dy[4] = pointy[1] - pointy[2];
        dx[5] = pointx[1] - pointx[3]; dy[5] = pointy[1] - pointy[3];
        dx[6] = pointx[1] - pointx[4]; dy[6] = pointy[1] - pointy[4];
        dx[7] = pointx[2] - pointx[3]; dy[7] = pointy[2] - pointy[3];
        dx[8] = pointx[2] - pointx[4]; dy[8] = pointy[2] - pointy[4];
        dx[9] = pointx[3] - pointx[4]; dy[9] = pointy[3] - pointy[4];
        // Largest squared pairwise distance
        double max_d2 = 0;
        for (unsigned j = 0; j < 10; j++) {
            auto candidate_d2 = dx[j] * dx[j] + dy[j] * dy[j];
            max_d2 = max(max_d2, candidate_d2);
        }
        // Extreme spread of this group
        double es = sqrt(max_d2);
        sum += es;
    }
    // Reduce within the block
    sum = reduce_sum(sum, cta);
    // Store the per-block partial result
    if (threadIdx.x == 0) {
        results[bid] = sum;
    }
}
double es_cuda(unsigned power_of_4, unsigned seed)
{
// Get device properties
struct cudaDeviceProp deviceProperties;
cudaError_t cudaResult = cudaGetDeviceProperties(&deviceProperties, 0);
if (cudaResult != cudaSuccess) {
string msg("Could not get device properties: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Check precision is valid
if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
throw std::runtime_error("Device does not have double precision support");
}
// Check requested size is valid
const unsigned threadBlockSize = 128;
if (threadBlockSize > (deviceProperties.maxThreadsPerBlock)) {
throw std::runtime_error("Thread block size is greater than maxThreadsPerBlock");
}
dim3 block;
block.x = threadBlockSize;
// Attach to GPU
cudaResult = cudaSetDevice(0);
if (cudaResult != cudaSuccess) {
string msg("Could not set CUDA device: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Aim to launch around ten or more times as many blocks as there
// are multiprocessors on the target device.
dim3 grid;
const unsigned numGroups = 1 << (2 * power_of_4);
grid.x = numGroups / threadBlockSize;
while (grid.x > 20 * deviceProperties.multiProcessorCount) {
grid.x >>= 1;
}
// Get computeValue function properties and check the maximum block size
struct cudaFuncAttributes funcAttributes;
cudaResult = cudaFuncGetAttributes(&funcAttributes, computeValue);
if (cudaResult != cudaSuccess) {
string msg("Could not get function attributes: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
if (block.x > (unsigned)funcAttributes.maxThreadsPerBlock) {
throw std::runtime_error("Block X dimension is too large for computeValue kernel");
}
// Check the dimensions are valid
if (block.x > (unsigned)deviceProperties.maxThreadsDim[0]) {
throw std::runtime_error("Block X dimension is too large for device");
}
if (grid.x > (unsigned)deviceProperties.maxGridSize[0]) {
throw std::runtime_error("Grid X dimension is too large for device");
}
// Allocate memory for points
// Each simulation has ten random numbers to give five pairs of X and Y coordinates
double* d_points = 0;
cudaResult = cudaMalloc((void **)&d_points, 10 * numGroups * sizeof(double));
if (cudaResult != cudaSuccess) {
string msg("Could not allocate memory on device for random numbers: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Allocate memory for result
// Each thread block will produce one result
double* d_results = 0;
cudaResult = cudaMalloc((void**)&d_results, grid.x * sizeof(double));
if (cudaResult != cudaSuccess) {
string msg("Could not allocate memory on device for partial results: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Generate random points
curandStatus_t curandResult;
curandGenerator_t prng;
curandResult = curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not create pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = curandSetPseudoRandomGeneratorSeed(prng, seed);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not set seed for pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = curandGenerateNormalDouble(prng, (double*)d_points, 10 * numGroups, 0, 1);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not generate pseudo-random numbers: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = curandDestroyGenerator(prng);
if (curandResult != CURAND_STATUS_SUCCESS) {
string msg("Could not destroy pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
// Calculate and average group size
computeValue<<<grid, block, block.x * sizeof(double)>>>(d_results, d_points, numGroups);
// Copy the results back to host
vector<double> results(grid.x);
cudaResult = cudaMemcpy(&results[0], d_results, grid.x * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaResult != cudaSuccess) {
string msg("Could not copy results to host: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Complete sum-reduction
double sum = std::accumulate(results.begin(), results.end(), double(0));
// Cleanup
if (d_points) {
cudaFree(d_points);
}
if (d_results) {
cudaFree(d_results);
}
// Divide sum by count to get the average
return sum / numGroups;
}
// Driver: run the estimator nt times with an MCG-derived seed per run and
// print min/avg/max extreme spread plus wall-clock time as one CSV row.
int main(int argc, char **argv)
{
    unsigned power_of_4 = 12;
    if (argc == 2) {
        power_of_4 = atoi(argv[1]);
    }
    // Cap the per-run size at 4^12 and convert any excess into extra runs.
    unsigned nt = 12;
    if (power_of_4 > 12) {
        nt <<= 2 * (power_of_4 - 12);
        power_of_4 = 12;
    }
    try {
        const auto t_begin = std::chrono::system_clock::now();
        // 128-bit multiplicative congruential generator; any odd seed works.
        __uint128_t mcg_state = time(NULL) | 1;
        double total = 0, lo = 100, hi = 0;
        for (unsigned run = 0; run < nt; ++run) {
            const double r = es_cuda(power_of_4, (unsigned)(mcg_state >> 64));
            total += r;
            lo = fmin(r, lo);
            hi = fmax(r, hi);
            mcg_state *= 0xda942042e4dd58b5ULL;
        }
        const double mean = total / nt;
        const auto t_end = std::chrono::system_clock::now();
        std::chrono::duration<double> seconds = t_end - t_begin;
        std::cout.precision(14);
        std::cout << "code,threads,power_of_4,min,avg,max,time\n";
        std::cout << "CUDA," << nt << "," << power_of_4 << "," << lo << "," << mean << "," << hi << "," << seconds.count() << "\n";
    } catch (std::runtime_error &e) { // es_cuda() can throw runtime exceptions
        fprintf(stderr, "runtime error (%s)\n", e.what());
        return(EXIT_FAILURE);
    }
    return(EXIT_SUCCESS);
}
|
//
// Average extreme spread of five-shot group assuming impact coordinates follow standard normal distribution
//
// Building:
// nvcc -std=c++11 es_cuda.cu -o es_cuda -lcurand
//
// Running:
// for run in {1..10}; do ./es_cuda 15 | tee -a es_cuda.csv; done
//
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
#include <hiprand/hiprand.h>
namespace cg = cooperative_groups;
using std::string;
using std::vector;
// First level of reduction: block-wide tree sum over dynamic shared memory.
// Each thread contributes `in`; after the final barrier, slot 0 holds the
// block total, which every thread returns.
__device__ double reduce_sum(double in, cg::thread_block cta)
{
    // Dynamically-sized shared buffer: one double per thread in the block.
    extern __shared__ double sdata[];
    const unsigned lane = threadIdx.x;

    // Publish this thread's value, then wait for the whole block.
    sdata[lane] = in;
    cg::sync(cta);

    // Halve the number of active threads each round; each active thread
    // folds in its partner's partial sum.
    unsigned stride = blockDim.x / 2;
    while (stride != 0) {
        if (lane < stride) {
            sdata[lane] += sdata[lane + stride];
        }
        cg::sync(cta);
        stride >>= 1;
    }

    // Thread 0's slot now holds the block-wide sum.
    return sdata[0];
}
// Estimator kernel.
// Threads stride over the numGroups five-shot groups; each thread sums the
// extreme spread (largest pairwise distance) of its groups, then the block
// reduces the partial sums into results[blockIdx.x].
__global__ void computeValue(double* const results,
                             const double* const points,
                             const unsigned numGroups)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    // Determine thread ID
    unsigned bid = blockIdx.x;
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned step = gridDim.x * blockDim.x;
    // Shift the input/output pointers.  Group i occupies five consecutive
    // doubles starting at offset 5*i (the loop below advances the pointers by
    // step*5 as i advances by step), so the starting offset must be 5*tid.
    // Bug fix: the previous `points + tid` made neighbouring threads read
    // overlapping, correlated five-shot windows.
    const double* pointx = points + 5 * tid;
    const double* pointy = pointx + 5 * numGroups;   // Y block follows all X values
    // Sum up extreme spread of all groups handled by this thread
    double sum = 0;
    for (unsigned i = tid ; i < numGroups; i += step, pointx += step * 5, pointy += step * 5) {
        // Coordinate differences for the 10 unordered shot pairs
        double dx[10], dy[10];
        // Unroll nested comparison loops
        dx[0] = pointx[0] - pointx[1]; dy[0] = pointy[0] - pointy[1];
        dx[1] = pointx[0] - pointx[2]; dy[1] = pointy[0] - pointy[2];
        dx[2] = pointx[0] - pointx[3]; dy[2] = pointy[0] - pointy[3];
        dx[3] = pointx[0] - pointx[4]; dy[3] = pointy[0] - pointy[4];
        dx[4] = pointx[1] - pointx[2]; dy[4] = pointy[1] - pointy[2];
        dx[5] = pointx[1] - pointx[3]; dy[5] = pointy[1] - pointy[3];
        dx[6] = pointx[1] - pointx[4]; dy[6] = pointy[1] - pointy[4];
        dx[7] = pointx[2] - pointx[3]; dy[7] = pointy[2] - pointy[3];
        dx[8] = pointx[2] - pointx[4]; dy[8] = pointy[2] - pointy[4];
        dx[9] = pointx[3] - pointx[4]; dy[9] = pointy[3] - pointy[4];
        // Largest squared pairwise distance
        double max_d2 = 0;
        for (unsigned j = 0; j < 10; j++) {
            auto candidate_d2 = dx[j] * dx[j] + dy[j] * dy[j];
            max_d2 = max(max_d2, candidate_d2);
        }
        // Extreme spread of this group
        double es = sqrt(max_d2);
        sum += es;
    }
    // Reduce within the block
    sum = reduce_sum(sum, cta);
    // Store the per-block partial result
    if (threadIdx.x == 0) {
        results[bid] = sum;
    }
}
double es_cuda(unsigned power_of_4, unsigned seed)
{
// Get device properties
struct hipDeviceProp_t deviceProperties;
hipError_t cudaResult = hipGetDeviceProperties(&deviceProperties, 0);
if (cudaResult != hipSuccess) {
string msg("Could not get device properties: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Check precision is valid
if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
throw std::runtime_error("Device does not have double precision support");
}
// Check requested size is valid
const unsigned threadBlockSize = 128;
if (threadBlockSize > (deviceProperties.maxThreadsPerBlock)) {
throw std::runtime_error("Thread block size is greater than maxThreadsPerBlock");
}
dim3 block;
block.x = threadBlockSize;
// Attach to GPU
cudaResult = hipSetDevice(0);
if (cudaResult != hipSuccess) {
string msg("Could not set CUDA device: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Aim to launch around ten or more times as many blocks as there
// are multiprocessors on the target device.
dim3 grid;
const unsigned numGroups = 1 << (2 * power_of_4);
grid.x = numGroups / threadBlockSize;
while (grid.x > 20 * deviceProperties.multiProcessorCount) {
grid.x >>= 1;
}
// Get computeValue function properties and check the maximum block size
struct hipFuncAttributes funcAttributes;
cudaResult = hipFuncGetAttributes(&funcAttributes, reinterpret_cast<const void*>(computeValue));
if (cudaResult != hipSuccess) {
string msg("Could not get function attributes: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
if (block.x > (unsigned)funcAttributes.maxThreadsPerBlock) {
throw std::runtime_error("Block X dimension is too large for computeValue kernel");
}
// Check the dimensions are valid
if (block.x > (unsigned)deviceProperties.maxThreadsDim[0]) {
throw std::runtime_error("Block X dimension is too large for device");
}
if (grid.x > (unsigned)deviceProperties.maxGridSize[0]) {
throw std::runtime_error("Grid X dimension is too large for device");
}
// Allocate memory for points
// Each simulation has ten random numbers to give five pairs of X and Y coordinates
double* d_points = 0;
cudaResult = hipMalloc((void **)&d_points, 10 * numGroups * sizeof(double));
if (cudaResult != hipSuccess) {
string msg("Could not allocate memory on device for random numbers: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Allocate memory for result
// Each thread block will produce one result
double* d_results = 0;
cudaResult = hipMalloc((void**)&d_results, grid.x * sizeof(double));
if (cudaResult != hipSuccess) {
string msg("Could not allocate memory on device for partial results: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Generate random points
hiprandStatus_t curandResult;
hiprandGenerator_t prng;
curandResult = hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not create pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = hiprandSetPseudoRandomGeneratorSeed(prng, seed);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not set seed for pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = hiprandGenerateNormalDouble(prng, (double*)d_points, 10 * numGroups, 0, 1);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not generate pseudo-random numbers: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = hiprandDestroyGenerator(prng);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not destroy pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
// Calculate and average group size
computeValue<<<grid, block, block.x * sizeof(double)>>>(d_results, d_points, numGroups);
// Copy the results back to host
vector<double> results(grid.x);
cudaResult = hipMemcpy(&results[0], d_results, grid.x * sizeof(double), hipMemcpyDeviceToHost);
if (cudaResult != hipSuccess) {
string msg("Could not copy results to host: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Complete sum-reduction
double sum = std::accumulate(results.begin(), results.end(), double(0));
// Cleanup
if (d_points) {
hipFree(d_points);
}
if (d_results) {
hipFree(d_results);
}
// Divide sum by count to get the average
return sum / numGroups;
}
// Driver: run the estimator nt times with an MCG-derived seed per run and
// print min/avg/max extreme spread plus wall-clock time as one CSV row.
int main(int argc, char **argv)
{
    unsigned power_of_4 = 12;
    if (argc == 2) {
        power_of_4 = atoi(argv[1]);
    }
    // Cap the per-run size at 4^12 and convert any excess into extra runs.
    unsigned nt = 12;
    if (power_of_4 > 12) {
        nt <<= 2 * (power_of_4 - 12);
        power_of_4 = 12;
    }
    try {
        const auto t_begin = std::chrono::system_clock::now();
        // 128-bit multiplicative congruential generator; any odd seed works.
        __uint128_t mcg_state = time(NULL) | 1;
        double total = 0, lo = 100, hi = 0;
        for (unsigned run = 0; run < nt; ++run) {
            const double r = es_cuda(power_of_4, (unsigned)(mcg_state >> 64));
            total += r;
            lo = fmin(r, lo);
            hi = fmax(r, hi);
            mcg_state *= 0xda942042e4dd58b5ULL;
        }
        const double mean = total / nt;
        const auto t_end = std::chrono::system_clock::now();
        std::chrono::duration<double> seconds = t_end - t_begin;
        std::cout.precision(14);
        std::cout << "code,threads,power_of_4,min,avg,max,time\n";
        std::cout << "CUDA," << nt << "," << power_of_4 << "," << lo << "," << mean << "," << hi << "," << seconds.count() << "\n";
    } catch (std::runtime_error &e) { // es_cuda() can throw runtime exceptions
        fprintf(stderr, "runtime error (%s)\n", e.what());
        return(EXIT_FAILURE);
    }
    return(EXIT_SUCCESS);
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
//
// Average extreme spread of five-shot group assuming impact coordinates follow standard normal distribution
//
// Building:
// nvcc -std=c++11 es_cuda.cu -o es_cuda -lcurand
//
// Running:
// for run in {1..10}; do ./es_cuda 15 | tee -a es_cuda.csv; done
//
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
#include <hiprand/hiprand.h>
namespace cg = cooperative_groups;
using std::string;
using std::vector;
// First level of reduction: block-wide tree sum over dynamic shared memory.
// Each thread contributes `in`; after the final barrier, slot 0 holds the
// block total, which every thread returns.
__device__ double reduce_sum(double in, cg::thread_block cta)
{
    // Dynamically-sized shared buffer: one double per thread in the block.
    extern __shared__ double sdata[];
    const unsigned lane = threadIdx.x;

    // Publish this thread's value, then wait for the whole block.
    sdata[lane] = in;
    cg::sync(cta);

    // Halve the number of active threads each round; each active thread
    // folds in its partner's partial sum.
    unsigned stride = blockDim.x / 2;
    while (stride != 0) {
        if (lane < stride) {
            sdata[lane] += sdata[lane + stride];
        }
        cg::sync(cta);
        stride >>= 1;
    }

    // Thread 0's slot now holds the block-wide sum.
    return sdata[0];
}
// Estimator kernel.
// Threads stride over the numGroups five-shot groups; each thread sums the
// extreme spread (largest pairwise distance) of its groups, then the block
// reduces the partial sums into results[blockIdx.x].
__global__ void computeValue(double* const results,
                             const double* const points,
                             const unsigned numGroups)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    // Determine thread ID
    unsigned bid = blockIdx.x;
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned step = gridDim.x * blockDim.x;
    // Shift the input/output pointers.  Group i occupies five consecutive
    // doubles starting at offset 5*i (the loop below advances the pointers by
    // step*5 as i advances by step), so the starting offset must be 5*tid.
    // Bug fix: the previous `points + tid` made neighbouring threads read
    // overlapping, correlated five-shot windows.
    const double* pointx = points + 5 * tid;
    const double* pointy = pointx + 5 * numGroups;   // Y block follows all X values
    // Sum up extreme spread of all groups handled by this thread
    double sum = 0;
    for (unsigned i = tid ; i < numGroups; i += step, pointx += step * 5, pointy += step * 5) {
        // Coordinate differences for the 10 unordered shot pairs
        double dx[10], dy[10];
        // Unroll nested comparison loops
        dx[0] = pointx[0] - pointx[1]; dy[0] = pointy[0] - pointy[1];
        dx[1] = pointx[0] - pointx[2]; dy[1] = pointy[0] - pointy[2];
        dx[2] = pointx[0] - pointx[3]; dy[2] = pointy[0] - pointy[3];
        dx[3] = pointx[0] - pointx[4]; dy[3] = pointy[0] - pointy[4];
        dx[4] = pointx[1] - pointx[2]; dy[4] = pointy[1] - pointy[2];
        dx[5] = pointx[1] - pointx[3]; dy[5] = pointy[1] - pointy[3];
        dx[6] = pointx[1] - pointx[4]; dy[6] = pointy[1] - pointy[4];
        dx[7] = pointx[2] - pointx[3]; dy[7] = pointy[2] - pointy[3];
        dx[8] = pointx[2] - pointx[4]; dy[8] = pointy[2] - pointy[4];
        dx[9] = pointx[3] - pointx[4]; dy[9] = pointy[3] - pointy[4];
        // Largest squared pairwise distance
        double max_d2 = 0;
        for (unsigned j = 0; j < 10; j++) {
            auto candidate_d2 = dx[j] * dx[j] + dy[j] * dy[j];
            max_d2 = max(max_d2, candidate_d2);
        }
        // Extreme spread of this group
        double es = sqrt(max_d2);
        sum += es;
    }
    // Reduce within the block
    sum = reduce_sum(sum, cta);
    // Store the per-block partial result
    if (threadIdx.x == 0) {
        results[bid] = sum;
    }
}
double es_cuda(unsigned power_of_4, unsigned seed)
{
// Get device properties
struct hipDeviceProp_t deviceProperties;
hipError_t cudaResult = hipGetDeviceProperties(&deviceProperties, 0);
if (cudaResult != hipSuccess) {
string msg("Could not get device properties: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Check precision is valid
if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
throw std::runtime_error("Device does not have double precision support");
}
// Check requested size is valid
const unsigned threadBlockSize = 128;
if (threadBlockSize > (deviceProperties.maxThreadsPerBlock)) {
throw std::runtime_error("Thread block size is greater than maxThreadsPerBlock");
}
dim3 block;
block.x = threadBlockSize;
// Attach to GPU
cudaResult = hipSetDevice(0);
if (cudaResult != hipSuccess) {
string msg("Could not set CUDA device: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Aim to launch around ten or more times as many blocks as there
// are multiprocessors on the target device.
dim3 grid;
const unsigned numGroups = 1 << (2 * power_of_4);
grid.x = numGroups / threadBlockSize;
while (grid.x > 20 * deviceProperties.multiProcessorCount) {
grid.x >>= 1;
}
// Get computeValue function properties and check the maximum block size
struct hipFuncAttributes funcAttributes;
cudaResult = hipFuncGetAttributes(&funcAttributes, reinterpret_cast<const void*>(computeValue));
if (cudaResult != hipSuccess) {
string msg("Could not get function attributes: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
if (block.x > (unsigned)funcAttributes.maxThreadsPerBlock) {
throw std::runtime_error("Block X dimension is too large for computeValue kernel");
}
// Check the dimensions are valid
if (block.x > (unsigned)deviceProperties.maxThreadsDim[0]) {
throw std::runtime_error("Block X dimension is too large for device");
}
if (grid.x > (unsigned)deviceProperties.maxGridSize[0]) {
throw std::runtime_error("Grid X dimension is too large for device");
}
// Allocate memory for points
// Each simulation has ten random numbers to give five pairs of X and Y coordinates
double* d_points = 0;
cudaResult = hipMalloc((void **)&d_points, 10 * numGroups * sizeof(double));
if (cudaResult != hipSuccess) {
string msg("Could not allocate memory on device for random numbers: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Allocate memory for result
// Each thread block will produce one result
double* d_results = 0;
cudaResult = hipMalloc((void**)&d_results, grid.x * sizeof(double));
if (cudaResult != hipSuccess) {
string msg("Could not allocate memory on device for partial results: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Generate random points
hiprandStatus_t curandResult;
hiprandGenerator_t prng;
curandResult = hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not create pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = hiprandSetPseudoRandomGeneratorSeed(prng, seed);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not set seed for pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = hiprandGenerateNormalDouble(prng, (double*)d_points, 10 * numGroups, 0, 1);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not generate pseudo-random numbers: ");
msg += curandResult;
throw std::runtime_error(msg);
}
curandResult = hiprandDestroyGenerator(prng);
if (curandResult != HIPRAND_STATUS_SUCCESS) {
string msg("Could not destroy pseudo-random number generator: ");
msg += curandResult;
throw std::runtime_error(msg);
}
// Calculate and average group size
computeValue<<<grid, block, block.x * sizeof(double)>>>(d_results, d_points, numGroups);
// Copy the results back to host
vector<double> results(grid.x);
cudaResult = hipMemcpy(&results[0], d_results, grid.x * sizeof(double), hipMemcpyDeviceToHost);
if (cudaResult != hipSuccess) {
string msg("Could not copy results to host: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Complete sum-reduction
double sum = std::accumulate(results.begin(), results.end(), double(0));
// Cleanup
if (d_points) {
hipFree(d_points);
}
if (d_results) {
hipFree(d_results);
}
// Divide sum by count to get the average
return sum / numGroups;
}
// Driver: run the estimator nt times with an MCG-derived seed per run and
// print min/avg/max extreme spread plus wall-clock time as one CSV row.
int main(int argc, char **argv)
{
    unsigned power_of_4 = 12;
    if (argc == 2) {
        power_of_4 = atoi(argv[1]);
    }
    // Cap the per-run size at 4^12 and convert any excess into extra runs.
    unsigned nt = 12;
    if (power_of_4 > 12) {
        nt <<= 2 * (power_of_4 - 12);
        power_of_4 = 12;
    }
    try {
        const auto t_begin = std::chrono::system_clock::now();
        // 128-bit multiplicative congruential generator; any odd seed works.
        __uint128_t mcg_state = time(NULL) | 1;
        double total = 0, lo = 100, hi = 0;
        for (unsigned run = 0; run < nt; ++run) {
            const double r = es_cuda(power_of_4, (unsigned)(mcg_state >> 64));
            total += r;
            lo = fmin(r, lo);
            hi = fmax(r, hi);
            mcg_state *= 0xda942042e4dd58b5ULL;
        }
        const double mean = total / nt;
        const auto t_end = std::chrono::system_clock::now();
        std::chrono::duration<double> seconds = t_end - t_begin;
        std::cout.precision(14);
        std::cout << "code,threads,power_of_4,min,avg,max,time\n";
        std::cout << "CUDA," << nt << "," << power_of_4 << "," << lo << "," << mean << "," << hi << "," << seconds.count() << "\n";
    } catch (std::runtime_error &e) { // es_cuda() can throw runtime exceptions
        fprintf(stderr, "runtime error (%s)\n", e.what());
        return(EXIT_FAILURE);
    }
    return(EXIT_SUCCESS);
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12computeValuePdPKdj
.globl _Z12computeValuePdPKdj
.p2align 8
.type _Z12computeValuePdPKdj,@function
; computeValue(double *results, const double *points, unsigned numGroups)
; Compiler-generated gfx1100 (wave32) code — do not reorder instructions; the
; s_delay_alu / s_waitcnt scheduling encodes hardware dependency information.
; Kernarg layout (per the amdhsa metadata below): s[0:1] = kernarg base;
; +0x00 results ptr, +0x08 points ptr, +0x10 numGroups,
; +0x18 hidden_block_count_x, +0x24 hidden_group_size_x/y (packed u16 pair).
_Z12computeValuePdPKdj:
s_clause 0x1
; s5 = packed group_size_x|y<<16, s10 = numGroups
s_load_b32 s5, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
; s[2:3] = &kernarg[24] (hidden_block_count_x, read later into s12)
s_add_u32 s2, s0, 24
; s15 is workgroup_id_x (user_sgpr_count=15, system_sgpr_workgroup_id_x=1)
s_mov_b32 s4, s15
s_addc_u32 s3, s1, 0
s_mov_b32 s11, exec_lo
s_waitcnt lgkmcnt(0)
; s6 = group_size_x (low 16 bits of the packed pair)
s_and_b32 s6, s5, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; v41 = global thread index = workgroup_id_x * group_size_x + tid (v0)
v_mad_u64_u32 v[41:42], null, s4, s6, v[0:1]
; v[42:43] = 0.0: per-thread double accumulator
v_mov_b32_e32 v42, 0
v_mov_b32_e32 v43, 0
; predicate the whole loop on (global index < numGroups)
v_cmpx_gt_u32_e64 s10, v41
s_cbranch_execz .LBB0_6
; s[8:9] = points ptr, s12 = block_count_x
s_load_b64 s[8:9], s[0:1], 0x8
s_load_b32 s12, s[2:3], 0x0
v_mov_b32_e32 v42, 0
v_mov_b32_e32 v44, 0
v_mov_b32_e32 v45, 0
s_mov_b32 s3, 0
; each group owns 5 doubles in each half of the buffer:
; s[14:15] = numGroups*5*8 = byte offset of the second half
s_mul_i32 s2, s10, 5
v_lshlrev_b64 v[1:2], 3, v[41:42]
s_lshl_b64 s[14:15], s[2:3], 3
v_dual_mov_b32 v42, v44 :: v_dual_mov_b32 v43, v45
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; v[46:47] = &points[gid*... ] (first set), v[48:49] = same index in second set
v_add_co_u32 v46, vcc_lo, s8, v1
v_add_co_ci_u32_e32 v47, vcc_lo, s9, v2, vcc_lo
; grid stride: s12 = block_count_x * group_size_x threads per pass
s_mul_i32 s12, s12, s6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v48, vcc_lo, v46, s14
v_add_co_ci_u32_e32 v49, vcc_lo, s15, v47, vcc_lo
; s[6:7] = grid-stride in bytes (stride * 5 doubles * 8)
s_mul_i32 s2, s12, 5
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[6:7], s[2:3], 3
; --- outer grid-stride loop over groups ------------------------------------
.LBB0_2:
; load 5 doubles from each of the two point sets (4+1 split)
global_load_b128 v[11:14], v[46:47], off
global_load_b128 v[15:18], v[48:49], off
global_load_b128 v[37:40], v[46:47], off offset:16
global_load_b128 v[50:53], v[48:49], off offset:16
global_load_b64 v[19:20], v[46:47], off offset:32
global_load_b64 v[54:55], v[48:49], off offset:32
s_mov_b64 s[8:9], 0
; compute all 10 pairwise differences within each 5-element set; first-set
; diffs land in v[1..20], second-set diffs in v[21..40], laid out so the
; inner loop can index them as consecutive 64-bit pairs via m0
s_waitcnt vmcnt(5)
v_add_f64 v[1:2], v[11:12], -v[13:14]
s_waitcnt vmcnt(4)
v_add_f64 v[21:22], v[15:16], -v[17:18]
s_waitcnt vmcnt(3)
v_add_f64 v[3:4], v[11:12], -v[37:38]
s_waitcnt vmcnt(2)
v_add_f64 v[23:24], v[15:16], -v[50:51]
v_add_f64 v[5:6], v[11:12], -v[39:40]
v_add_f64 v[25:26], v[15:16], -v[52:53]
s_waitcnt vmcnt(1)
v_add_f64 v[7:8], v[11:12], -v[19:20]
s_waitcnt vmcnt(0)
v_add_f64 v[27:28], v[15:16], -v[54:55]
v_add_f64 v[9:10], v[13:14], -v[37:38]
v_add_f64 v[29:30], v[17:18], -v[50:51]
v_add_f64 v[11:12], v[13:14], -v[39:40]
v_add_f64 v[31:32], v[17:18], -v[52:53]
v_add_f64 v[13:14], v[13:14], -v[19:20]
v_add_f64 v[33:34], v[17:18], -v[54:55]
v_add_f64 v[15:16], v[37:38], -v[39:40]
v_add_f64 v[35:36], v[50:51], -v[52:53]
v_add_f64 v[17:18], v[37:38], -v[19:20]
v_add_f64 v[37:38], v[50:51], -v[54:55]
v_add_f64 v[19:20], v[39:40], -v[19:20]
v_add_f64 v[39:40], v[52:53], -v[54:55]
; v[50:51] = running max, re-initialized to 0.0 each outer iteration
v_dual_mov_b32 v51, v45 :: v_dual_mov_b32 v50, v44
.p2align 6
; --- inner loop: 10 iterations, one per difference pair ---------------------
.LBB0_3:
; m0 = 2*i selects pair i for the relative-register reads below
s_lshl_b32 s2, s8, 1
s_add_u32 s8, s8, 1
s_mov_b32 m0, s2
s_addc_u32 s9, s9, 0
; v[52:53] = second-set diff[i], v[54:55] = first-set diff[i] (v_movrels
; reads v[src + m0])
v_movrels_b32_e32 v53, v22
v_movrels_b32_e32 v52, v21
v_movrels_b32_e32 v55, v2
v_movrels_b32_e32 v54, v1
; self-max — presumably canonicalizes the accumulator; TODO confirm intent
v_max_f64 v[50:51], v[50:51], v[50:51]
s_cmp_eq_u32 s8, 10
; max += nothing; candidate = d1^2 + d2^2, then running max
v_mul_f64 v[52:53], v[52:53], v[52:53]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[52:53], v[54:55], v[54:55], v[52:53]
v_max_f64 v[50:51], v[50:51], v[52:53]
s_cbranch_scc0 .LBB0_3
; --- sqrt(max) via v_rsq_f64 + Newton-Raphson refinement --------------------
; the 0x10000000 high-word compare detects very small inputs; those are
; pre-scaled by 2^256 (ldexp below) and the result rescaled by 2^-128 to
; keep the rsqrt iteration out of the denormal range
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[50:51]
; advance global index and second-set pointer by the grid stride
v_add_nc_u32_e32 v41, s12, v41
v_add_co_u32 v48, s2, v48, s6
v_add_co_ci_u32_e64 v49, s2, s7, v49, s2
v_cndmask_b32_e64 v1, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v1, 8, v1
v_ldexp_f64 v[1:2], v[50:51], v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rsq_f64_e32 v[3:4], v[1:2]
s_waitcnt_depctr 0xfff
v_mul_f64 v[5:6], v[1:2], v[3:4]
v_mul_f64 v[3:4], v[3:4], 0.5
v_fma_f64 v[7:8], -v[3:4], v[5:6], 0.5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[5:6], v[5:6], v[7:8], v[5:6]
v_fma_f64 v[3:4], v[3:4], v[7:8], v[3:4]
v_fma_f64 v[7:8], -v[5:6], v[5:6], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[5:6], v[7:8], v[3:4], v[5:6]
v_fma_f64 v[7:8], -v[5:6], v[5:6], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fma_f64 v[3:4], v[7:8], v[3:4], v[5:6]
; undo the 2^256 pre-scale where it was applied
v_cndmask_b32_e64 v5, 0, 0xffffff80, vcc_lo
; class mask 0x260: ±0 / +inf pass through the input unchanged
v_cmp_class_f64_e64 vcc_lo, v[1:2], 0x260
v_ldexp_f64 v[3:4], v[3:4], v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v1, v3, v1
; advance first-set pointer; accumulate sqrt result into v[42:43]
v_add_co_u32 v46, vcc_lo, v46, s6
v_add_co_ci_u32_e32 v47, vcc_lo, s7, v47, vcc_lo
v_add_f64 v[42:43], v[42:43], v[1:2]
; loop while (global index < numGroups); s3 accumulates exited lanes
v_cmp_le_u32_e32 vcc_lo, s10, v41
s_or_b32 s3, vcc_lo, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s3
; --- workgroup tree reduction in LDS ----------------------------------------
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
s_or_b32 exec_lo, exec_lo, s11
; skip the reduction loop entirely when group_size_x < 2
v_cmp_lt_u16_e64 s2, s5, 2
; v1 = tid*8 = this lane's LDS slot
v_lshl_add_u32 v1, v0, 3, 0
s_and_b32 vcc_lo, exec_lo, s2
ds_store_b64 v1, v[42:43]
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_vccnz .LBB0_11
; s2 = group_size_x / 2: first reduction stride
s_and_b32 s2, 0xffff, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_lshr_b32 s2, s2, 1
s_branch .LBB0_9
.p2align 6
; loop latch: halve the stride, barrier, repeat while stride >= 1
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_gt_u32 s2, 1
s_mov_b32 s2, s3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_11
; lanes with tid < stride add partner slot (tid+stride) into their own
.LBB0_9:
s_mov_b32 s3, exec_lo
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_8
v_add_nc_u32_e32 v2, s2, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 3, 0
ds_load_b64 v[2:3], v2
ds_load_b64 v[4:5], v1
s_waitcnt lgkmcnt(0)
v_add_f64 v[2:3], v[2:3], v[4:5]
ds_store_b64 v1, v[2:3]
s_branch .LBB0_8
; --- thread 0 writes the block sum to results[workgroup_id_x] ---------------
.LBB0_11:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_13
v_mov_b32_e32 v0, 0
; s[0:1] = results ptr (kernarg +0x00); s4 still holds workgroup_id_x
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s5, 0
v_mov_b32_e32 v2, 0
s_lshl_b64 s[2:3], s[4:5], 3
ds_load_b64 v[0:1], v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b64 v2, v[0:1], s[0:1]
.LBB0_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12computeValuePdPKdj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 56
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12computeValuePdPKdj, .Lfunc_end0-_Z12computeValuePdPKdj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12computeValuePdPKdj
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12computeValuePdPKdj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 56
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z12computeValuePdPKdj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0xae0 ; /* 0x00000aa000007945 */
/* 0x000fe20003800000 */
/*0040*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R2, R0, c[0x0][0x0], R3 ; /* 0x0000000000027a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06070 */
/*0080*/ @P0 BRA 0xad0 ; /* 0x00000a4000000947 */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff057624 */
/* 0x000fe400078e00ff */
/*00a0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */
/* 0x000fe400078e00ff */
/*00b0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff097624 */
/* 0x000fe400078e00ff */
/*00c0*/ IMAD R5, R5, c[0x0][0x0], RZ ; /* 0x0000000005057a24 */
/* 0x000fe400078e02ff */
/*00d0*/ IMAD.WIDE.U32 R6, R2, R7, c[0x0][0x168] ; /* 0x00005a0002067625 */
/* 0x000fc800078e0007 */
/*00e0*/ IMAD R9, R9, 0x5, RZ ; /* 0x0000000509097824 */
/* 0x000fe200078e02ff */
/*00f0*/ MOV R11, R7 ; /* 0x00000007000b7202 */
/* 0x000fe20000000f00 */
/*0100*/ IMAD R37, R5, 0x5, RZ ; /* 0x0000000505257824 */
/* 0x000fe400078e02ff */
/*0110*/ IMAD.WIDE.U32 R8, R9, 0x8, R6 ; /* 0x0000000809087825 */
/* 0x000fc800078e0006 */
/*0120*/ IMAD.MOV.U32 R10, RZ, RZ, R6 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e0006 */
/*0130*/ IMAD.SHL.U32 R35, R37, 0x8, RZ ; /* 0x0000000825237824 */
/* 0x000fe200078e00ff */
/*0140*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0150*/ SHF.R.U32.HI R37, RZ, 0x1d, R37 ; /* 0x0000001dff257819 */
/* 0x000fe40000011625 */
/*0160*/ LDG.E.64 R12, [R8.64+0x8] ; /* 0x00000806080c7981 */
/* 0x000ea8000c1e1b00 */
/*0170*/ LDG.E.64 R14, [R8.64] ; /* 0x00000006080e7981 */
/* 0x000ea8000c1e1b00 */
/*0180*/ LDG.E.64 R16, [R10.64+0x8] ; /* 0x000008060a107981 */
/* 0x001ee8000c1e1b00 */
/*0190*/ LDG.E.64 R18, [R10.64] ; /* 0x000000060a127981 */
/* 0x000ee8000c1e1b00 */
/*01a0*/ LDG.E.64 R20, [R8.64+0x10] ; /* 0x0000100608147981 */
/* 0x000f28000c1e1b00 */
/*01b0*/ LDG.E.64 R22, [R10.64+0x10] ; /* 0x000010060a167981 */
/* 0x000f68000c1e1b00 */
/*01c0*/ LDG.E.64 R24, [R8.64+0x18] ; /* 0x0000180608187981 */
/* 0x000f28000c1e1b00 */
/*01d0*/ LDG.E.64 R26, [R10.64+0x18] ; /* 0x000018060a1a7981 */
/* 0x000f62000c1e1b00 */
/*01e0*/ DADD R30, -R12, R14 ; /* 0x000000000c1e7229 */
/* 0x004e0c000000010e */
/*01f0*/ DMUL R30, R30, R30 ; /* 0x0000001e1e1e7228 */
/* 0x001fc80000000000 */
/*0200*/ DADD R28, -R16, R18 ; /* 0x00000000101c7229 */
/* 0x008e0c0000000112 */
/*0210*/ DFMA R30, R28, R28, R30 ; /* 0x0000001c1c1e722b */
/* 0x001e08000000001e */
/*0220*/ DADD R28, R14, -R20 ; /* 0x000000000e1c7229 */
/* 0x010e480000000814 */
/*0230*/ DSETP.MAX.AND P0, P1, RZ, R30, PT ; /* 0x0000001eff00722a */
/* 0x0010a4000390f000 */
/*0240*/ IMAD.MOV.U32 R4, RZ, RZ, R30 ; /* 0x000000ffff047224 */
/* 0x000fe400078e001e */
/*0250*/ DMUL R28, R28, R28 ; /* 0x0000001c1c1c7228 */
/* 0x002fe20000000000 */
/*0260*/ IMAD.MOV.U32 R30, RZ, RZ, RZ ; /* 0x000000ffff1e7224 */
/* 0x001fc600078e00ff */
/*0270*/ DADD R32, R18, -R22 ; /* 0x0000000012207229 */
/* 0x020e240000000816 */
/*0280*/ SEL R30, R30, R4, P0 ; /* 0x000000041e1e7207 */
/* 0x004fc80000000000 */
/*0290*/ DFMA R28, R32, R32, R28 ; /* 0x00000020201c722b */
/* 0x001064000000001c */
/*02a0*/ IMAD.MOV.U32 R33, RZ, RZ, RZ ; /* 0x000000ffff217224 */
/* 0x001fe400078e00ff */
/*02b0*/ IMAD.MOV.U32 R32, RZ, RZ, R31 ; /* 0x000000ffff207224 */
/* 0x000fca00078e001f */
/*02c0*/ FSEL R33, R33, R32, P0 ; /* 0x0000002021217208 */
/* 0x000fe20000000000 */
/*02d0*/ IMAD.MOV.U32 R4, RZ, RZ, R29 ; /* 0x000000ffff047224 */
/* 0x002fe200078e001d */
/*02e0*/ @P1 LOP3.LUT R33, R32, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000020211812 */
/* 0x000fe200078efcff */
/*02f0*/ IMAD.MOV.U32 R32, RZ, RZ, R30 ; /* 0x000000ffff207224 */
/* 0x000fc600078e001e */
/*0300*/ MOV R31, R33 ; /* 0x00000021001f7202 */
/* 0x000fe20000000f00 */
/*0310*/ IMAD.MOV.U32 R33, RZ, RZ, R28 ; /* 0x000000ffff217224 */
/* 0x000fca00078e001c */
/*0320*/ DSETP.MAX.AND P0, P1, R30, R28, PT ; /* 0x0000001c1e00722a */
/* 0x000e08000390f000 */
/*0330*/ DADD R28, R14, -R24 ; /* 0x000000000e1c7229 */
/* 0x000e640000000818 */
/*0340*/ SEL R30, R32, R33, P0 ; /* 0x00000021201e7207 */
/* 0x001fe40000000000 */
/*0350*/ DADD R32, R18, -R26 ; /* 0x0000000012207229 */
/* 0x000fe2000000081a */
/*0360*/ FSEL R31, R31, R4, P0 ; /* 0x000000041f1f7208 */
/* 0x000fe40000000000 */
/*0370*/ IMAD.MOV.U32 R34, RZ, RZ, R30 ; /* 0x000000ffff227224 */
/* 0x000fe200078e001e */
/*0380*/ DMUL R28, R28, R28 ; /* 0x0000001c1c1c7228 */
/* 0x002e240000000000 */
/*0390*/ @P1 LOP3.LUT R31, R4, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000041f1812 */
/* 0x000fc800078efcff */
/*03a0*/ DFMA R28, R32, R32, R28 ; /* 0x00000020201c722b */
/* 0x001062000000001c */
/*03b0*/ IMAD.MOV.U32 R4, RZ, RZ, R31 ; /* 0x000000ffff047224 */
/* 0x000fe200078e001f */
/*03c0*/ LDG.E.64 R32, [R10.64+0x20] ; /* 0x000020060a207981 */
/* 0x001ea8000c1e1b00 */
/*03d0*/ DSETP.MAX.AND P0, P1, R30, R28, PT ; /* 0x0000001c1e00722a */
/* 0x002064000390f000 */
/*03e0*/ LDG.E.64 R30, [R8.64+0x20] ; /* 0x00002006081e7981 */
/* 0x001ee4000c1e1b00 */
/*03f0*/ MOV R36, R28 ; /* 0x0000001c00247202 */
/* 0x000fc40000000f00 */
/*0400*/ FSEL R4, R4, R29, P0 ; /* 0x0000001d04047208 */
/* 0x002fe40000000000 */
/*0410*/ SEL R28, R34, R36, P0 ; /* 0x00000024221c7207 */
/* 0x000fcc0000000000 */
/*0420*/ @P1 LOP3.LUT R4, R29, 0x80000, RZ, 0xfc, !PT ; /* 0x000800001d041812 */
/* 0x000fca00078efcff */
/*0430*/ IMAD.MOV.U32 R29, RZ, RZ, R4 ; /* 0x000000ffff1d7224 */
/* 0x000fe400078e0004 */
/*0440*/ IMAD.IADD R2, R5, 0x1, R2 ; /* 0x0000000105027824 */
/* 0x000fe200078e0202 */
/*0450*/ BSSY B1, 0xa70 ; /* 0x0000061000017945 */
/* 0x000fe20003800000 */
/*0460*/ DADD R18, R18, -R32 ; /* 0x0000000012127229 */
/* 0x004fc80000000820 */
/*0470*/ DADD R14, R14, -R30 ; /* 0x000000000e0e7229 */
/* 0x008e0c000000081e */
/*0480*/ DMUL R14, R14, R14 ; /* 0x0000000e0e0e7228 */
/* 0x001e0c0000000000 */
/*0490*/ DFMA R14, R18, R18, R14 ; /* 0x00000012120e722b */
/* 0x001064000000000e */
/*04a0*/ IMAD.MOV.U32 R18, RZ, RZ, R28 ; /* 0x000000ffff127224 */
/* 0x001fc800078e001c */
/*04b0*/ DSETP.MAX.AND P0, P1, R28, R14, PT ; /* 0x0000000e1c00722a */
/* 0x002e08000390f000 */
/*04c0*/ IMAD.MOV.U32 R19, RZ, RZ, R14 ; /* 0x000000ffff137224 */
/* 0x000fe400078e000e */
/*04d0*/ IMAD.MOV.U32 R4, RZ, RZ, R15 ; /* 0x000000ffff047224 */
/* 0x000fc600078e000f */
/*04e0*/ SEL R14, R18, R19, P0 ; /* 0x00000013120e7207 */
/* 0x001fe20000000000 */
/*04f0*/ DADD R18, R12, -R20 ; /* 0x000000000c127229 */
/* 0x000e220000000814 */
/*0500*/ FSEL R15, R29, R4, P0 ; /* 0x000000041d0f7208 */
/* 0x000fc60000000000 */
/*0510*/ DADD R28, R16, -R22 ; /* 0x00000000101c7229 */
/* 0x000fc80000000816 */
/*0520*/ DMUL R18, R18, R18 ; /* 0x0000001212127228 */
/* 0x001e220000000000 */
/*0530*/ @P1 LOP3.LUT R15, R4, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000040f1812 */
/* 0x000fca00078efcff */
/*0540*/ DFMA R28, R28, R28, R18 ; /* 0x0000001c1c1c722b */
/* 0x001e0c0000000012 */
/*0550*/ DSETP.MAX.AND P0, P1, R14, R28, PT ; /* 0x0000001c0e00722a */
/* 0x001e22000390f000 */
/*0560*/ MOV R18, R14 ; /* 0x0000000e00127202 */
/* 0x000fe20000000f00 */
/*0570*/ IMAD.MOV.U32 R4, RZ, RZ, R15 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000f */
/*0580*/ IMAD.MOV.U32 R19, RZ, RZ, R28 ; /* 0x000000ffff137224 */
/* 0x000fe200078e001c */
/*0590*/ DADD R14, R12, -R24 ; /* 0x000000000c0e7229 */
/* 0x000e480000000818 */
/*05a0*/ SEL R18, R18, R19, P0 ; /* 0x0000001312127207 */
/* 0x001fe40000000000 */
/*05b0*/ FSEL R19, R4, R29, P0 ; /* 0x0000001d04137208 */
/* 0x000fe20000000000 */
/*05c0*/ DMUL R14, R14, R14 ; /* 0x0000000e0e0e7228 */
/* 0x002fe40000000000 */
/*05d0*/ @P1 LOP3.LUT R19, R29, 0x80000, RZ, 0xfc, !PT ; /* 0x000800001d131812 */
/* 0x000fe400078efcff */
/*05e0*/ DADD R28, R16, -R26 ; /* 0x00000000101c7229 */
/* 0x000e0c000000081a */
/*05f0*/ DFMA R14, R28, R28, R14 ; /* 0x0000001c1c0e722b */
/* 0x001e08000000000e */
/*0600*/ DADD R12, R12, -R30 ; /* 0x000000000c0c7229 */
/* 0x000e48000000081e */
/*0610*/ DSETP.MAX.AND P0, P1, R18, R14, PT ; /* 0x0000000e1200722a */
/* 0x001e08000390f000 */
/*0620*/ DADD R16, R16, -R32 ; /* 0x0000000010107229 */
/* 0x000fc80000000820 */
/*0630*/ DMUL R12, R12, R12 ; /* 0x0000000c0c0c7228 */
/* 0x002e620000000000 */
/*0640*/ FSEL R19, R19, R15, P0 ; /* 0x0000000f13137208 */
/* 0x001fca0000000000 */
/*0650*/ DFMA R12, R16, R16, R12 ; /* 0x00000010100c722b */
/* 0x002062000000000c */
/*0660*/ @P1 LOP3.LUT R19, R15, 0x80000, RZ, 0xfc, !PT ; /* 0x000800000f131812 */
/* 0x000fe200078efcff */
/*0670*/ IMAD.MOV.U32 R17, RZ, RZ, R14 ; /* 0x000000ffff117224 */
/* 0x001fe400078e000e */
/*0680*/ IMAD.MOV.U32 R14, RZ, RZ, R18 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0012 */
/*0690*/ IMAD.MOV.U32 R15, RZ, RZ, R19 ; /* 0x000000ffff0f7224 */
/* 0x000fc600078e0013 */
/*06a0*/ SEL R14, R14, R17, P0 ; /* 0x000000110e0e7207 */
/* 0x000fe20000000000 */
/*06b0*/ DADD R16, R20, -R24 ; /* 0x0000000014107229 */
/* 0x000e0a0000000818 */
/*06c0*/ DSETP.MAX.AND P0, P1, R14, R12, PT ; /* 0x0000000c0e00722a */
/* 0x002e62000390f000 */
/*06d0*/ MOV R4, R15 ; /* 0x0000000f00047202 */
/* 0x000fc60000000f00 */
/*06e0*/ DMUL R16, R16, R16 ; /* 0x0000001010107228 */
/* 0x001fc80000000000 */
/*06f0*/ DADD R18, R22, -R26 ; /* 0x0000000016127229 */
/* 0x000e22000000081a */
/*0700*/ FSEL R29, R4, R13, P0 ; /* 0x0000000d041d7208 */
/* 0x002fca0000000000 */
/*0710*/ DFMA R16, R18, R18, R16 ; /* 0x000000121210722b */
/* 0x0010620000000010 */
/*0720*/ @P1 LOP3.LUT R29, R13, 0x80000, RZ, 0xfc, !PT ; /* 0x000800000d1d1812 */
/* 0x000fe200078efcff */
/*0730*/ IMAD.MOV.U32 R19, RZ, RZ, R12 ; /* 0x000000ffff137224 */
/* 0x001fc800078e000c */
/*0740*/ IMAD.MOV.U32 R13, RZ, RZ, R29 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e001d */
/*0750*/ SEL R12, R14, R19, P0 ; /* 0x000000130e0c7207 */
/* 0x000fcc0000000000 */
/*0760*/ DSETP.MAX.AND P0, P1, R12, R16, PT ; /* 0x000000100c00722a */
/* 0x002e22000390f000 */
/*0770*/ IMAD.MOV.U32 R4, RZ, RZ, R13 ; /* 0x000000ffff047224 */
/* 0x000fc600078e000d */
/*0780*/ DADD R20, R20, -R30 ; /* 0x0000000014147229 */
/* 0x000e62000000081e */
/*0790*/ IMAD.MOV.U32 R15, RZ, RZ, R16 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0010 */
/*07a0*/ FSEL R19, R4, R17, P0 ; /* 0x0000001104137208 */
/* 0x001fe40000000000 */
/*07b0*/ DADD R22, R22, -R32 ; /* 0x0000000016167229 */
/* 0x000fc80000000820 */
/*07c0*/ DMUL R20, R20, R20 ; /* 0x0000001414147228 */
/* 0x002e240000000000 */
/*07d0*/ @P1 LOP3.LUT R19, R17, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000011131812 */
/* 0x000fe400078efcff */
/*07e0*/ SEL R12, R12, R15, P0 ; /* 0x0000000f0c0c7207 */
/* 0x000fe40000000000 */
/*07f0*/ DFMA R22, R22, R22, R20 ; /* 0x000000161616722b */
/* 0x001e220000000014 */
/*0800*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fcc00078e0013 */
/*0810*/ DSETP.MAX.AND P0, P1, R12, R22, PT ; /* 0x000000160c00722a */
/* 0x001e22000390f000 */
/*0820*/ MOV R4, R13 ; /* 0x0000000d00047202 */
/* 0x000fe40000000f00 */
/*0830*/ IMAD.MOV.U32 R17, RZ, RZ, R23 ; /* 0x000000ffff117224 */
/* 0x000fe200078e0017 */
/*0840*/ DADD R24, R24, -R30 ; /* 0x0000000018187229 */
/* 0x000e48000000081e */
/*0850*/ DADD R14, R26, -R32 ; /* 0x000000001a0e7229 */
/* 0x000fe20000000820 */
/*0860*/ FSEL R19, R4, R17, P0 ; /* 0x0000001104137208 */
/* 0x001fc60000000000 */
/*0870*/ DMUL R24, R24, R24 ; /* 0x0000001818187228 */
/* 0x002e240000000000 */
/*0880*/ @P1 LOP3.LUT R19, R17, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000011131812 */
/* 0x000fe400078efcff */
/*0890*/ SEL R12, R12, R22, P0 ; /* 0x000000160c0c7207 */
/* 0x000fe40000000000 */
/*08a0*/ DFMA R14, R14, R14, R24 ; /* 0x0000000e0e0e722b */
/* 0x001e220000000018 */
/*08b0*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fcc00078e0013 */
/*08c0*/ DSETP.MAX.AND P0, P1, R12, R14, PT ; /* 0x0000000e0c00722a */
/* 0x001e22000390f000 */
/*08d0*/ IMAD.MOV.U32 R4, RZ, RZ, R13 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000d */
/*08e0*/ IMAD.MOV.U32 R17, RZ, RZ, R15 ; /* 0x000000ffff117224 */
/* 0x000fca00078e000f */
/*08f0*/ FSEL R19, R4, R17, P0 ; /* 0x0000001104137208 */
/* 0x001fcc0000000000 */
/*0900*/ @P1 LOP3.LUT R19, R17, 0x80000, RZ, 0xfc, !PT ; /* 0x0008000011131812 */
/* 0x000fca00078efcff */
/*0910*/ IMAD.MOV.U32 R15, RZ, RZ, R19 ; /* 0x000000ffff0f7224 */
/* 0x000fc800078e0013 */
/*0920*/ MUFU.RSQ64H R13, R15 ; /* 0x0000000f000d7308 */
/* 0x000e220000001c00 */
/*0930*/ SEL R14, R12, R14, P0 ; /* 0x0000000e0c0e7207 */
/* 0x000fe40000000000 */
/*0940*/ IADD3 R12, R15, -0x3500000, RZ ; /* 0xfcb000000f0c7810 */
/* 0x000fe20007ffe0ff */
/*0950*/ IMAD.MOV.U32 R18, RZ, RZ, 0x0 ; /* 0x00000000ff127424 */
/* 0x000fe200078e00ff */
/*0960*/ MOV R19, 0x3fd80000 ; /* 0x3fd8000000137802 */
/* 0x000fc80000000f00 */
/*0970*/ DMUL R16, R12, R12 ; /* 0x0000000c0c107228 */
/* 0x001e0c0000000000 */
/*0980*/ DFMA R16, R14, -R16, 1 ; /* 0x3ff000000e10742b */
/* 0x001e0c0000000810 */
/*0990*/ DFMA R18, R16, R18, 0.5 ; /* 0x3fe000001012742b */
/* 0x001fc80000000012 */
/*09a0*/ DMUL R16, R12, R16 ; /* 0x000000100c107228 */
/* 0x000e220000000000 */
/*09b0*/ ISETP.GE.U32.AND P1, PT, R12, 0x7ca00000, PT ; /* 0x7ca000000c00780c */
/* 0x000fca0003f26070 */
/*09c0*/ DFMA R24, R18, R16, R12 ; /* 0x000000101218722b */
/* 0x001e0c000000000c */
/*09d0*/ DMUL R16, R14, R24 ; /* 0x000000180e107228 */
/* 0x001e080000000000 */
/*09e0*/ IADD3 R23, R25, -0x100000, RZ ; /* 0xfff0000019177810 */
/* 0x000fe20007ffe0ff */
/*09f0*/ IMAD.MOV.U32 R22, RZ, RZ, R24 ; /* 0x000000ffff167224 */
/* 0x000fe200078e0018 */
/*0a00*/ DFMA R18, R16, -R16, R14 ; /* 0x800000101012722b */
/* 0x001e22000000000e */
/*0a10*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fca0003f06070 */
/*0a20*/ DFMA R20, R18, R22, R16 ; /* 0x000000161214722b */
/* 0x0010620000000010 */
/*0a30*/ @!P1 BRA 0xa60 ; /* 0x0000002000009947 */
/* 0x000fea0003800000 */
/*0a40*/ MOV R4, 0xa60 ; /* 0x00000a6000047802 */
/* 0x000fca0000000f00 */
/*0a50*/ CALL.REL.NOINC 0xc70 ; /* 0x0000021000007944 */
/* 0x003fea0003c00000 */
/*0a60*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0a70*/ IADD3 R10, P1, R10, R35.reuse, RZ ; /* 0x000000230a0a7210 */
/* 0x080fe20007f3e0ff */
/*0a80*/ DADD R6, R20, R6 ; /* 0x0000000014067229 */
/* 0x0022a20000000006 */
/*0a90*/ IADD3 R8, P2, R8, R35, RZ ; /* 0x0000002308087210 */
/* 0x000fc60007f5e0ff */
/*0aa0*/ IMAD.X R11, R11, 0x1, R37.reuse, P1 ; /* 0x000000010b0b7824 */
/* 0x100fe400008e0625 */
/*0ab0*/ IMAD.X R9, R9, 0x1, R37, P2 ; /* 0x0000000109097824 */
/* 0x000fe200010e0625 */
/*0ac0*/ @!P0 BRA 0x160 ; /* 0xfffff69000008947 */
/* 0x006fea000383ffff */
/*0ad0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0ae0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0af0*/ STS.64 [R3.X8], R6 ; /* 0x0000000603007388 */
/* 0x000fe20000008a00 */
/*0b00*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fc60008011604 */
/*0b10*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*0b20*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe4000bf25270 */
/*0b30*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fd60003f05270 */
/*0b40*/ @!P1 BRA 0xc10 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0b50*/ SHF.L.U32 R2, R3, 0x3, RZ ; /* 0x0000000303027819 */
/* 0x000fe200000006ff */
/*0b60*/ IMAD.U32 R8, RZ, RZ, UR4 ; /* 0x00000004ff087e24 */
/* 0x000fca000f8e00ff */
/*0b70*/ ISETP.GE.U32.AND P1, PT, R3, R8, PT ; /* 0x000000080300720c */
/* 0x000fda0003f26070 */
/*0b80*/ @!P1 IMAD R6, R8, 0x8, R2 ; /* 0x0000000808069824 */
/* 0x000fe200078e0202 */
/*0b90*/ @!P1 LDS.64 R4, [R3.X8] ; /* 0x0000000003049984 */
/* 0x000fe20000008a00 */
/*0ba0*/ SHF.R.U32.HI R8, RZ, 0x1, R8 ; /* 0x00000001ff087819 */
/* 0x000fc80000011608 */
/*0bb0*/ @!P1 LDS.64 R6, [R6] ; /* 0x0000000006069984 */
/* 0x000e640000000a00 */
/*0bc0*/ @!P1 DADD R4, R4, R6 ; /* 0x0000000004049229 */
/* 0x002e4e0000000006 */
/*0bd0*/ @!P1 STS.64 [R3.X8], R4 ; /* 0x0000000403009388 */
/* 0x002fe80000008a00 */
/*0be0*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*0bf0*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fda0003f25270 */
/*0c00*/ @P1 BRA 0xb70 ; /* 0xffffff6000001947 */
/* 0x000fea000383ffff */
/*0c10*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0c20*/ LDS.64 R2, [RZ] ; /* 0x00000000ff027984 */
/* 0x000e620000000a00 */
/*0c30*/ IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff057424 */
/* 0x000fc800078e00ff */
/*0c40*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e0005 */
/*0c50*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x002fe2000c101b06 */
/*0c60*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c70*/ ISETP.GE.U32.AND P1, PT, R12, -0x3400000, PT ; /* 0xfcc000000c00780c */
/* 0x000fe20003f26070 */
/*0c80*/ BSSY B2, 0xf10 ; /* 0x0000028000027945 */
/* 0x000fe20003800000 */
/*0c90*/ MOV R12, R14 ; /* 0x0000000e000c7202 */
/* 0x000fe20000000f00 */
/*0ca0*/ IMAD.MOV.U32 R13, RZ, RZ, R15 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000f */
/*0cb0*/ IMAD.MOV.U32 R22, RZ, RZ, R24 ; /* 0x000000ffff167224 */
/* 0x000fe400078e0018 */
/*0cc0*/ IMAD.MOV.U32 R14, RZ, RZ, R18 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0012 */
/*0cd0*/ IMAD.MOV.U32 R15, RZ, RZ, R19 ; /* 0x000000ffff0f7224 */
/* 0x000fc800078e0013 */
/*0ce0*/ @!P1 BRA 0xd70 ; /* 0x0000008000009947 */
/* 0x000fea0003800000 */
/*0cf0*/ DFMA.RM R14, R14, R22, R16 ; /* 0x000000160e0e722b */
/* 0x000e140000004010 */
/*0d00*/ IADD3 R16, P1, R14, 0x1, RZ ; /* 0x000000010e107810 */
/* 0x001fca0007f3e0ff */
/*0d10*/ IMAD.X R17, RZ, RZ, R15, P1 ; /* 0x000000ffff117224 */
/* 0x000fcc00008e060f */
/*0d20*/ DFMA.RP R12, -R14, R16, R12 ; /* 0x000000100e0c722b */
/* 0x000e0c000000810c */
/*0d30*/ DSETP.GT.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00722a */
/* 0x001e0c0003f24000 */
/*0d40*/ FSEL R14, R16, R14, P1 ; /* 0x0000000e100e7208 */
/* 0x001fe40000800000 */
/*0d50*/ FSEL R15, R17, R15, P1 ; /* 0x0000000f110f7208 */
/* 0x000fe20000800000 */
/*0d60*/ BRA 0xf00 ; /* 0x0000019000007947 */
/* 0x000fea0003800000 */
/*0d70*/ DSETP.NE.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00722a */
/* 0x000e1c0003f25000 */
/*0d80*/ @!P1 BRA 0xef0 ; /* 0x0000016000009947 */
/* 0x001fea0003800000 */
/*0d90*/ ISETP.GE.AND P1, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fda0003f26270 */
/*0da0*/ @!P1 MOV R14, 0x0 ; /* 0x00000000000e9802 */
/* 0x000fe20000000f00 */
/*0db0*/ @!P1 IMAD.MOV.U32 R15, RZ, RZ, -0x80000 ; /* 0xfff80000ff0f9424 */
/* 0x000fe200078e00ff */
/*0dc0*/ @!P1 BRA 0xf00 ; /* 0x0000013000009947 */
/* 0x000fea0003800000 */
/*0dd0*/ ISETP.GT.AND P1, PT, R13, 0x7fefffff, PT ; /* 0x7fefffff0d00780c */
/* 0x000fda0003f24270 */
/*0de0*/ @P1 BRA 0xef0 ; /* 0x0000010000001947 */
/* 0x000fea0003800000 */
/*0df0*/ DMUL R12, R12, 8.11296384146066816958e+31 ; /* 0x469000000c0c7828 */
/* 0x000e220000000000 */
/*0e00*/ IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e00ff */
/*0e10*/ IMAD.MOV.U32 R18, RZ, RZ, 0x0 ; /* 0x00000000ff127424 */
/* 0x000fe400078e00ff */
/*0e20*/ IMAD.MOV.U32 R19, RZ, RZ, 0x3fd80000 ; /* 0x3fd80000ff137424 */
/* 0x000fe200078e00ff */
/*0e30*/ MUFU.RSQ64H R15, R13 ; /* 0x0000000d000f7308 */
/* 0x001e240000001c00 */
/*0e40*/ DMUL R16, R14, R14 ; /* 0x0000000e0e107228 */
/* 0x001e0c0000000000 */
/*0e50*/ DFMA R16, R12, -R16, 1 ; /* 0x3ff000000c10742b */
/* 0x001e0c0000000810 */
/*0e60*/ DFMA R18, R16, R18, 0.5 ; /* 0x3fe000001012742b */
/* 0x001fc80000000012 */
/*0e70*/ DMUL R16, R14, R16 ; /* 0x000000100e107228 */
/* 0x000e0c0000000000 */
/*0e80*/ DFMA R16, R18, R16, R14 ; /* 0x000000101210722b */
/* 0x001e0c000000000e */
/*0e90*/ DMUL R14, R12, R16 ; /* 0x000000100c0e7228 */
/* 0x0010480000000000 */
/*0ea0*/ IADD3 R17, R17, -0x100000, RZ ; /* 0xfff0000011117810 */
/* 0x001fe40007ffe0ff */
/*0eb0*/ DFMA R18, R14, -R14, R12 ; /* 0x8000000e0e12722b */
/* 0x002e0c000000000c */
/*0ec0*/ DFMA R14, R16, R18, R14 ; /* 0x00000012100e722b */
/* 0x001e14000000000e */
/*0ed0*/ IADD3 R15, R15, -0x3500000, RZ ; /* 0xfcb000000f0f7810 */
/* 0x001fe20007ffe0ff */
/*0ee0*/ BRA 0xf00 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0ef0*/ DADD R14, R12, R12 ; /* 0x000000000c0e7229 */
/* 0x00004c000000000c */
/*0f00*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0f10*/ IMAD.MOV.U32 R12, RZ, RZ, R4 ; /* 0x000000ffff0c7224 */
/* 0x001fe200078e0004 */
/*0f20*/ MOV R20, R14 ; /* 0x0000000e00147202 */
/* 0x002fe20000000f00 */
/*0f30*/ IMAD.MOV.U32 R13, RZ, RZ, 0x0 ; /* 0x00000000ff0d7424 */
/* 0x000fe400078e00ff */
/*0f40*/ IMAD.MOV.U32 R21, RZ, RZ, R15 ; /* 0x000000ffff157224 */
/* 0x000fe400078e000f */
/*0f50*/ RET.REL.NODEC R12 0x0 ; /* 0xfffff0a00c007950 */
/* 0x000fea0003c3ffff */
/*0f60*/ BRA 0xf60; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0f70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fe0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ff0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12computeValuePdPKdj
.globl _Z12computeValuePdPKdj
.p2align 8
.type _Z12computeValuePdPKdj,@function
_Z12computeValuePdPKdj:
s_clause 0x1
s_load_b32 s5, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_mov_b32 s4, s15
s_addc_u32 s3, s1, 0
s_mov_b32 s11, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s6, s5, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[41:42], null, s4, s6, v[0:1]
v_mov_b32_e32 v42, 0
v_mov_b32_e32 v43, 0
v_cmpx_gt_u32_e64 s10, v41
s_cbranch_execz .LBB0_6
s_load_b64 s[8:9], s[0:1], 0x8
s_load_b32 s12, s[2:3], 0x0
v_mov_b32_e32 v42, 0
v_mov_b32_e32 v44, 0
v_mov_b32_e32 v45, 0
s_mov_b32 s3, 0
s_mul_i32 s2, s10, 5
v_lshlrev_b64 v[1:2], 3, v[41:42]
s_lshl_b64 s[14:15], s[2:3], 3
v_dual_mov_b32 v42, v44 :: v_dual_mov_b32 v43, v45
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v46, vcc_lo, s8, v1
v_add_co_ci_u32_e32 v47, vcc_lo, s9, v2, vcc_lo
s_mul_i32 s12, s12, s6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v48, vcc_lo, v46, s14
v_add_co_ci_u32_e32 v49, vcc_lo, s15, v47, vcc_lo
s_mul_i32 s2, s12, 5
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[6:7], s[2:3], 3
.LBB0_2:
global_load_b128 v[11:14], v[46:47], off
global_load_b128 v[15:18], v[48:49], off
global_load_b128 v[37:40], v[46:47], off offset:16
global_load_b128 v[50:53], v[48:49], off offset:16
global_load_b64 v[19:20], v[46:47], off offset:32
global_load_b64 v[54:55], v[48:49], off offset:32
s_mov_b64 s[8:9], 0
s_waitcnt vmcnt(5)
v_add_f64 v[1:2], v[11:12], -v[13:14]
s_waitcnt vmcnt(4)
v_add_f64 v[21:22], v[15:16], -v[17:18]
s_waitcnt vmcnt(3)
v_add_f64 v[3:4], v[11:12], -v[37:38]
s_waitcnt vmcnt(2)
v_add_f64 v[23:24], v[15:16], -v[50:51]
v_add_f64 v[5:6], v[11:12], -v[39:40]
v_add_f64 v[25:26], v[15:16], -v[52:53]
s_waitcnt vmcnt(1)
v_add_f64 v[7:8], v[11:12], -v[19:20]
s_waitcnt vmcnt(0)
v_add_f64 v[27:28], v[15:16], -v[54:55]
v_add_f64 v[9:10], v[13:14], -v[37:38]
v_add_f64 v[29:30], v[17:18], -v[50:51]
v_add_f64 v[11:12], v[13:14], -v[39:40]
v_add_f64 v[31:32], v[17:18], -v[52:53]
v_add_f64 v[13:14], v[13:14], -v[19:20]
v_add_f64 v[33:34], v[17:18], -v[54:55]
v_add_f64 v[15:16], v[37:38], -v[39:40]
v_add_f64 v[35:36], v[50:51], -v[52:53]
v_add_f64 v[17:18], v[37:38], -v[19:20]
v_add_f64 v[37:38], v[50:51], -v[54:55]
v_add_f64 v[19:20], v[39:40], -v[19:20]
v_add_f64 v[39:40], v[52:53], -v[54:55]
v_dual_mov_b32 v51, v45 :: v_dual_mov_b32 v50, v44
.p2align 6
.LBB0_3:
s_lshl_b32 s2, s8, 1
s_add_u32 s8, s8, 1
s_mov_b32 m0, s2
s_addc_u32 s9, s9, 0
v_movrels_b32_e32 v53, v22
v_movrels_b32_e32 v52, v21
v_movrels_b32_e32 v55, v2
v_movrels_b32_e32 v54, v1
v_max_f64 v[50:51], v[50:51], v[50:51]
s_cmp_eq_u32 s8, 10
v_mul_f64 v[52:53], v[52:53], v[52:53]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[52:53], v[54:55], v[54:55], v[52:53]
v_max_f64 v[50:51], v[50:51], v[52:53]
s_cbranch_scc0 .LBB0_3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[50:51]
v_add_nc_u32_e32 v41, s12, v41
v_add_co_u32 v48, s2, v48, s6
v_add_co_ci_u32_e64 v49, s2, s7, v49, s2
v_cndmask_b32_e64 v1, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v1, 8, v1
v_ldexp_f64 v[1:2], v[50:51], v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rsq_f64_e32 v[3:4], v[1:2]
s_waitcnt_depctr 0xfff
v_mul_f64 v[5:6], v[1:2], v[3:4]
v_mul_f64 v[3:4], v[3:4], 0.5
v_fma_f64 v[7:8], -v[3:4], v[5:6], 0.5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[5:6], v[5:6], v[7:8], v[5:6]
v_fma_f64 v[3:4], v[3:4], v[7:8], v[3:4]
v_fma_f64 v[7:8], -v[5:6], v[5:6], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[5:6], v[7:8], v[3:4], v[5:6]
v_fma_f64 v[7:8], -v[5:6], v[5:6], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fma_f64 v[3:4], v[7:8], v[3:4], v[5:6]
v_cndmask_b32_e64 v5, 0, 0xffffff80, vcc_lo
v_cmp_class_f64_e64 vcc_lo, v[1:2], 0x260
v_ldexp_f64 v[3:4], v[3:4], v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v1, v3, v1
v_add_co_u32 v46, vcc_lo, v46, s6
v_add_co_ci_u32_e32 v47, vcc_lo, s7, v47, vcc_lo
v_add_f64 v[42:43], v[42:43], v[1:2]
v_cmp_le_u32_e32 vcc_lo, s10, v41
s_or_b32 s3, vcc_lo, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s3
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
s_or_b32 exec_lo, exec_lo, s11
v_cmp_lt_u16_e64 s2, s5, 2
v_lshl_add_u32 v1, v0, 3, 0
s_and_b32 vcc_lo, exec_lo, s2
ds_store_b64 v1, v[42:43]
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_vccnz .LBB0_11
s_and_b32 s2, 0xffff, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_lshr_b32 s2, s2, 1
s_branch .LBB0_9
.p2align 6
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_gt_u32 s2, 1
s_mov_b32 s2, s3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_11
.LBB0_9:
s_mov_b32 s3, exec_lo
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_8
v_add_nc_u32_e32 v2, s2, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 3, 0
ds_load_b64 v[2:3], v2
ds_load_b64 v[4:5], v1
s_waitcnt lgkmcnt(0)
v_add_f64 v[2:3], v[2:3], v[4:5]
ds_store_b64 v1, v[2:3]
s_branch .LBB0_8
.LBB0_11:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_13
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s5, 0
v_mov_b32_e32 v2, 0
s_lshl_b64 s[2:3], s[4:5], 3
ds_load_b64 v[0:1], v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b64 v2, v[0:1], s[0:1]
.LBB0_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12computeValuePdPKdj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 56
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12computeValuePdPKdj, .Lfunc_end0-_Z12computeValuePdPKdj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12computeValuePdPKdj
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12computeValuePdPKdj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 56
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
//#include <opencv2/opencv.hpp>
__global__ void mykernel(int *arr, int *stop){
    // Each thread accumulates (int)(sin(i)^2 * 10000) mod 10000 over a
    // grid-stride set of indices i in [id, *stop), then stores its partial
    // sum into arr[id].
    //
    // Params:
    //   arr  - device buffer of per-thread partial sums
    //   stop - device pointer to the (exclusive) upper bound of i
    int id = blockIdx.x * blockDim.x + threadIdx.x;  // unique global thread id
    int numThreads = gridDim.x * blockDim.x;         // total threads in grid (x)
    int localsum = 0;
    for (int i = id; i < *stop; i += numThreads){
        double tmp = sin(i * 1.0);
        double tmp2 = tmp * tmp;
        int z = (int)(tmp2 * 10000.0);
        localsum = (localsum + z) % 10000;
    }
    printf(" %d ", localsum);
    // BUGFIX: the host allocates only min(gridDim.x*blockDim.x, *stop) ints,
    // so an unconditional arr[id] store is out of bounds whenever
    // *stop < total thread count. Threads with id >= *stop have localsum == 0
    // and their slot is never read by the host, so skipping the store does
    // not change the program's output.
    if (id < *stop){
        arr[id] = localsum;
    }
}
int main(int argc, char *argv[]){
    // Usage: <prog> <stop>
    // Launches mykernel over a fixed 4x5 grid, sums the per-thread partial
    // results mod 10000, and prints the final "PIN".
    if (argc < 2){  // guard before dereferencing argv[1]
        fprintf(stderr, "usage: %s <stop>\n", argv[0]);
        return 1;
    }
    int stop = (int)atol(argv[1]);
    assert(stop >= 1);  // was `>= 1.0`: int/double comparison, same intent
    printf("Hello World!\n");

    int blocks = 4;
    int threads = 5;
    int totalThreads = blocks * threads;  // every launched thread writes arr[id]
    int result = 0;

    // Number of per-thread slots that actually carry a partial sum.
    int arrsize = (totalThreads < stop) ? totalThreads : stop;

    // BUGFIX: allocate room for EVERY launched thread, not just arrsize.
    // The kernel stores to arr[id] for ids up to totalThreads-1, so sizing
    // the buffers by arrsize was an out-of-bounds device write whenever
    // stop < totalThreads. Only the first arrsize entries are summed below.
    int *arr = (int *)malloc(sizeof(int) * totalThreads);  // host buffer
    assert(arr != NULL);

    int *devarr;
    int *devstop;
    cudaMalloc((int**) &devarr, sizeof(int) * totalThreads);  // device results
    cudaMalloc((int**) &devstop, sizeof(int));                // device copy of stop
    // NOTE(review): this copies uninitialized host memory; kept from the
    // original since the kernel overwrites every slot it reads from.
    cudaMemcpy(devarr, arr, sizeof(int) * totalThreads, cudaMemcpyHostToDevice);
    cudaMemcpy(devstop, &stop, sizeof(int), cudaMemcpyHostToDevice);

    mykernel<<<blocks, threads>>>(devarr, devstop);

    // cudaMemcpy implicitly synchronizes with the kernel launch above.
    cudaMemcpy(arr, devarr, sizeof(int) * totalThreads, cudaMemcpyDeviceToHost);
    printf("arrsize: %d\n", arrsize);
    for (int i = 0; i < arrsize; i++){
        result = (result + arr[i]) % 10000;
    }
    printf("PIN is: %d\n", result);

    free(arr);
    cudaFree(devarr);
    cudaFree(devstop);
    return 0;
}
|
code for sm_80
Function : _Z8mykernelPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff0a7624 */
/* 0x000fe200078e00ff */
/*0020*/ ULDC.64 UR36, c[0x0][0x118] ; /* 0x0000460000247ab9 */
/* 0x000fe20000000a00 */
/*0030*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff0b7624 */
/* 0x000fe200078e00ff */
/*0040*/ IADD3 R1, R1, -0x38, RZ ; /* 0xffffffc801017810 */
/* 0x000fc80007ffe0ff */
/*0050*/ LDG.E R3, [R10.64] ; /* 0x000000240a037981 */
/* 0x000ea2000c1e1900 */
/*0060*/ BSSY B0, 0x510 ; /* 0x000004a000007945 */
/* 0x000fe20003800000 */
/*0070*/ IADD3 R6, P2, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f5e0ff */
/*0080*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe200078e00ff */
/*0090*/ S2R R16, SR_CTAID.X ; /* 0x0000000000107919 */
/* 0x000e280000002500 */
/*00a0*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*00b0*/ IMAD R16, R16, c[0x0][0x0], R5 ; /* 0x0000000010107a24 */
/* 0x001fca00078e0205 */
/*00c0*/ ISETP.GE.AND P0, PT, R16, R3, PT ; /* 0x000000031000720c */
/* 0x004fda0003f06270 */
/*00d0*/ @P0 BRA 0x500 ; /* 0x0000042000000947 */
/* 0x000fea0003800000 */
/*00e0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe400078e00ff */
/*00f0*/ IMAD.MOV.U32 R3, RZ, RZ, R16 ; /* 0x000000ffff037224 */
/* 0x000fc800078e0010 */
/*0100*/ I2F.F64 R12, R3 ; /* 0x00000003000c7312 */
/* 0x000e220000201c00 */
/*0110*/ BSSY B1, 0x2a0 ; /* 0x0000018000017945 */
/* 0x000fe20003800000 */
/*0120*/ LOP3.LUT R0, R13, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff0d007812 */
/* 0x001fe400078ec0ff */
/*0130*/ ISETP.EQ.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x000fe40003f22270 */
/*0140*/ ISETP.NE.AND P0, PT, R0, 0x7ff00000, PT ; /* 0x7ff000000000780c */
/* 0x000fda0003f05270 */
/*0150*/ @!P0 BRA P1, 0x270 ; /* 0x0000011000008947 */
/* 0x000fea0000800000 */
/*0160*/ DMUL R8, R12, c[0x2][0x0] ; /* 0x008000000c087a28 */
/* 0x000e080000000000 */
/*0170*/ DSETP.GE.AND P0, PT, |R12|, 2.14748364800000000000e+09, PT ; /* 0x41e000000c00742a */
/* 0x000fe40003f06200 */
/*0180*/ F2I.F64 R0, R8 ; /* 0x0000000800007311 */
/* 0x001e300000301100 */
/*0190*/ I2F.F64 R14, R0 ; /* 0x00000000000e7312 */
/* 0x001e220000201c00 */
/*01a0*/ STL [R1], R0 ; /* 0x0000000001007387 */
/* 0x0003e20000100800 */
/*01b0*/ DFMA R4, -R14, c[0x2][0x8], R12 ; /* 0x008002000e047a2b */
/* 0x001e0c000000010c */
/*01c0*/ DFMA R4, -R14, c[0x2][0x10], R4 ; /* 0x008004000e047a2b */
/* 0x001e0c0000000104 */
/*01d0*/ DFMA R4, -R14, c[0x2][0x18], R4 ; /* 0x008006000e047a2b */
/* 0x0012220000000104 */
/*01e0*/ @!P0 BRA 0x290 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*01f0*/ BSSY B2, 0x230 ; /* 0x0000003000027945 */
/* 0x000fe20003800000 */
/*0200*/ MOV R0, 0x220 ; /* 0x0000022000007802 */
/* 0x002fc80000000f00 */
/*0210*/ CALL.REL.NOINC 0x640 ; /* 0x0000042000007944 */
/* 0x001fea0003c00000 */
/*0220*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0230*/ LDL R0, [R1] ; /* 0x0000000001007983 */
/* 0x0001620000100800 */
/*0240*/ IMAD.MOV.U32 R4, RZ, RZ, R18 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0012 */
/*0250*/ IMAD.MOV.U32 R5, RZ, RZ, R19 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0013 */
/*0260*/ BRA 0x290 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0270*/ DMUL R4, RZ, R12 ; /* 0x0000000cff047228 */
/* 0x0000620000000000 */
/*0280*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fca00078e00ff */
/*0290*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*02a0*/ IMAD.SHL.U32 R7, R0, 0x8, RZ ; /* 0x0000000800077824 */
/* 0x020fe200078e00ff */
/*02b0*/ LDG.E R28, [R10.64] ; /* 0x000000240a1c7981 */
/* 0x000ea2000c1e1900 */
/*02c0*/ IMAD.MOV.U32 R26, RZ, RZ, 0x8 ; /* 0x00000008ff1a7424 */
/* 0x000fc600078e00ff */
/*02d0*/ LOP3.LUT R7, R7, 0x8, RZ, 0xc0, !PT ; /* 0x0000000807077812 */
/* 0x000fca00078ec0ff */
/*02e0*/ IMAD.WIDE R26, R7, R26, c[0x4][0x10] ; /* 0x01000400071a7625 */
/* 0x000fca00078e021a */
/*02f0*/ LDG.E.64.CONSTANT R22, [R26.64+0x8] ; /* 0x000008241a167981 */
/* 0x000728000c1e9b00 */
/*0300*/ LDG.E.64.CONSTANT R20, [R26.64+0x10] ; /* 0x000010241a147981 */
/* 0x0006a8000c1e9b00 */
/*0310*/ LDG.E.64.CONSTANT R18, [R26.64+0x18] ; /* 0x000018241a127981 */
/* 0x0006a8000c1e9b00 */
/*0320*/ LDG.E.64.CONSTANT R14, [R26.64+0x20] ; /* 0x000020241a0e7981 */
/* 0x0026a8000c1e9b00 */
/*0330*/ LDG.E.64.CONSTANT R12, [R26.64+0x28] ; /* 0x000028241a0c7981 */
/* 0x0016a8000c1e9b00 */
/*0340*/ LDG.E.64.CONSTANT R8, [R26.64+0x30] ; /* 0x000030241a087981 */
/* 0x0006a2000c1e9b00 */
/*0350*/ R2P PR, R0, 0x3 ; /* 0x0000000300007804 */
/* 0x000fe20000000000 */
/*0360*/ IMAD.MOV.U32 R24, RZ, RZ, 0x79785eba ; /* 0x79785ebaff187424 */
/* 0x000fc400078e00ff */
/*0370*/ IMAD.MOV.U32 R0, RZ, RZ, 0x3de5db65 ; /* 0x3de5db65ff007424 */
/* 0x000fc600078e00ff */
/*0380*/ FSEL R24, -R24, 4.2945490664224492434e-19, !P0 ; /* 0x20fd816418187808 */
/* 0x000fe40004000100 */
/*0390*/ FSEL R25, R0, -0.082518599927425384521, !P0 ; /* 0xbda8ff8300197808 */
/* 0x000fe20004000000 */
/*03a0*/ DMUL R26, R4, R4 ; /* 0x00000004041a7228 */
/* 0x008f0c0000000000 */
/*03b0*/ DFMA R22, R26, R24, R22 ; /* 0x000000181a16722b */
/* 0x010e8c0000000016 */
/*03c0*/ DFMA R20, R26, R22, R20 ; /* 0x000000161a14722b */
/* 0x004e0c0000000014 */
/*03d0*/ DFMA R18, R26, R20, R18 ; /* 0x000000141a12722b */
/* 0x001e0c0000000012 */
/*03e0*/ DFMA R14, R26, R18, R14 ; /* 0x000000121a0e722b */
/* 0x001e0c000000000e */
/*03f0*/ DFMA R12, R26, R14, R12 ; /* 0x0000000e1a0c722b */
/* 0x001e0c000000000c */
/*0400*/ DFMA R8, R26, R12, R8 ; /* 0x0000000c1a08722b */
/* 0x001e0c0000000008 */
/*0410*/ DFMA R4, R8, R4, R4 ; /* 0x000000040804722b */
/* 0x001fc80000000004 */
/*0420*/ @P0 DFMA R4, R26, R8, 1 ; /* 0x3ff000001a04042b */
/* 0x000e0c0000000008 */
/*0430*/ @P1 DFMA R4, R4, -1, RZ ; /* 0xbff000000404182b */
/* 0x001e0c00000000ff */
/*0440*/ DMUL R4, R4, R4 ; /* 0x0000000404047228 */
/* 0x001e0c0000000000 */
/*0450*/ DMUL R4, R4, 10000 ; /* 0x40c3880004047828 */
/* 0x001e140000000000 */
/*0460*/ F2I.F64.TRUNC R5, R4 ; /* 0x0000000400057311 */
/* 0x001e22000030d100 */
/*0470*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff087624 */
/* 0x000fc800078e00ff */
/*0480*/ IMAD R3, R8, c[0x0][0x0], R3 ; /* 0x0000000008037a24 */
/* 0x000fca00078e0203 */
/*0490*/ ISETP.GE.AND P0, PT, R3, R28, PT ; /* 0x0000001c0300720c */
/* 0x000fe20003f06270 */
/*04a0*/ IMAD.IADD R2, R5, 0x1, R2 ; /* 0x0000000105027824 */
/* 0x001fc800078e0202 */
/*04b0*/ IMAD.HI R0, R2, 0x68db8bad, RZ ; /* 0x68db8bad02007827 */
/* 0x000fca00078e02ff */
/*04c0*/ SHF.R.U32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fc80000011600 */
/*04d0*/ LEA.HI.SX32 R7, R0, R7, 0x14 ; /* 0x0000000700077211 */
/* 0x000fca00078fa2ff */
/*04e0*/ IMAD R2, R7, -0x2710, R2 ; /* 0xffffd8f007027824 */
/* 0x000fe200078e0202 */
/*04f0*/ @!P0 BRA 0x100 ; /* 0xfffffc0000008947 */
/* 0x000fea000383ffff */
/*0500*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0510*/ MOV R8, 0x0 ; /* 0x0000000000087802 */
/* 0x000fe20000000f00 */
/*0520*/ STL [R1+0x8], R2 ; /* 0x0000080201007387 */
/* 0x0001e20000100800 */
/*0530*/ IADD3 R6, P0, R6, 0x8, RZ ; /* 0x0000000806067810 */
/* 0x000fe20007f1e0ff */
/*0540*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe400078e00ff */
/*0550*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0560*/ LDC.64 R8, c[0x4][R8] ; /* 0x0100000008087b82 */
/* 0x000e620000000a00 */
/*0570*/ IADD3.X R7, RZ, c[0x0][0x24], RZ, P0, P2 ; /* 0x00000900ff077a10 */
/* 0x000fce00007e44ff */
/*0580*/ LEPC R10 ; /* 0x00000000000a734e */
/* 0x000fe20000000000 */
/*0590*/ MOV R3, 0x600 ; /* 0x0000060000037802 */
/* 0x000fe40000000f00 */
/*05a0*/ MOV R20, 0x580 ; /* 0x0000058000147802 */
/* 0x000fc40000000f00 */
/*05b0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*05c0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*05d0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */
/* 0x000fc8000791e10a */
/*05e0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e250b */
/*05f0*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x003fea0003c00000 */
/*0600*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff117424 */
/* 0x000fc800078e00ff */
/*0610*/ IMAD.WIDE R16, R16, R17, c[0x0][0x160] ; /* 0x0000580010107625 */
/* 0x000fca00078e0211 */
/*0620*/ STG.E [R16.64], R2 ; /* 0x0000000210007986 */
/* 0x000fe2000c101924 */
/*0630*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0640*/ SHF.R.U32.HI R4, RZ, 0x14, R13.reuse ; /* 0x00000014ff047819 */
/* 0x100fe2000001160d */
/*0650*/ IMAD.MOV.U32 R18, RZ, RZ, R12 ; /* 0x000000ffff127224 */
/* 0x000fe400078e000c */
/*0660*/ IMAD.MOV.U32 R19, RZ, RZ, R13 ; /* 0x000000ffff137224 */
/* 0x000fe200078e000d */
/*0670*/ LOP3.LUT R4, R4, 0x7ff, RZ, 0xc0, !PT ; /* 0x000007ff04047812 */
/* 0x000fc800078ec0ff */
/*0680*/ ISETP.NE.AND P0, PT, R4, 0x7ff, PT ; /* 0x000007ff0400780c */
/* 0x000fda0003f05270 */
/*0690*/ @!P0 BRA 0xfb0 ; /* 0x0000091000008947 */
/* 0x000fea0003800000 */
/*06a0*/ IADD3 R8, R4, -0x400, RZ ; /* 0xfffffc0004087810 */
/* 0x000fe20007ffe0ff */
/*06b0*/ BSSY B3, 0x9d0 ; /* 0x0000031000037945 */
/* 0x000fe20003800000 */
/*06c0*/ CS2R R14, SRZ ; /* 0x00000000000e7805 */
/* 0x000fe4000001ff00 */
/*06d0*/ SHF.R.U32.HI R7, RZ, 0x6, R8 ; /* 0x00000006ff077819 */
/* 0x000fc80000011608 */
/*06e0*/ IADD3 R5, -R7.reuse, 0x10, RZ ; /* 0x0000001007057810 */
/* 0x040fe40007ffe1ff */
/*06f0*/ IADD3 R4, -R7, 0x13, RZ ; /* 0x0000001307047810 */
/* 0x000fe40007ffe1ff */
/*0700*/ ISETP.GT.AND P0, PT, R5, 0xe, PT ; /* 0x0000000e0500780c */
/* 0x000fe40003f04270 */
/*0710*/ IADD3 R7, -R7, 0xf, RZ ; /* 0x0000000f07077810 */
/* 0x000fe40007ffe1ff */
/*0720*/ SEL R4, R4, 0x12, !P0 ; /* 0x0000001204047807 */
/* 0x000fc60004000000 */
/*0730*/ IMAD.MOV.U32 R17, RZ, RZ, R7 ; /* 0x000000ffff117224 */
/* 0x000fe200078e0007 */
/*0740*/ ISETP.GT.AND P0, PT, R5, R4, PT ; /* 0x000000040500720c */
/* 0x000fe40003f04270 */
/*0750*/ LOP3.LUT P3, R5, R8, 0x3f, RZ, 0xc0, !PT ; /* 0x0000003f08057812 */
/* 0x000fe4000786c0ff */
/*0760*/ IADD3 R8, R1, 0x10, RZ ; /* 0x0000001001087810 */
/* 0x000fd20007ffe0ff */
/*0770*/ @P0 BRA 0x9c0 ; /* 0x0000024000000947 */
/* 0x000fea0003800000 */
/*0780*/ IMAD.MOV.U32 R20, RZ, RZ, 0x8 ; /* 0x00000008ff147424 */
/* 0x000fe200078e00ff */
/*0790*/ SHF.L.U64.HI R9, R18.reuse, 0xb, R19 ; /* 0x0000000b12097819 */
/* 0x040fe20000010213 */
/*07a0*/ IMAD.SHL.U32 R23, R18, 0x800, RZ ; /* 0x0000080012177824 */
/* 0x000fe200078e00ff */
/*07b0*/ CS2R R14, SRZ ; /* 0x00000000000e7805 */
/* 0x000fe2000001ff00 */
/*07c0*/ IMAD.WIDE R20, R7, R20, c[0x4][0x18] ; /* 0x0100060007147625 */
/* 0x000fe200078e0214 */
/*07d0*/ LOP3.LUT R9, R9, 0x80000000, RZ, 0xfc, !PT ; /* 0x8000000009097812 */
/* 0x000fc600078efcff */
/*07e0*/ IMAD.MOV.U32 R25, RZ, RZ, R21 ; /* 0x000000ffff197224 */
/* 0x000fe400078e0015 */
/*07f0*/ IMAD.MOV.U32 R21, RZ, RZ, R8 ; /* 0x000000ffff157224 */
/* 0x000fe400078e0008 */
/*0800*/ IMAD.MOV.U32 R17, RZ, RZ, R7 ; /* 0x000000ffff117224 */
/* 0x000fe400078e0007 */
/*0810*/ IMAD.MOV.U32 R18, RZ, RZ, R20 ; /* 0x000000ffff127224 */
/* 0x000fe200078e0014 */
/*0820*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0830*/ IMAD.MOV.U32 R19, RZ, RZ, R25 ; /* 0x000000ffff137224 */
/* 0x000fcc00078e0019 */
/*0840*/ LDG.E.64.CONSTANT R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ea2000c1e9b00 */
/*0850*/ IADD3 R17, R17, 0x1, RZ ; /* 0x0000000111117810 */
/* 0x000fe20007ffe0ff */
/*0860*/ IMAD.WIDE.U32 R14, P4, R18, R23, R14 ; /* 0x00000017120e7225 */
/* 0x004fc8000788000e */
/*0870*/ IMAD R27, R18, R9, RZ ; /* 0x00000009121b7224 */
/* 0x000fe400078e02ff */
/*0880*/ IMAD R22, R19, R23, RZ ; /* 0x0000001713167224 */
/* 0x000fc600078e02ff */
/*0890*/ IADD3 R15, P0, R27, R15, RZ ; /* 0x0000000f1b0f7210 */
/* 0x000fe20007f1e0ff */
/*08a0*/ IMAD.HI.U32 R27, R19, R23, RZ ; /* 0x00000017131b7227 */
/* 0x000fc600078e00ff */
/*08b0*/ IADD3 R15, P1, R22, R15, RZ ; /* 0x0000000f160f7210 */
/* 0x000fe20007f3e0ff */
/*08c0*/ IMAD.HI.U32 R22, R18, R9, RZ ; /* 0x0000000912167227 */
/* 0x000fc800078e00ff */
/*08d0*/ IMAD.X R22, RZ, RZ, R22, P4 ; /* 0x000000ffff167224 */
/* 0x000fe200020e0616 */
/*08e0*/ ISETP.GE.AND P4, PT, R17, R4, PT ; /* 0x000000041100720c */
/* 0x000fe20003f86270 */
/*08f0*/ IMAD.HI.U32 R18, R19, R9.reuse, RZ ; /* 0x0000000913127227 */
/* 0x080fe200078e00ff */
/*0900*/ STL.64 [R21], R14 ; /* 0x0000000e15007387 */
/* 0x0001e40000100a00 */
/*0910*/ IADD3.X R22, P0, R27, R22, RZ, P0, !PT ; /* 0x000000161b167210 */
/* 0x000fe2000071e4ff */
/*0920*/ IMAD R19, R19, R9, RZ ; /* 0x0000000913137224 */
/* 0x000fc800078e02ff */
/*0930*/ IMAD.X R18, RZ, RZ, R18, P0 ; /* 0x000000ffff127224 */
/* 0x000fe200000e0612 */
/*0940*/ IADD3.X R22, P1, R19, R22, RZ, P1, !PT ; /* 0x0000001613167210 */
/* 0x000fe40000f3e4ff */
/*0950*/ IADD3 R20, P0, R20, 0x8, RZ ; /* 0x0000000814147810 */
/* 0x000fe40007f1e0ff */
/*0960*/ IADD3 R21, R21, 0x8, RZ ; /* 0x0000000815157810 */
/* 0x001fe20007ffe0ff */
/*0970*/ IMAD.X R19, RZ, RZ, R18, P1 ; /* 0x000000ffff137224 */
/* 0x000fe400008e0612 */
/*0980*/ IMAD.X R25, RZ, RZ, R25, P0 ; /* 0x000000ffff197224 */
/* 0x000fe400000e0619 */
/*0990*/ IMAD.MOV.U32 R14, RZ, RZ, R22 ; /* 0x000000ffff0e7224 */
/* 0x000fc400078e0016 */
/*09a0*/ IMAD.MOV.U32 R15, RZ, RZ, R19 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0013 */
/*09b0*/ @!P4 BRA 0x810 ; /* 0xfffffe500000c947 */
/* 0x000fea000383ffff */
/*09c0*/ BSYNC B3 ; /* 0x0000000000037941 */
/* 0x000fea0003800000 */
/*09d0*/ IMAD.IADD R7, R17, 0x1, -R7 ; /* 0x0000000111077824 */
/* 0x000fc800078e0a07 */
/*09e0*/ IMAD R23, R7, 0x8, R8 ; /* 0x0000000807177824 */
/* 0x000fca00078e0208 */
/*09f0*/ STL.64 [R23], R14 ; /* 0x0000000e17007387 */
/* 0x0001e80000100a00 */
/*0a00*/ LDL.64 R18, [R1+0x20] ; /* 0x0000200001127983 */
/* 0x000ea80000100a00 */
/*0a10*/ @P3 LDL.64 R20, [R1+0x18] ; /* 0x0000180001143983 */
/* 0x000ee80000100a00 */
/*0a20*/ LDL.64 R8, [R1+0x28] ; /* 0x0000280001087983 */
/* 0x000f220000100a00 */
/*0a30*/ @P3 IADD3 R4, -R5, 0x40, RZ ; /* 0x0000004005043810 */
/* 0x000fe20007ffe1ff */
/*0a40*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0a50*/ @P3 SHF.L.U32 R12, R18, R5, RZ ; /* 0x00000005120c3219 */
/* 0x004fc400000006ff */
/*0a60*/ @P3 SHF.L.U64.HI R17, R18, R5.reuse, R19 ; /* 0x0000000512113219 */
/* 0x080fe40000010213 */
/*0a70*/ @P3 SHF.R.U64 R7, R20, R4.reuse, R21.reuse ; /* 0x0000000414073219 */
/* 0x188fe40000001215 */
/*0a80*/ @P3 SHF.R.U32.HI R20, RZ, R4.reuse, R21 ; /* 0x00000004ff143219 */
/* 0x080fe40000011615 */
/*0a90*/ @P3 SHF.R.U64 R21, R18, R4, R19 ; /* 0x0000000412153219 */
/* 0x000fe40000001213 */
/*0aa0*/ @P3 LOP3.LUT R18, R7, R12, RZ, 0xfc, !PT ; /* 0x0000000c07123212 */
/* 0x000fe400078efcff */
/*0ab0*/ @P3 SHF.L.U32 R12, R8, R5, RZ ; /* 0x00000005080c3219 */
/* 0x010fc400000006ff */
/*0ac0*/ @P3 SHF.R.U32.HI R4, RZ, R4, R19 ; /* 0x00000004ff043219 */
/* 0x000fe40000011613 */
/*0ad0*/ @P3 SHF.L.U64.HI R5, R8, R5, R9 ; /* 0x0000000508053219 */
/* 0x000fe40000010209 */
/*0ae0*/ @P3 LOP3.LUT R19, R20, R17, RZ, 0xfc, !PT ; /* 0x0000001114133212 */
/* 0x000fe400078efcff */
/*0af0*/ @P3 LOP3.LUT R8, R12, R21, RZ, 0xfc, !PT ; /* 0x000000150c083212 */
/* 0x000fe400078efcff */
/*0b00*/ SHF.L.U64.HI R12, R18.reuse, 0x2, R19.reuse ; /* 0x00000002120c7819 */
/* 0x140fe20000010213 */
/*0b10*/ IMAD.SHL.U32 R18, R18, 0x4, RZ ; /* 0x0000000412127824 */
/* 0x000fe200078e00ff */
/*0b20*/ SHF.R.U32.HI R19, RZ, 0x1e, R19 ; /* 0x0000001eff137819 */
/* 0x000fe20000011613 */
/*0b30*/ IMAD.SHL.U32 R14, R8, 0x4, RZ ; /* 0x00000004080e7824 */
/* 0x001fe200078e00ff */
/*0b40*/ @P3 LOP3.LUT R9, R5, R4, RZ, 0xfc, !PT ; /* 0x0000000405093212 */
/* 0x000fc400078efcff */
/*0b50*/ IADD3 RZ, P0, RZ, -R18, RZ ; /* 0x80000012ffff7210 */
/* 0x000fe40007f1e0ff */
/*0b60*/ LOP3.LUT R4, RZ, R12, RZ, 0x33, !PT ; /* 0x0000000cff047212 */
/* 0x000fe400078e33ff */
/*0b70*/ LOP3.LUT R14, R19, R14, RZ, 0xfc, !PT ; /* 0x0000000e130e7212 */
/* 0x000fe400078efcff */
/*0b80*/ SHF.L.U64.HI R8, R8, 0x2, R9 ; /* 0x0000000208087819 */
/* 0x000fe40000010209 */
/*0b90*/ IADD3.X R15, P0, RZ, R4, RZ, P0, !PT ; /* 0x00000004ff0f7210 */
/* 0x000fe4000071e4ff */
/*0ba0*/ LOP3.LUT R5, RZ, R14, RZ, 0x33, !PT ; /* 0x0000000eff057212 */
/* 0x000fc400078e33ff */
/*0bb0*/ LOP3.LUT R7, RZ, R8, RZ, 0x33, !PT ; /* 0x00000008ff077212 */
/* 0x000fe400078e33ff */
/*0bc0*/ IADD3.X R5, P0, RZ, R5, RZ, P0, !PT ; /* 0x00000005ff057210 */
/* 0x000fe4000071e4ff */
/*0bd0*/ SHF.R.U32.HI R4, RZ, 0x1d, R9 ; /* 0x0000001dff047819 */
/* 0x000fc60000011609 */
/*0be0*/ IMAD.X R7, RZ, RZ, R7, P0 ; /* 0x000000ffff077224 */
/* 0x000fe200000e0607 */
/*0bf0*/ LOP3.LUT P1, RZ, R4.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x0000000104ff7812 */
/* 0x040fe4000782c0ff */
/*0c00*/ LOP3.LUT R4, R4, 0x1, RZ, 0xc0, !PT ; /* 0x0000000104047812 */
/* 0x000fe400078ec0ff */
/*0c10*/ SEL R8, R8, R7, !P1 ; /* 0x0000000708087207 */
/* 0x000fe40004800000 */
/*0c20*/ SEL R7, R14, R5, !P1 ; /* 0x000000050e077207 */
/* 0x000fe40004800000 */
/*0c30*/ ISETP.NE.U32.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f05070 */
/*0c40*/ SEL R12, R12, R15, !P1 ; /* 0x0000000f0c0c7207 */
/* 0x000fc40004800000 */
/*0c50*/ SEL R14, R7, R8, !P0 ; /* 0x00000008070e7207 */
/* 0x000fe20004000000 */
/*0c60*/ @P1 IMAD.MOV R18, RZ, RZ, -R18 ; /* 0x000000ffff121224 */
/* 0x000fe200078e0a12 */
/*0c70*/ LEA.HI R4, R9, R4, RZ, 0x2 ; /* 0x0000000409047211 */
/* 0x000fe400078f10ff */
/*0c80*/ IADD3 R9, R6, -c[0x0][0x20], RZ ; /* 0x8000080006097a10 */
/* 0x000fe40007ffe0ff */
/*0c90*/ FLO.U32 R14, R14 ; /* 0x0000000e000e7300 */
/* 0x000e2400000e0000 */
/*0ca0*/ IADD3 R17, -R14.reuse, 0x1f, RZ ; /* 0x0000001f0e117810 */
/* 0x041fe40007ffe1ff */
/*0cb0*/ IADD3 R5, -R14, 0x3f, RZ ; /* 0x0000003f0e057810 */
/* 0x000fc60007ffe1ff */
/*0cc0*/ @P0 IMAD.MOV R5, RZ, RZ, R17 ; /* 0x000000ffff050224 */
/* 0x000fca00078e0211 */
/*0cd0*/ ISETP.NE.U32.AND P0, PT, R5.reuse, RZ, PT ; /* 0x000000ff0500720c */
/* 0x040fe40003f05070 */
/*0ce0*/ IADD3 R15, -R5, 0x40, RZ ; /* 0x00000040050f7810 */
/* 0x000fe40007ffe1ff */
/*0cf0*/ ISETP.NE.AND.EX P0, PT, RZ, RZ, PT, P0 ; /* 0x000000ffff00720c */
/* 0x000fe40003f05300 */
/*0d00*/ SHF.L.U32 R17, R7.reuse, R5, RZ ; /* 0x0000000507117219 */
/* 0x040fe400000006ff */
/*0d10*/ SHF.R.U64 R18, R18, R15, R12 ; /* 0x0000000f12127219 */
/* 0x000fe4000000120c */
/*0d20*/ SHF.L.U64.HI R21, R7, R5, R8 ; /* 0x0000000507157219 */
/* 0x000fc40000010208 */
/*0d30*/ SHF.R.U32.HI R12, RZ, R15, R12 ; /* 0x0000000fff0c7219 */
/* 0x000fe2000001160c */
/*0d40*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */
/* 0x000fc800078e00ff */
/*0d50*/ @P0 LOP3.LUT R7, R18, R17, RZ, 0xfc, !PT ; /* 0x0000001112070212 */
/* 0x000fe400078efcff */
/*0d60*/ @P0 LOP3.LUT R8, R12, R21, RZ, 0xfc, !PT ; /* 0x000000150c080212 */
/* 0x000fc600078efcff */
/*0d70*/ IMAD.WIDE.U32 R18, R7, 0x2168c235, RZ ; /* 0x2168c23507127825 */
/* 0x000fc800078e00ff */
/*0d80*/ IMAD.MOV.U32 R14, RZ, RZ, R19 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0013 */
/*0d90*/ IADD3 RZ, P0, R18, R18, RZ ; /* 0x0000001212ff7210 */
/* 0x000fe20007f1e0ff */
/*0da0*/ IMAD R17, R8, -0x36f0255e, RZ ; /* 0xc90fdaa208117824 */
/* 0x000fe400078e02ff */
/*0db0*/ IMAD.WIDE.U32 R14, R7, -0x36f0255e, R14 ; /* 0xc90fdaa2070e7825 */
/* 0x000fc800078e000e */
/*0dc0*/ IMAD.HI.U32 R7, R8, -0x36f0255e, RZ ; /* 0xc90fdaa208077827 */
/* 0x000fc800078e00ff */
/*0dd0*/ IMAD.WIDE.U32 R14, P3, R8, 0x2168c235, R14 ; /* 0x2168c235080e7825 */
/* 0x000fc8000786000e */
/*0de0*/ IMAD.X R7, RZ, RZ, R7, P3 ; /* 0x000000ffff077224 */
/* 0x000fe200018e0607 */
/*0df0*/ IADD3 R8, P3, R17, R15, RZ ; /* 0x0000000f11087210 */
/* 0x000fe40007f7e0ff */
/*0e00*/ IADD3.X RZ, P0, R14, R14, RZ, P0, !PT ; /* 0x0000000e0eff7210 */
/* 0x000fe4000071e4ff */
/*0e10*/ ISETP.GT.U32.AND P4, PT, R8.reuse, RZ, PT ; /* 0x000000ff0800720c */
/* 0x040fe20003f84070 */
/*0e20*/ IMAD.X R7, RZ, RZ, R7, P3 ; /* 0x000000ffff077224 */
/* 0x000fe200018e0607 */
/*0e30*/ IADD3.X R15, P3, R8, R8, RZ, P0, !PT ; /* 0x00000008080f7210 */
/* 0x000fc8000077e4ff */
/*0e40*/ ISETP.GT.AND.EX P0, PT, R7.reuse, RZ, PT, P4 ; /* 0x000000ff0700720c */
/* 0x040fe20003f04340 */
/*0e50*/ IMAD.X R12, R7, 0x1, R7, P3 ; /* 0x00000001070c7824 */
/* 0x000fe200018e0607 */
/*0e60*/ LOP3.LUT P3, RZ, R13, 0x80000000, RZ, 0xc0, !PT ; /* 0x800000000dff7812 */
/* 0x000fe4000786c0ff */
/*0e70*/ SEL R15, R15, R8, P0 ; /* 0x000000080f0f7207 */
/* 0x000fe20000000000 */
/*0e80*/ IMAD.MOV R8, RZ, RZ, -R4 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a04 */
/*0e90*/ SEL R7, R12, R7, P0 ; /* 0x000000070c077207 */
/* 0x000fe40000000000 */
/*0ea0*/ IADD3 R18, P4, R15, 0x1, RZ ; /* 0x000000010f127810 */
/* 0x000fe40007f9e0ff */
/*0eb0*/ LOP3.LUT R13, R13, 0x80000000, RZ, 0xc0, !PT ; /* 0x800000000d0d7812 */
/* 0x000fc600078ec0ff */
/*0ec0*/ IMAD.X R7, RZ, RZ, R7, P4 ; /* 0x000000ffff077224 */
/* 0x000fe200020e0607 */
/*0ed0*/ @P1 LOP3.LUT R13, R13, 0x80000000, RZ, 0x3c, !PT ; /* 0x800000000d0d1812 */
/* 0x000fe200078e3cff */
/*0ee0*/ @P3 IMAD.MOV.U32 R4, RZ, RZ, R8 ; /* 0x000000ffff043224 */
/* 0x000fe200078e0008 */
/*0ef0*/ SEL R8, RZ, 0x1, !P0 ; /* 0x00000001ff087807 */
/* 0x000fe40004000000 */
/*0f00*/ SHF.R.U64 R18, R18, 0xa, R7 ; /* 0x0000000a12127819 */
/* 0x000fe40000001207 */
/*0f10*/ STL [R9], R4 ; /* 0x0000000409007387 */
/* 0x0001e20000100800 */
/*0f20*/ IMAD.IADD R5, R8, 0x1, R5 ; /* 0x0000000108057824 */
/* 0x000fe200078e0205 */
/*0f30*/ IADD3 R18, P3, R18, 0x1, RZ ; /* 0x0000000112127810 */
/* 0x000fc60007f7e0ff */
/*0f40*/ IMAD.SHL.U32 R5, R5, 0x100000, RZ ; /* 0x0010000005057824 */
/* 0x000fe200078e00ff */
/*0f50*/ LEA.HI.X R7, R7, RZ, RZ, 0x16, P3 ; /* 0x000000ff07077211 */
/* 0x000fc800018fb4ff */
/*0f60*/ SHF.R.U64 R18, R18, 0x1, R7.reuse ; /* 0x0000000112127819 */
/* 0x100fe40000001207 */
/*0f70*/ SHF.R.U32.HI R8, RZ, 0x1, R7 ; /* 0x00000001ff087819 */
/* 0x000fe40000011607 */
/*0f80*/ IADD3 R18, P0, P3, R18, -UR4, RZ ; /* 0x8000000412127c10 */
/* 0x000fc8000fb1e0ff */
/*0f90*/ IADD3.X R8, R8, 0x3fe00000, ~R5, P0, P3 ; /* 0x3fe0000008087810 */
/* 0x000fc800007e6c05 */
/*0fa0*/ LOP3.LUT R19, R8, R13, RZ, 0xfc, !PT ; /* 0x0000000d08137212 */
/* 0x000fe400078efcff */
/*0fb0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0000 */
/*0fc0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*0fd0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff02004007950 */
/* 0x000fea0003c3ffff */
/*0fe0*/ BRA 0xfe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0ff0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1000*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1010*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1020*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
//#include <opencv2/opencv.hpp>
//Grid-stride reduction kernel: each thread walks i = id, id+numThreads, ...
//over [0, *stop) and accumulates (sum of (int)(sin(i)^2 * 10000)) mod 10000.
__global__ void mykernel(int *arr, int *stop){
        int id = blockIdx.x * blockDim.x + threadIdx.x; //unique global id of thread
        int numThreads = gridDim.x * blockDim.x; //total num threads in grid in x direction
        int localsum = 0; //per-thread partial result, kept in [0, 10000)
        for (int i = id; i < *stop; i+= numThreads){
                double tmp = sin(i*1.0); //i*1.0 promotes the index to double
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0); //truncates toward zero
                localsum = (localsum + z) % 10000;
        }
        printf(" %d ", localsum); //device-side printf, one entry per thread
        //NOTE(review): every launched thread writes arr[id], but the host only
        //allocates min(gridDim.x*blockDim.x, *stop) ints -- this is an
        //out-of-bounds write whenever *stop < total thread count. Confirm
        //intended allocation size in the caller.
        arr[id] = localsum;
/*
        if(id < *stop){
                double tmp = sin(id*1.0);
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0);
                arr[id] = z % 10000;
        }
*/
}
//Host driver: reads the loop bound from argv[1], launches 4 blocks x 5
//threads of mykernel, then folds the per-thread partial sums into a final
//"PIN" value mod 10000.
int main(int argc, char *argv[]){
        //assert(argc==2);
        int stop = (int)atol(argv[1]); //NOTE(review): argv[1] is dereferenced unchecked; the argc assert is commented out
        assert(stop >= 1.0); //int-vs-double compare; behaves as stop >= 1
        printf("Hello World!\n");
        int blocks = 4;
        int threads = 5;
        int result = 0;
        int *arr;
        int arrsize;
        //clamp the host array to the number of slots that carry results
        if(blocks*threads < stop){
                arrsize = blocks*threads;
        }else{
                arrsize = stop;
        }
        arr = (int *)malloc(sizeof(int)*arrsize); //memory in cpu (NOTE(review): return value unchecked)
        int *devarr;
        int *devstop;
        cudaMalloc((int**) &devarr , sizeof(int)*arrsize); //mem in gpu
        cudaMalloc((int**) &devstop , sizeof(int)); //mem in gpu
        cudaMemcpy(devarr, arr, sizeof(int)*arrsize, cudaMemcpyHostToDevice); //transfer (copies uninitialized bytes; harmless, kernel overwrites)
        cudaMemcpy(devstop, &stop, sizeof(int), cudaMemcpyHostToDevice); //transfer
        mykernel<<<blocks,threads>>>(devarr, devstop); //1,1 block, threads- launch config
        cudaMemcpy(arr, devarr, sizeof(int)*arrsize, cudaMemcpyDeviceToHost); //implicit sync with the kernel
        printf("arrsize: %d\n", arrsize);
        for(int i = 0; i<arrsize; i++){
                //printf(" %d ", arr[i]);
                result = (result + arr[i]) % 10000; //fold partials mod 10000
        }
        printf("PIN is: %d\n",result);
        //scanf("%d%d", a, b);
        free(arr);
        cudaFree(devarr);
        cudaFree(devstop);
        return 0;
}
|
.file "tmpxft_0016be57_00000000-6_pin_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit hook: unregisters the fat binary handle stored by the module
# constructor. The sub/add $8 pair keeps %rsp 16-byte aligned at the call
# site (SysV AMD64 ABI).
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = saved fatbin handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side launch stub for mykernel(int*, int*): spills the two pointer
# arguments, builds the kernel-argument pointer array at 80(%rsp), pops the
# launch configuration saved by __cudaPushCallConfiguration, and forwards
# everything to cudaLaunchKernel. Guarded by the stack protector.
	.globl	_Z29__device_stub__Z8mykernelPiS_PiS_
	.type	_Z29__device_stub__Z8mykernelPiS_PiS_, @function
_Z29__device_stub__Z8mykernelPiS_PiS_:
.LFB2082:
	.cfi_startproc
	endbr64
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 8(%rsp)			# spill arg0 (arr)
	movq	%rsi, (%rsp)			# spill arg1 (stop)
	movq	%fs:40, %rax			# stack-protector canary in
	movq	%rax, 104(%rsp)
	xorl	%eax, %eax
	leaq	8(%rsp), %rax
	movq	%rax, 80(%rsp)			# args[0] = &arr
	movq	%rsp, %rax
	movq	%rax, 88(%rsp)			# args[1] = &stop
	movl	$1, 32(%rsp)			# default 1x1x1 grid/block dims,
	movl	$1, 36(%rsp)			# overwritten by PopCallConfiguration
	movl	$1, 40(%rsp)
	movl	$1, 44(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	leaq	24(%rsp), %rcx
	leaq	16(%rsp), %rdx
	leaq	44(%rsp), %rsi
	leaq	32(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7				# 0 => configuration available, launch
.L3:
	movq	104(%rsp), %rax			# canary check before return
	subq	%fs:40, %rax
	jne	.L8
	addq	$120, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	pushq	24(%rsp)			# stream + shared-mem size on stack
	.cfi_def_cfa_offset 136
	pushq	24(%rsp)
	.cfi_def_cfa_offset 144
	leaq	96(%rsp), %r9			# arg array
	movq	60(%rsp), %rcx			# blockDim
	movl	68(%rsp), %r8d
	movq	48(%rsp), %rsi			# gridDim
	movl	56(%rsp), %edx
	leaq	_Z8mykernelPiS_(%rip), %rdi	# kernel identified by host symbol
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 128
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2082:
	.size	_Z29__device_stub__Z8mykernelPiS_PiS_, .-_Z29__device_stub__Z8mykernelPiS_PiS_
# Host trampoline bearing the kernel's mangled name; its address is what the
# runtime uses to identify the kernel. Simply forwards to the launch stub.
	.globl	_Z8mykernelPiS_
	.type	_Z8mykernelPiS_, @function
_Z8mykernelPiS_:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$8, %rsp			# realign %rsp to 16 for the call
	.cfi_def_cfa_offset 16
	call	_Z29__device_stub__Z8mykernelPiS_PiS_
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2083:
	.size	_Z8mykernelPiS_, .-_Z8mykernelPiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World!\n"
.LC1:
.string "arrsize: %d\n"
.LC2:
.string "PIN is: %d\n"
.text
# main: GCC-compiled host driver.
# Register roles: %r13d = stop (parsed bound), %ebp = arrsize = min(20, stop),
# %r12 = arrsize*4 (byte size), %rbx = host array pointer.
# Stack frame: 12(%rsp)=stop, 16(%rsp)=devarr, 24(%rsp)=devstop,
# 32/44(%rsp)=grid/block dim3, 56(%rsp)=stack-protector canary.
	.globl	main
	.type	main, @function
main:
.LFB2057:
	.cfi_startproc
	endbr64
	pushq	%r13
	.cfi_def_cfa_offset 16
	.cfi_offset 13, -16
	pushq	%r12
	.cfi_def_cfa_offset 24
	.cfi_offset 12, -24
	pushq	%rbp
	.cfi_def_cfa_offset 32
	.cfi_offset 6, -32
	pushq	%rbx
	.cfi_def_cfa_offset 40
	.cfi_offset 3, -40
	subq	$72, %rsp
	.cfi_def_cfa_offset 112
	movq	%fs:40, %rax			# canary in
	movq	%rax, 56(%rsp)
	xorl	%eax, %eax
	movq	8(%rsi), %rdi			# argv[1]
	movl	$10, %edx
	movl	$0, %esi
	call	__isoc23_strtol@PLT		# atol lowered to strtol(.., 10)
	movl	%eax, 12(%rsp)			# stop
	leaq	.LC0(%rip), %rsi
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT		# "Hello World!\n" (assert folded away: stop>=1 not checked here)
	movl	12(%rsp), %r13d
	movl	$20, %ebp			# blocks*threads = 4*5
	cmpl	%ebp, %r13d
	cmovle	%r13d, %ebp			# arrsize = min(20, stop)
	movslq	%ebp, %r12
	salq	$2, %r12			# byte size = arrsize * sizeof(int)
	movq	%r12, %rdi
	call	malloc@PLT
	movq	%rax, %rbx			# host arr
	leaq	16(%rsp), %rdi
	movq	%r12, %rsi
	call	cudaMalloc@PLT			# &devarr
	leaq	24(%rsp), %rdi
	movl	$4, %esi
	call	cudaMalloc@PLT			# &devstop
	movl	$1, %ecx			# cudaMemcpyHostToDevice
	movq	%r12, %rdx
	movq	%rbx, %rsi
	movq	16(%rsp), %rdi
	call	cudaMemcpy@PLT
	leaq	12(%rsp), %rsi
	movl	$1, %ecx
	movl	$4, %edx
	movq	24(%rsp), %rdi
	call	cudaMemcpy@PLT			# copy stop to device
	movl	$5, 44(%rsp)			# block = (5,1,1)
	movl	$1, 48(%rsp)
	movl	$4, 32(%rsp)			# grid = (4,1,1)
	movl	$1, 36(%rsp)
	movl	$0, %r9d
	movl	$0, %r8d
	movq	44(%rsp), %rdx
	movl	$1, %ecx
	movq	32(%rsp), %rdi
	movl	$1, %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L19				# 0 => run the launch stub
.L12:
	movl	$2, %ecx			# cudaMemcpyDeviceToHost
	movq	%r12, %rdx
	movq	16(%rsp), %rsi
	movq	%rbx, %rdi
	call	cudaMemcpy@PLT
	movl	%ebp, %edx
	leaq	.LC1(%rip), %rsi
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT		# "arrsize: %d\n"
	testl	%r13d, %r13d
	jle	.L16				# stop <= 0: skip fold loop
	movl	$0, %eax			# i = 0
	movl	$0, %edx			# result = 0
.L14:
	movl	%edx, %ecx
	addl	(%rbx,%rax,4), %ecx		# result += arr[i]
	movslq	%ecx, %rdx			# signed % 10000 via magic-number
	imulq	$1759218605, %rdx, %rdx		# multiply (0x68DB8BAD, shift 44)
	sarq	$44, %rdx
	movl	%ecx, %esi
	sarl	$31, %esi
	subl	%esi, %edx
	imull	$10000, %edx, %esi
	subl	%esi, %ecx			# result %= 10000
	movl	%ecx, %edx
	addq	$1, %rax
	cmpl	%eax, %ebp
	jg	.L14
.L13:
	leaq	.LC2(%rip), %rsi
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT		# "PIN is: %d\n"
	movq	%rbx, %rdi
	call	free@PLT
	movq	16(%rsp), %rdi
	call	cudaFree@PLT
	movq	24(%rsp), %rdi
	call	cudaFree@PLT
	movq	56(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L20
	movl	$0, %eax			# return 0
	addq	$72, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 40
	popq	%rbx
	.cfi_def_cfa_offset 32
	popq	%rbp
	.cfi_def_cfa_offset 24
	popq	%r12
	.cfi_def_cfa_offset 16
	popq	%r13
	.cfi_def_cfa_offset 8
	ret
.L19:
	.cfi_restore_state
	movq	24(%rsp), %rsi			# devstop
	movq	16(%rsp), %rdi			# devarr
	call	_Z29__device_stub__Z8mykernelPiS_PiS_
	jmp	.L12
.L16:
	movl	$0, %edx
	jmp	.L13
.L20:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2057:
	.size	main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z8mykernelPiS_"
.text
# Static initializer (referenced from .init_array): registers the fat binary,
# registers the mykernel entry point under its mangled name, finalizes the
# registration, and installs the unregister hook via atexit.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)	# stash handle for unregister
	pushq	$0				# four NULL trailing args
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d			# thread limit: -1 = none
	leaq	.LC3(%rip), %rdx		# device-side name
	movq	%rdx, %rcx
	leaq	_Z8mykernelPiS_(%rip), %rsi	# host-side identifying symbol
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2085:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
//#include <opencv2/opencv.hpp>
//Grid-stride reduction kernel: each thread walks i = id, id+numThreads, ...
//over [0, *stop) and accumulates (sum of (int)(sin(i)^2 * 10000)) mod 10000.
__global__ void mykernel(int *arr, int *stop){
        int id = blockIdx.x * blockDim.x + threadIdx.x; //unique global id of thread
        int numThreads = gridDim.x * blockDim.x; //total num threads in grid in x direction
        int localsum = 0; //per-thread partial result, kept in [0, 10000)
        for (int i = id; i < *stop; i+= numThreads){
                double tmp = sin(i*1.0); //i*1.0 promotes the index to double
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0); //truncates toward zero
                localsum = (localsum + z) % 10000;
        }
        printf(" %d ", localsum); //device-side printf, one entry per thread
        //NOTE(review): every launched thread writes arr[id], but the host only
        //allocates min(gridDim.x*blockDim.x, *stop) ints -- out-of-bounds
        //write whenever *stop < total thread count; confirm allocation size.
        arr[id] = localsum;
/*
        if(id < *stop){
                double tmp = sin(id*1.0);
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0);
                arr[id] = z % 10000;
        }
*/
}
//Host driver: reads the loop bound from argv[1], launches 4 blocks x 5
//threads of mykernel, then folds the per-thread partial sums into a final
//"PIN" value mod 10000.
int main(int argc, char *argv[]){
        //assert(argc==2);
        int stop = (int)atol(argv[1]); //NOTE(review): argv[1] dereferenced unchecked; argc assert commented out
        assert(stop >= 1.0); //int-vs-double compare; behaves as stop >= 1
        printf("Hello World!\n");
        int blocks = 4;
        int threads = 5;
        int result = 0;
        int *arr;
        int arrsize;
        //clamp the host array to the number of slots that carry results
        if(blocks*threads < stop){
                arrsize = blocks*threads;
        }else{
                arrsize = stop;
        }
        arr = (int *)malloc(sizeof(int)*arrsize); //memory in cpu (NOTE(review): return value unchecked)
        int *devarr;
        int *devstop;
        cudaMalloc((int**) &devarr , sizeof(int)*arrsize); //mem in gpu
        cudaMalloc((int**) &devstop , sizeof(int)); //mem in gpu
        cudaMemcpy(devarr, arr, sizeof(int)*arrsize, cudaMemcpyHostToDevice); //transfer (uninitialized bytes; kernel overwrites)
        cudaMemcpy(devstop, &stop, sizeof(int), cudaMemcpyHostToDevice); //transfer
        mykernel<<<blocks,threads>>>(devarr, devstop); //1,1 block, threads- launch config
        cudaMemcpy(arr, devarr, sizeof(int)*arrsize, cudaMemcpyDeviceToHost); //implicit sync with the kernel
        printf("arrsize: %d\n", arrsize);
        for(int i = 0; i<arrsize; i++){
                //printf(" %d ", arr[i]);
                result = (result + arr[i]) % 10000; //fold partials mod 10000
        }
        printf("PIN is: %d\n",result);
        //scanf("%d%d", a, b);
        free(arr);
        cudaFree(devarr);
        cudaFree(devstop);
        return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
//#include <opencv2/opencv.hpp>
//Grid-stride reduction kernel: each thread walks i = id, id+numThreads, ...
//over [0, *stop) and accumulates (sum of (int)(sin(i)^2 * 10000)) mod 10000.
__global__ void mykernel(int *arr, int *stop){
        int id = blockIdx.x * blockDim.x + threadIdx.x; //unique global id of thread
        int numThreads = gridDim.x * blockDim.x; //total num threads in grid in x direction
        int localsum = 0; //per-thread partial result, kept in [0, 10000)
        for (int i = id; i < *stop; i+= numThreads){
                double tmp = sin(i*1.0); //i*1.0 promotes the index to double
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0); //truncates toward zero
                localsum = (localsum + z) % 10000;
        }
        printf(" %d ", localsum); //device-side printf, one entry per thread
        //NOTE(review): every launched thread writes arr[id], but the host only
        //allocates min(gridDim.x*blockDim.x, *stop) ints -- out-of-bounds
        //write whenever *stop < total thread count; confirm allocation size.
        arr[id] = localsum;
/*
        if(id < *stop){
                double tmp = sin(id*1.0);
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0);
                arr[id] = z % 10000;
        }
*/
}
//Host driver (HIP port): reads the loop bound from argv[1], launches
//4 blocks x 5 threads of mykernel, then folds the per-thread partial sums
//into a final "PIN" value mod 10000.
int main(int argc, char *argv[]){
        //assert(argc==2);
        int stop = (int)atol(argv[1]); //NOTE(review): argv[1] dereferenced unchecked; argc assert commented out
        assert(stop >= 1.0); //int-vs-double compare; behaves as stop >= 1
        printf("Hello World!\n");
        int blocks = 4;
        int threads = 5;
        int result = 0;
        int *arr;
        int arrsize;
        //clamp the host array to the number of slots that carry results
        if(blocks*threads < stop){
                arrsize = blocks*threads;
        }else{
                arrsize = stop;
        }
        arr = (int *)malloc(sizeof(int)*arrsize); //memory in cpu (NOTE(review): return value unchecked)
        int *devarr;
        int *devstop;
        hipMalloc((int**) &devarr , sizeof(int)*arrsize); //mem in gpu
        hipMalloc((int**) &devstop , sizeof(int)); //mem in gpu
        hipMemcpy(devarr, arr, sizeof(int)*arrsize, hipMemcpyHostToDevice); //transfer (uninitialized bytes; kernel overwrites)
        hipMemcpy(devstop, &stop, sizeof(int), hipMemcpyHostToDevice); //transfer
        mykernel<<<blocks,threads>>>(devarr, devstop); //1,1 block, threads- launch config
        hipMemcpy(arr, devarr, sizeof(int)*arrsize, hipMemcpyDeviceToHost); //implicit sync with the kernel
        printf("arrsize: %d\n", arrsize);
        for(int i = 0; i<arrsize; i++){
                //printf(" %d ", arr[i]);
                result = (result + arr[i]) % 10000; //fold partials mod 10000
        }
        printf("PIN is: %d\n",result);
        //scanf("%d%d", a, b);
        free(arr);
        hipFree(devarr);
        hipFree(devstop);
        return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
//#include <opencv2/opencv.hpp>
//Grid-stride reduction kernel: each thread walks i = id, id+numThreads, ...
//over [0, *stop) and accumulates (sum of (int)(sin(i)^2 * 10000)) mod 10000.
__global__ void mykernel(int *arr, int *stop){
        int id = blockIdx.x * blockDim.x + threadIdx.x; //unique global id of thread
        int numThreads = gridDim.x * blockDim.x; //total num threads in grid in x direction
        int localsum = 0; //per-thread partial result, kept in [0, 10000)
        for (int i = id; i < *stop; i+= numThreads){
                double tmp = sin(i*1.0); //i*1.0 promotes the index to double
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0); //truncates toward zero
                localsum = (localsum + z) % 10000;
        }
        printf(" %d ", localsum); //device-side printf, one entry per thread
        //NOTE(review): every launched thread writes arr[id], but the host only
        //allocates min(gridDim.x*blockDim.x, *stop) ints -- out-of-bounds
        //write whenever *stop < total thread count; confirm allocation size.
        arr[id] = localsum;
/*
        if(id < *stop){
                double tmp = sin(id*1.0);
                double tmp2 = tmp*tmp;
                int z = (int)(tmp2*10000.0);
                arr[id] = z % 10000;
        }
*/
}
//Host driver (HIP port): reads the loop bound from argv[1], launches
//4 blocks x 5 threads of mykernel, then folds the per-thread partial sums
//into a final "PIN" value mod 10000.
int main(int argc, char *argv[]){
        //assert(argc==2);
        int stop = (int)atol(argv[1]); //NOTE(review): argv[1] dereferenced unchecked; argc assert commented out
        assert(stop >= 1.0); //int-vs-double compare; behaves as stop >= 1
        printf("Hello World!\n");
        int blocks = 4;
        int threads = 5;
        int result = 0;
        int *arr;
        int arrsize;
        //clamp the host array to the number of slots that carry results
        if(blocks*threads < stop){
                arrsize = blocks*threads;
        }else{
                arrsize = stop;
        }
        arr = (int *)malloc(sizeof(int)*arrsize); //memory in cpu (NOTE(review): return value unchecked)
        int *devarr;
        int *devstop;
        hipMalloc((int**) &devarr , sizeof(int)*arrsize); //mem in gpu
        hipMalloc((int**) &devstop , sizeof(int)); //mem in gpu
        hipMemcpy(devarr, arr, sizeof(int)*arrsize, hipMemcpyHostToDevice); //transfer (uninitialized bytes; kernel overwrites)
        hipMemcpy(devstop, &stop, sizeof(int), hipMemcpyHostToDevice); //transfer
        mykernel<<<blocks,threads>>>(devarr, devstop); //1,1 block, threads- launch config
        hipMemcpy(arr, devarr, sizeof(int)*arrsize, hipMemcpyDeviceToHost); //implicit sync with the kernel
        printf("arrsize: %d\n", arrsize);
        for(int i = 0; i<arrsize; i++){
                //printf(" %d ", arr[i]);
                result = (result + arr[i]) % 10000; //fold partials mod 10000
        }
        printf("PIN is: %d\n",result);
        //scanf("%d%d", a, b);
        free(arr);
        hipFree(devarr);
        hipFree(devstop);
        return 0;
}
|
.text
.file "pin_cuda.hip"
# HIP launch stub for mykernel(int*, int*): spills the two pointer args,
# builds the kernel-argument pointer array at 64(%rsp), pops the launch
# configuration saved by __hipPushCallConfiguration, and forwards it all to
# hipLaunchKernel. Note the net stack delta (88 alloc + 2 pushes = 104 freed).
	.globl	_Z23__device_stub__mykernelPiS_ # -- Begin function _Z23__device_stub__mykernelPiS_
	.p2align	4, 0x90
	.type	_Z23__device_stub__mykernelPiS_,@function
_Z23__device_stub__mykernelPiS_:        # @_Z23__device_stub__mykernelPiS_
	.cfi_startproc
# %bb.0:
	subq	$88, %rsp
	.cfi_def_cfa_offset 96
	movq	%rdi, 56(%rsp)                  # spill arg0 (arr)
	movq	%rsi, 48(%rsp)                  # spill arg1 (stop)
	leaq	56(%rsp), %rax
	movq	%rax, 64(%rsp)                  # args[0] = &arr
	leaq	48(%rsp), %rax
	movq	%rax, 72(%rsp)                  # args[1] = &stop
	leaq	32(%rsp), %rdi                  # &gridDim
	leaq	16(%rsp), %rsi                  # &blockDim
	leaq	8(%rsp), %rdx                   # &sharedMem
	movq	%rsp, %rcx                      # &stream
	callq	__hipPopCallConfiguration
	movq	32(%rsp), %rsi
	movl	40(%rsp), %edx
	movq	16(%rsp), %rcx
	movl	24(%rsp), %r8d
	leaq	64(%rsp), %r9                   # kernel arg array
	movl	$_Z8mykernelPiS_, %edi          # kernel handle (host-side object)
	pushq	(%rsp)                          # stream
	.cfi_adjust_cfa_offset 8
	pushq	16(%rsp)                        # shared memory size
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$104, %rsp
	.cfi_adjust_cfa_offset -104
	retq
.Lfunc_end0:
	.size	_Z23__device_stub__mykernelPiS_, .Lfunc_end0-_Z23__device_stub__mykernelPiS_
	.cfi_endproc
# -- End function
# main: clang-compiled HIP host driver.
# Register roles: %r12d = stop, %ebp = arrsize = min(20, stop),
# %r15 = arrsize*4 (byte size), %r14 = host array pointer, %ebx = result.
# Stack frame: 12(%rsp)=stop, 16(%rsp)=devarr, 24(%rsp)=devstop.
# Unlike the nvcc path, the launch stub body is inlined at %bb.1.
	.globl	main                            # -- Begin function main
	.p2align	4, 0x90
	.type	main,@function
main:                                   # @main
	.cfi_startproc
# %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	pushq	%r15
	.cfi_def_cfa_offset 24
	pushq	%r14
	.cfi_def_cfa_offset 32
	pushq	%r12
	.cfi_def_cfa_offset 40
	pushq	%rbx
	.cfi_def_cfa_offset 48
	subq	$112, %rsp
	.cfi_def_cfa_offset 160
	.cfi_offset %rbx, -48
	.cfi_offset %r12, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	movq	8(%rsi), %rdi                   # argv[1]
	xorl	%ebx, %ebx                      # result = 0
	xorl	%esi, %esi
	movl	$10, %edx
	callq	__isoc23_strtol                 # atol lowered to strtol(.., 10)
	movl	%eax, 12(%rsp)                  # stop
	movl	$.Lstr, %edi
	callq	puts@PLT                        # printf("Hello World!\n") -> puts
	movl	12(%rsp), %r12d
	cmpl	$20, %r12d
	movl	$20, %ebp                       # blocks*threads = 4*5
	cmovll	%r12d, %ebp                     # arrsize = min(20, stop)
	movslq	%ebp, %r15
	shlq	$2, %r15                        # byte size
	movq	%r15, %rdi
	callq	malloc
	movq	%rax, %r14                      # host arr
	leaq	16(%rsp), %rdi
	movq	%r15, %rsi
	callq	hipMalloc                       # &devarr
	leaq	24(%rsp), %rdi
	movl	$4, %esi
	callq	hipMalloc                       # &devstop
	movq	16(%rsp), %rdi
	movq	%r14, %rsi
	movq	%r15, %rdx
	movl	$1, %ecx                        # hipMemcpyHostToDevice
	callq	hipMemcpy
	movq	24(%rsp), %rdi
	leaq	12(%rsp), %rsi
	movl	$4, %edx
	movl	$1, %ecx
	callq	hipMemcpy                       # copy stop to device
	movabsq	$4294967300, %rdi               # imm = 0x100000004 (grid 4x1 packed)
	leaq	1(%rdi), %rdx                   # block 5x1 packed
	movl	$1, %esi
	movl	$1, %ecx
	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB1_2
# %bb.1:                                # inlined launch stub
	movq	16(%rsp), %rax
	movq	24(%rsp), %rcx
	movq	%rax, 88(%rsp)
	movq	%rcx, 80(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 96(%rsp)                  # args[0] = &devarr
	leaq	80(%rsp), %rax
	movq	%rax, 104(%rsp)                 # args[1] = &devstop
	leaq	64(%rsp), %rdi
	leaq	48(%rsp), %rsi
	leaq	40(%rsp), %rdx
	leaq	32(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	movq	48(%rsp), %rcx
	movl	56(%rsp), %r8d
	leaq	96(%rsp), %r9
	movl	$_Z8mykernelPiS_, %edi
	pushq	32(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	48(%rsp)                        # shared memory size
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
.LBB1_2:
	movq	16(%rsp), %rsi
	movq	%r14, %rdi
	movq	%r15, %rdx
	movl	$2, %ecx                        # hipMemcpyDeviceToHost
	callq	hipMemcpy
	movl	$.L.str.1, %edi                 # "arrsize: %d\n"
	movl	%ebp, %esi
	xorl	%eax, %eax                      # variadic call: 0 vector args
	callq	printf
	testl	%r12d, %r12d
	jle	.LBB1_5                         # stop <= 0: skip fold loop
# %bb.3:                                # %.lr.ph.preheader
	movl	%ebp, %eax
	xorl	%ecx, %ecx
	xorl	%ebx, %ebx
	.p2align	4, 0x90
.LBB1_4:                                # %.lr.ph
                                        # =>This Inner Loop Header: Depth=1
	addl	(%r14,%rcx,4), %ebx             # result += arr[i]
	movslq	%ebx, %rbx                      # signed % 10000 via magic-number
	imulq	$1759218605, %rbx, %rdx         # imm = 0x68DB8BAD
	movq	%rdx, %rsi
	shrq	$63, %rsi
	sarq	$44, %rdx
	addl	%esi, %edx
	imull	$10000, %edx, %edx              # imm = 0x2710
	subl	%edx, %ebx                      # result %= 10000
	incq	%rcx
	cmpq	%rcx, %rax
	jne	.LBB1_4
.LBB1_5:                                # %._crit_edge
	movl	$.L.str.2, %edi                 # "PIN is: %d\n"
	movl	%ebx, %esi
	xorl	%eax, %eax
	callq	printf
	movq	%r14, %rdi
	callq	free
	movq	16(%rsp), %rdi
	callq	hipFree
	movq	24(%rsp), %rdi
	callq	hipFree
	xorl	%eax, %eax                      # return 0
	addq	$112, %rsp
	.cfi_def_cfa_offset 48
	popq	%rbx
	.cfi_def_cfa_offset 40
	popq	%r12
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end1:
	.size	main, .Lfunc_end1-main
	.cfi_endproc
# -- End function
# Module constructor (referenced from .init_array): registers the fat binary
# once (plain load/compare/store guard -- not atomic, fine for init_array use),
# registers mykernel's host handle, then tail-calls atexit(__hip_module_dtor).
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB2_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four trailing ptr args
	movups	%xmm0, (%rsp)
	movl	$_Z8mykernelPiS_, %esi          # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # device-side name string
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d                       # thread limit: -1 = none
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
# -- End function
# atexit hook: unregisters the fat binary if the ctor registered one, and
# clears the handle so a second invocation is a no-op.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing registered: return
	je	.LBB3_2
# %bb.1:
	pushq	%rax                            # realign %rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
# -- End function
.type _Z8mykernelPiS_,@object # @_Z8mykernelPiS_
.section .rodata,"a",@progbits
.globl _Z8mykernelPiS_
.p2align 3, 0x0
_Z8mykernelPiS_:
.quad _Z23__device_stub__mykernelPiS_
.size _Z8mykernelPiS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "arrsize: %d\n"
.size .L.str.1, 13
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "PIN is: %d\n"
.size .L.str.2, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8mykernelPiS_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World!"
.size .Lstr, 13
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__mykernelPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8mykernelPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0016be57_00000000-6_pin_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z8mykernelPiS_PiS_
.type _Z29__device_stub__Z8mykernelPiS_PiS_, @function
_Z29__device_stub__Z8mykernelPiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8mykernelPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z29__device_stub__Z8mykernelPiS_PiS_, .-_Z29__device_stub__Z8mykernelPiS_PiS_
.globl _Z8mykernelPiS_
.type _Z8mykernelPiS_, @function
_Z8mykernelPiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z8mykernelPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8mykernelPiS_, .-_Z8mykernelPiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World!\n"
.LC1:
.string "arrsize: %d\n"
.LC2:
.string "PIN is: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 12(%rsp)
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %r13d
movl $20, %ebp
cmpl %ebp, %r13d
cmovle %r13d, %ebp
movslq %ebp, %r12
salq $2, %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, %rbx
leaq 16(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, %ecx
movq %r12, %rdx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $5, 44(%rsp)
movl $1, 48(%rsp)
movl $4, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L12:
movl $2, %ecx
movq %r12, %rdx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl %ebp, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %r13d, %r13d
jle .L16
movl $0, %eax
movl $0, %edx
.L14:
movl %edx, %ecx
addl (%rbx,%rax,4), %ecx
movslq %ecx, %rdx
imulq $1759218605, %rdx, %rdx
sarq $44, %rdx
movl %ecx, %esi
sarl $31, %esi
subl %esi, %edx
imull $10000, %edx, %esi
subl %esi, %ecx
movl %ecx, %edx
addq $1, %rax
cmpl %eax, %ebp
jg .L14
.L13:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call free@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z29__device_stub__Z8mykernelPiS_PiS_
jmp .L12
.L16:
movl $0, %edx
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z8mykernelPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z8mykernelPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "pin_cuda.hip"
.globl _Z23__device_stub__mykernelPiS_ # -- Begin function _Z23__device_stub__mykernelPiS_
.p2align 4, 0x90
.type _Z23__device_stub__mykernelPiS_,@function
_Z23__device_stub__mykernelPiS_: # @_Z23__device_stub__mykernelPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8mykernelPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z23__device_stub__mykernelPiS_, .Lfunc_end0-_Z23__device_stub__mykernelPiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $112, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq 8(%rsi), %rdi
xorl %ebx, %ebx
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, 12(%rsp)
movl $.Lstr, %edi
callq puts@PLT
movl 12(%rsp), %r12d
cmpl $20, %r12d
movl $20, %ebp
cmovll %r12d, %ebp
movslq %ebp, %r15
shlq $2, %r15
movq %r15, %rdi
callq malloc
movq %rax, %r14
leaq 16(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r14, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
leaq 12(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 1(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8mykernelPiS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 16(%rsp), %rsi
movq %r14, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
movl $.L.str.1, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
testl %r12d, %r12d
jle .LBB1_5
# %bb.3: # %.lr.ph.preheader
movl %ebp, %eax
xorl %ecx, %ecx
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl (%r14,%rcx,4), %ebx
movslq %ebx, %rbx
imulq $1759218605, %rbx, %rdx # imm = 0x68DB8BAD
movq %rdx, %rsi
shrq $63, %rsi
sarq $44, %rdx
addl %esi, %edx
imull $10000, %edx, %edx # imm = 0x2710
subl %edx, %ebx
incq %rcx
cmpq %rcx, %rax
jne .LBB1_4
.LBB1_5: # %._crit_edge
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movq %r14, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $112, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8mykernelPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8mykernelPiS_,@object # @_Z8mykernelPiS_
.section .rodata,"a",@progbits
.globl _Z8mykernelPiS_
.p2align 3, 0x0
_Z8mykernelPiS_:
.quad _Z23__device_stub__mykernelPiS_
.size _Z8mykernelPiS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "arrsize: %d\n"
.size .L.str.1, 13
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "PIN is: %d\n"
.size .L.str.2, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8mykernelPiS_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World!"
.size .Lstr, 13
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__mykernelPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8mykernelPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include<cstdlib>
#include<stdio.h>
// Populate mtx with nx*ny pseudo-random floats in [0, 1].
// Values come from the global rand() stream, so seeding via srand()
// beforehand makes the fill reproducible.
void initialize(float* mtx, int const nx, int const ny){
    int const count = nx * ny;
    for (int idx = 0; idx < count; ++idx) {
        mtx[idx] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
}
// Element-wise matrix add d_c = d_a + d_b with a 2D grid of 2D blocks:
// one thread per element.  The flattened offset mp uses ny as the stride;
// nx is unused in the body.
// NOTE(review): no bounds check — the launch geometry must tile nx*ny
// exactly, which main() guarantees only when the block dims divide 16384.
__global__ void sumMatrix2D2D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x*blockDim.x + threadIdx.x;   // fast-varying index
    int j = blockIdx.y*blockDim.y + threadIdx.y;   // slow-varying index
    int mp = j*ny+i;                               // flattened offset, stride ny
    d_c[mp] = d_a[mp] + d_b[mp];
};
// Same element-wise add using a 1D grid of 1D blocks: blocks stride over
// the outer index i by gridDim.x and threads stride over the inner index j
// by blockDim.x, so any launch geometry covers the full nx*ny domain.
__global__ void sumMatrix1D1D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x;
    for (; i < nx; i += gridDim.x){            // block-stride over rows of the domain
        int j = threadIdx.x;
        for (; j < ny; j += blockDim.x){       // thread-stride within a row
            int mp = i*ny + j;                 // flattened offset, stride ny
            d_c[mp] = d_a[mp] + d_b[mp];
        }
    }
};
// Host driver: allocates three 16384x16384 float matrices, fills two with
// random data, runs both kernels, and spot-checks each GPU result against
// a CPU sum with a 1e-4 tolerance (reporting only the first mismatch).
// NOTE(review): malloc/cudaMalloc/cudaMemcpy results are unchecked and no
// free/cudaFree happens before return — confirm this is intended for a
// throwaway benchmark.
int main(int argc, char **argv){
    int const nx = 1<<14;
    int const ny = 1<<14;
    size_t mSize = nx*ny*sizeof(float);   // 1 GiB per matrix
    float* h_a;
    h_a = (float*)malloc(mSize);
    float* h_b;
    h_b = (float*)malloc(mSize);
    float* h_c;
    h_c = (float*)malloc(mSize);
    initialize(h_a, nx, ny);
    initialize(h_b, nx, ny);
    float* d_a;
    float* d_b;
    float* d_c;
    cudaMalloc((void**)&d_a, mSize);
    cudaMalloc((void**)&d_b, mSize);
    cudaMalloc((void**)&d_c, mSize);
    cudaMemcpy(d_a, h_a, mSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, mSize, cudaMemcpyHostToDevice);
    // Block shape is overridable from the command line: argv[1]=x, argv[2]=y.
    int xBlock = 32;
    int yBlock = 16;
    if(argc > 1) xBlock = atoi(argv[1]);
    if(argc > 2) yBlock = atoi(argv[2]);
    dim3 block(xBlock, yBlock);
    dim3 grid(nx/xBlock, ny/yBlock);      // assumes the block dims divide 16384 evenly
    printf("run with block %d, %d", xBlock, yBlock);
    sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);   // implicit sync with the kernel
    for (int i=0; i<nx*ny; i++){
        // NOTE(review): abs() on a float relies on the C++ floating-point
        // overloads being visible via <cstdlib>; fabsf (or std::fabs from
        // <cmath>) is the portable spelling — confirm the build resolves
        // the float overload rather than truncating to int.
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("2D2D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    sumMatrix1D1D<<<128, 128>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("1D1D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    return 0;
}
|
code for sm_80
Function : _Z13sumMatrix1D1DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e240000002500 */
/*0020*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */
/* 0x000e220000002100 */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ ISETP.GE.AND P0, PT, R8, c[0x0][0x17c], PT ; /* 0x00005f0008007a0c */
/* 0x001fe20003f06270 */
/*0070*/ BSSY B0, 0x170 ; /* 0x000000f000007945 */
/* 0x000fd80003800000 */
/*0080*/ @P0 BRA 0x160 ; /* 0x000000d000000947 */
/* 0x000fea0003800000 */
/*0090*/ MOV R9, R8 ; /* 0x0000000800097202 */
/* 0x000fc60000000f00 */
/*00a0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x001fe400078e00ff */
/*00b0*/ IMAD R6, R0, c[0x0][0x17c], R9 ; /* 0x00005f0000067a24 */
/* 0x000fc800078e0209 */
/*00c0*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*00d0*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00f0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0100*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0110*/ IADD3 R9, R9, c[0x0][0x0], RZ ; /* 0x0000000009097a10 */
/* 0x000fc80007ffe0ff */
/*0120*/ ISETP.GE.AND P0, PT, R9, c[0x0][0x17c], PT ; /* 0x00005f0009007a0c */
/* 0x000fe20003f06270 */
/*0130*/ FADD R11, R4, R3 ; /* 0x00000003040b7221 */
/* 0x004fca0000000000 */
/*0140*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0001ee000c101904 */
/*0150*/ @!P0 BRA 0xa0 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0160*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0170*/ IADD3 R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a10 */
/* 0x000fc80007ffe0ff */
/*0180*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0190*/ @!P0 BRA 0x60 ; /* 0xfffffec000008947 */
/* 0x000fea000383ffff */
/*01a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13sumMatrix2D2DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0050*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0060*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc400078e0203 */
/*0080*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fc800078e0205 */
/*0090*/ IMAD R0, R3, c[0x0][0x17c], R0 ; /* 0x00005f0003007a24 */
/* 0x000fc800078e0200 */
/*00a0*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*00b0*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*00f0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*0100*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include<cstdlib>
#include<stdio.h>
// CPU-side fill: mtx[0..nx*ny) gets rand()/RAND_MAX, i.e. values in [0, 1].
void initialize(float* mtx, int const nx, int const ny){
    int tmp = nx*ny;
    for(int i=0; i<tmp; i++){
        mtx[i] = rand()/(float)RAND_MAX;
    }
};
// Element-wise d_c = d_a + d_b, one thread per element (2D grid / 2D block).
// Flattened index mp uses ny as the stride; nx is unused in the body.
// NOTE(review): no bounds check — the launch must tile nx*ny exactly.
__global__ void sumMatrix2D2D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int mp = j*ny+i;
    d_c[mp] = d_a[mp] + d_b[mp];
};
// Same sum with a 1D grid / 1D block: blocks stride over i by gridDim.x,
// threads stride over j by blockDim.x, so any geometry covers the domain.
__global__ void sumMatrix1D1D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x;
    for (; i < nx; i += gridDim.x){
        int j = threadIdx.x;
        for (; j < ny; j += blockDim.x){
            int mp = i*ny + j;
            d_c[mp] = d_a[mp] + d_b[mp];
        }
    }
};
// Host driver: three 16384x16384 float matrices; runs both kernels and
// spot-checks each GPU result against a CPU sum with a 1e-4 tolerance.
// NOTE(review): malloc/cudaMalloc results unchecked; no free/cudaFree.
int main(int argc, char **argv){
    int const nx = 1<<14;
    int const ny = 1<<14;
    size_t mSize = nx*ny*sizeof(float);   // 1 GiB per matrix
    float* h_a;
    h_a = (float*)malloc(mSize);
    float* h_b;
    h_b = (float*)malloc(mSize);
    float* h_c;
    h_c = (float*)malloc(mSize);
    initialize(h_a, nx, ny);
    initialize(h_b, nx, ny);
    float* d_a;
    float* d_b;
    float* d_c;
    cudaMalloc((void**)&d_a, mSize);
    cudaMalloc((void**)&d_b, mSize);
    cudaMalloc((void**)&d_c, mSize);
    cudaMemcpy(d_a, h_a, mSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, mSize, cudaMemcpyHostToDevice);
    // Block shape overridable from the command line: argv[1]=x, argv[2]=y.
    int xBlock = 32;
    int yBlock = 16;
    if(argc > 1) xBlock = atoi(argv[1]);
    if(argc > 2) yBlock = atoi(argv[2]);
    dim3 block(xBlock, yBlock);
    dim3 grid(nx/xBlock, ny/yBlock);      // assumes the block dims divide 16384
    printf("run with block %d, %d", xBlock, yBlock);
    sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        // NOTE(review): abs() on a float relies on the C++ overloads being
        // visible via <cstdlib>; prefer fabsf/<cmath> — confirm resolution.
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("2D2D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    sumMatrix1D1D<<<128, 128>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("1D1D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    return 0;
}
|
.file "tmpxft_001bcada_00000000-6_sumMatrix.cudafe1.cpp"
.text
#APP
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
	# atexit hook: unregisters this TU's fat-binary handle with the CUDA
	# runtime at process exit.  sub/add $8 keeps rsp 16-aligned at the call.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# handle saved by the ctor
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2061:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	.globl	_Z10initializePfii
	.type	_Z10initializePfii, @function
	# void initialize(float* mtx, int nx, int ny) — SysV AMD64.
	# In: rdi = mtx, esi = nx, edx = ny.  Fills mtx[0..nx*ny) with
	# rand() scaled by .LC0 (0x30000000 = 2^-31f, i.e. the divide by
	# RAND_MAX was strength-reduced to a multiply).
_Z10initializePfii:
.LFB2057:
	.cfi_startproc
	endbr64
	imull	%edx, %esi			# esi = nx*ny (element count)
	testl	%esi, %esi
	jle	.L8				# nothing to do for <= 0 elements
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	subq	$8, %rsp			# keep rsp 16-aligned across the call
	.cfi_def_cfa_offset 32
	movq	%rdi, %rbx			# rbx = write cursor
	movslq	%esi, %rsi
	leaq	(%rdi,%rsi,4), %rbp		# rbp = one-past-end
.L5:
	call	rand@PLT
	pxor	%xmm0, %xmm0			# break false dep before cvtsi2ss
	cvtsi2ssl	%eax, %xmm0
	mulss	.LC0(%rip), %xmm0		# scale into [0, 1]
	movss	%xmm0, (%rbx)
	addq	$4, %rbx
	cmpq	%rbp, %rbx
	jne	.L5
	addq	$8, %rsp
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L8:
	.cfi_restore 3
	.cfi_restore 6
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z10initializePfii, .-_Z10initializePfii
	.globl	_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
	.type	_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii, @function
	# Launch stub for sumMatrix2D2D(float*,float*,float*,int,int).
	# Spills the five arguments, builds the kernel-argument pointer array
	# at 96(%rsp), pops the <<<grid,block,shmem,stream>>> configuration the
	# caller pushed, and calls cudaLaunchKernel if one was available.
_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)			# d_a
	movq	%rsi, 16(%rsp)			# d_b
	movq	%rdx, 8(%rsp)			# d_c
	movl	%ecx, 4(%rsp)			# nx
	movl	%r8d, (%rsp)			# ny
	movq	%fs:40, %rax			# stack-protector canary in
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax			# args[0] = &d_a
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax			# args[1] = &d_b
	movq	%rax, 104(%rsp)
	leaq	8(%rsp), %rax			# args[2] = &d_c
	movq	%rax, 112(%rsp)
	leaq	4(%rsp), %rax			# args[3] = &nx
	movq	%rax, 120(%rsp)
	movq	%rsp, %rax			# args[4] = &ny
	movq	%rax, 128(%rsp)
	movl	$1, 48(%rsp)			# grid/block dim3s preset to (1,1,1)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx			# &stream
	leaq	32(%rsp), %rdx			# &sharedMem
	leaq	60(%rsp), %rsi			# &blockDim
	leaq	48(%rsp), %rdi			# &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L15				# 0 => configuration popped: launch
.L11:
	movq	136(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L16
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L15:
	.cfi_restore_state
	pushq	40(%rsp)			# stack arg: stream
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)			# stack arg: sharedMem
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9			# args array (offsets shifted by the pushes)
	movq	76(%rsp), %rcx			# blockDim.x/y
	movl	84(%rsp), %r8d			# blockDim.z
	movq	64(%rsp), %rsi			# gridDim.x/y
	movl	72(%rsp), %edx			# gridDim.z
	leaq	_Z13sumMatrix2D2DPfS_S_ii(%rip), %rdi	# host handle for the kernel
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L11
.L16:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2083:
	.size	_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
	.globl	_Z13sumMatrix2D2DPfS_S_ii
	.type	_Z13sumMatrix2D2DPfS_S_ii, @function
	# Host trampoline bearing the kernel's mangled name: forwards all five
	# register arguments unchanged to the device stub.  Its address is what
	# gets registered with the runtime.  sub/add $8 keeps rsp 16-aligned.
_Z13sumMatrix2D2DPfS_S_ii:
.LFB2084:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2084:
	.size	_Z13sumMatrix2D2DPfS_S_ii, .-_Z13sumMatrix2D2DPfS_S_ii
	.globl	_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
	.type	_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii, @function
	# Launch stub for sumMatrix1D1D(float*,float*,float*,int,int).
	# Identical layout to the 2D2D stub: spill args, build the arg-pointer
	# array at 96(%rsp), pop the launch configuration, launch on success.
_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii:
.LFB2085:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 24(%rsp)			# d_a
	movq	%rsi, 16(%rsp)			# d_b
	movq	%rdx, 8(%rsp)			# d_c
	movl	%ecx, 4(%rsp)			# nx
	movl	%r8d, (%rsp)			# ny
	movq	%fs:40, %rax			# stack-protector canary in
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax			# args[0] = &d_a
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 120(%rsp)
	movq	%rsp, %rax			# args[4] = &ny
	movq	%rax, 128(%rsp)
	movl	$1, 48(%rsp)			# grid/block dim3s preset to (1,1,1)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx			# &stream
	leaq	32(%rsp), %rdx			# &sharedMem
	leaq	60(%rsp), %rsi			# &blockDim
	leaq	48(%rsp), %rdi			# &gridDim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L23				# 0 => configuration popped: launch
.L19:
	movq	136(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L24
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L23:
	.cfi_restore_state
	pushq	40(%rsp)			# stack arg: stream
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)			# stack arg: sharedMem
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9			# args array (offsets shifted by the pushes)
	movq	76(%rsp), %rcx			# blockDim.x/y
	movl	84(%rsp), %r8d			# blockDim.z
	movq	64(%rsp), %rsi			# gridDim.x/y
	movl	72(%rsp), %edx			# gridDim.z
	leaq	_Z13sumMatrix1D1DPfS_S_ii(%rip), %rdi	# host handle for the kernel
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L19
.L24:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2085:
	.size	_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
	.globl	_Z13sumMatrix1D1DPfS_S_ii
	.type	_Z13sumMatrix1D1DPfS_S_ii, @function
	# Host trampoline for sumMatrix1D1D: forwards all register arguments
	# to the device stub; registered with the runtime by address.
_Z13sumMatrix1D1DPfS_S_ii:
.LFB2086:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2086:
	.size	_Z13sumMatrix1D1DPfS_S_ii, .-_Z13sumMatrix1D1DPfS_S_ii
	.section	.rodata.str1.1,"aMS",@progbits,1
	# printf format strings used by main().
.LC1:
	.string	"run with block %d, %d"
.LC4:
	.string	"2D2D"
.LC5:
	.string	"%8.5f, %8.5f, %8.5f, %d \n"
.LC6:
	.string	"1D1D"
	.text
	.globl	main
	.type	main, @function
	# int main(int argc, char **argv) — SysV AMD64.
	# Register roles: r13d = argc, r14 = argv; rbp = h_a, rbx = h_b,
	# 8(%rsp) = h_c; 16/24/32(%rsp) = d_a/d_b/d_c; 40(%rsp) = dim3 block,
	# 52(%rsp) = dim3 grid; 88(%rsp) = stack-protector canary.
main:
.LFB2058:
	.cfi_startproc
	endbr64
	pushq	%r15
	.cfi_def_cfa_offset 16
	.cfi_offset 15, -16
	pushq	%r14
	.cfi_def_cfa_offset 24
	.cfi_offset 14, -24
	pushq	%r13
	.cfi_def_cfa_offset 32
	.cfi_offset 13, -32
	pushq	%r12
	.cfi_def_cfa_offset 40
	.cfi_offset 12, -40
	pushq	%rbp
	.cfi_def_cfa_offset 48
	.cfi_offset 6, -48
	pushq	%rbx
	.cfi_def_cfa_offset 56
	.cfi_offset 3, -56
	subq	$104, %rsp
	.cfi_def_cfa_offset 160
	movl	%edi, %r13d
	movq	%rsi, %r14
	movq	%fs:40, %rax			# canary in
	movq	%rax, 88(%rsp)
	xorl	%eax, %eax
	# Three 1 GiB host buffers (16384*16384*4 bytes); results unchecked.
	movl	$1073741824, %edi
	call	malloc@PLT
	movq	%rax, %rbp			# h_a
	movl	$1073741824, %edi
	call	malloc@PLT
	movq	%rax, %rbx			# h_b
	movl	$1073741824, %edi
	call	malloc@PLT
	movq	%rax, 8(%rsp)			# h_c
	movl	$16384, %edx
	movl	$16384, %esi
	movq	%rbp, %rdi
	call	_Z10initializePfii		# fill h_a with random data
	movl	$16384, %edx
	movl	$16384, %esi
	movq	%rbx, %rdi
	call	_Z10initializePfii		# fill h_b
	# Device allocations and host->device copies.
	leaq	16(%rsp), %rdi
	movl	$1073741824, %esi
	call	cudaMalloc@PLT
	leaq	24(%rsp), %rdi
	movl	$1073741824, %esi
	call	cudaMalloc@PLT
	leaq	32(%rsp), %rdi
	movl	$1073741824, %esi
	call	cudaMalloc@PLT
	movl	$1, %ecx			# cudaMemcpyHostToDevice
	movl	$1073741824, %edx
	movq	%rbp, %rsi
	movq	16(%rsp), %rdi
	call	cudaMemcpy@PLT
	movl	$1, %ecx
	movl	$1073741824, %edx
	movq	%rbx, %rsi
	movq	24(%rsp), %rdi
	call	cudaMemcpy@PLT
	movl	$32, %r12d			# xBlock default
	movl	$16, %esi			# yBlock default
	cmpl	$1, %r13d
	jg	.L49				# argc > 1: parse overrides
.L28:
	# block = dim3(xBlock, yBlock, 1); grid = dim3(16384/x, 16384/y, 1).
	movl	%r12d, 40(%rsp)
	movl	%esi, 44(%rsp)
	movl	$1, 48(%rsp)
	movl	$16384, %ecx
	movl	%ecx, %eax
	cltd					# sign-extend before idiv
	idivl	%r12d
	movl	%eax, 52(%rsp)
	movl	%ecx, %eax
	cltd
	idivl	%esi
	movl	%eax, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	%esi, %ecx
	movl	%r12d, %edx
	leaq	.LC1(%rip), %rsi		# "run with block %d, %d"
	movl	$2, %edi
	movl	$0, %eax			# variadic call: no vector args
	call	__printf_chk@PLT
	# Push <<<grid, block, 0, 0>>> then launch via the device stub.
	movl	48(%rsp), %ecx			# blockDim.z
	movl	$0, %r9d			# stream = 0
	movl	$0, %r8d			# sharedMem = 0
	movq	40(%rsp), %rdx			# blockDim.x/y
	movq	52(%rsp), %rdi			# gridDim.x/y
	movl	60(%rsp), %esi			# gridDim.z
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L50				# 0 => config accepted: launch
.L29:
	# Copy the result back and verify against h_a + h_b.
	movl	$2, %ecx			# cudaMemcpyDeviceToHost
	movl	$1073741824, %edx
	movq	32(%rsp), %rsi
	movq	8(%rsp), %r15
	movq	%r15, %rdi
	call	cudaMemcpy@PLT
	movq	%r15, %r13			# fresh cursor copies for loop 2
	movq	%rbp, %r14
	movq	%rbx, %r12
	movl	$0, 4(%rsp)			# i = 0
	movss	.LC2(%rip), %xmm3		# sign-clear mask (fabsf)
	movsd	.LC3(%rip), %xmm2		# 1e-4 tolerance
.L33:
	movss	0(%rbp), %xmm1			# h_a[i]
	addss	(%rbx), %xmm1			# + h_b[i]
	movss	(%r15), %xmm0			# h_c[i]
	subss	%xmm1, %xmm0
	andps	%xmm3, %xmm0			# |diff|
	cvtss2sd	%xmm0, %xmm0
	comisd	%xmm2, %xmm0
	ja	.L51				# mismatch: report and stop
	addl	$1, 4(%rsp)
	movl	4(%rsp), %eax
	addq	$4, %r15
	addq	$4, %rbp
	addq	$4, %rbx
	cmpl	$268435456, %eax		# 16384*16384 elements
	jne	.L33
	jmp	.L32
.L49:
	# atoi(argv[1]) / atoi(argv[2]) lowered to __isoc23_strtol (glibc).
	movq	8(%r14), %rdi
	movl	$10, %edx
	movl	$0, %esi
	call	__isoc23_strtol@PLT
	movl	%eax, %r12d			# xBlock override
	movl	$16, %esi
	cmpl	$2, %r13d
	jle	.L28
	movq	16(%r14), %rdi
	movl	$10, %edx
	movl	$0, %esi
	call	__isoc23_strtol@PLT
	movl	%eax, %esi			# yBlock override
	jmp	.L28
.L50:
	# sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, 16384, 16384)
	movl	$16384, %r8d
	movl	$16384, %ecx
	movq	32(%rsp), %rdx
	movq	24(%rsp), %rsi
	movq	16(%rsp), %rdi
	call	_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
	jmp	.L29
.L51:
	# Report first mismatch from the 2D2D run.
	leaq	.LC4(%rip), %rsi		# "2D2D"
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT
	pxor	%xmm0, %xmm0
	cvtss2sd	0(%rbp), %xmm0		# h_a[i]
	movl	4(%rsp), %edx			# i
	pxor	%xmm2, %xmm2
	cvtss2sd	(%r15), %xmm2		# h_c[i]
	pxor	%xmm1, %xmm1
	cvtss2sd	(%rbx), %xmm1		# h_b[i]
	leaq	.LC5(%rip), %rsi
	movl	$2, %edi
	movl	$3, %eax			# three xmm args
	call	__printf_chk@PLT
.L32:
	# Second launch: sumMatrix1D1D<<<128, 128>>>.
	movl	$128, 76(%rsp)			# blockDim = (128,1,1)
	movl	$1, 80(%rsp)
	movl	$128, 64(%rsp)			# gridDim = (128,1,1)
	movl	$1, 68(%rsp)
	movl	$0, %r9d
	movl	$0, %r8d
	movq	76(%rsp), %rdx
	movl	$1, %ecx
	movq	64(%rsp), %rdi
	movl	$1, %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L52
.L34:
	movl	$2, %ecx			# cudaMemcpyDeviceToHost
	movl	$1073741824, %edx
	movq	32(%rsp), %rsi
	movq	8(%rsp), %rdi
	call	cudaMemcpy@PLT
	movl	$0, %ebx			# i = 0 (second verify loop)
	movss	.LC2(%rip), %xmm3
	movsd	.LC3(%rip), %xmm2
.L38:
	movss	(%r14), %xmm1			# h_a[i]
	addss	(%r12), %xmm1			# + h_b[i]
	movss	0(%r13), %xmm0			# h_c[i]
	subss	%xmm1, %xmm0
	andps	%xmm3, %xmm0			# |diff|
	cvtss2sd	%xmm0, %xmm0
	comisd	%xmm2, %xmm0
	ja	.L53
	addl	$1, %ebx
	addq	$4, %r13
	addq	$4, %r12
	addq	$4, %r14
	cmpl	$268435456, %ebx
	jne	.L38
	jmp	.L37
.L52:
	movl	$16384, %r8d
	movl	$16384, %ecx
	movq	32(%rsp), %rdx
	movq	24(%rsp), %rsi
	movq	16(%rsp), %rdi
	call	_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
	jmp	.L34
.L53:
	# Report first mismatch from the 1D1D run.
	leaq	.LC6(%rip), %rsi		# "1D1D"
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT
	pxor	%xmm0, %xmm0
	cvtss2sd	(%r14), %xmm0
	movl	%ebx, %edx
	pxor	%xmm2, %xmm2
	cvtss2sd	0(%r13), %xmm2
	pxor	%xmm1, %xmm1
	cvtss2sd	(%r12), %xmm1
	leaq	.LC5(%rip), %rsi
	movl	$2, %edi
	movl	$3, %eax
	call	__printf_chk@PLT
.L37:
	movq	88(%rsp), %rax			# canary check
	subq	%fs:40, %rax
	jne	.L54
	movl	$0, %eax			# return 0
	addq	$104, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 56
	popq	%rbx
	.cfi_def_cfa_offset 48
	popq	%rbp
	.cfi_def_cfa_offset 40
	popq	%r12
	.cfi_def_cfa_offset 32
	popq	%r13
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	ret
.L54:
	.cfi_restore_state
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2058:
	.size	main, .-main
	.section	.rodata.str1.1
	# Device-symbol names used when registering the kernels.
.LC7:
	.string	"_Z13sumMatrix1D1DPfS_S_ii"
.LC8:
	.string	"_Z13sumMatrix2D2DPfS_S_ii"
	.text
	.type	_ZL24__sti____cudaRegisterAllv, @function
	# Static initializer (invoked from .init_array before main): registers
	# the embedded fat binary and both kernels with the CUDA runtime, then
	# installs the unregister hook via atexit.
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
	.cfi_startproc
	endbr64
	pushq	%rbx
	.cfi_def_cfa_offset 16
	.cfi_offset 3, -16
	leaq	_ZL15__fatDeviceText(%rip), %rdi	# fatbin wrapper
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rbx				# keep handle for 2nd registration
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	# Register sumMatrix1D1D: host trampoline address <-> device name .LC7.
	pushq	$0					# trailing unused params = NULL
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d				# thread_limit = -1 (none)
	leaq	.LC7(%rip), %rdx
	movq	%rdx, %rcx				# deviceFun == deviceName
	leaq	_Z13sumMatrix1D1DPfS_S_ii(%rip), %rsi
	movq	%rax, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	# Register sumMatrix2D2D the same way.
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC8(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z13sumMatrix2D2DPfS_S_ii(%rip), %rsi
	movq	%rbx, %rdi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT				# tear down at process exit
	popq	%rbx
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2088:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
	.section	.init_array,"aw"
	.align 8
	.quad	_ZL24__sti____cudaRegisterAllv	# run registration before main()
	.section	.nvFatBinSegment,"aw"
	.align 8
	.type	_ZL15__fatDeviceText, @object
	.size	_ZL15__fatDeviceText, 24
	# 24-byte fatbin wrapper: {magic 0x466243b1, version 1, &fatbinData, 0}.
_ZL15__fatDeviceText:
	.long	1180844977
	.long	1
	.quad	fatbinData
	.quad	0
	.local	_ZL20__cudaFatCubinHandle
	.comm	_ZL20__cudaFatCubinHandle,8,8
	.section	.rodata.cst4,"aM",@progbits,4
	.align 4
	# 0x30000000 = 2^-31f, the 1/RAND_MAX factor used by initialize().
.LC0:
	.long	805306368
	.section	.rodata.cst16,"aM",@progbits,16
	.align 16
	# Sign-clear mask for andps: absolute value of the low float lane.
.LC2:
	.long	2147483647
	.long	0
	.long	0
	.long	0
	.section	.rodata.cst8,"aM",@progbits,8
	.align 8
	# Double-precision bits of the 1e-4 verification tolerance.
.LC3:
	.long	-350469331
	.long	1058682594
	.ident	"GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
	.section	.note.GNU-stack,"",@progbits
	.section	.note.gnu.property,"a"
	.align 8
	.long	1f - 0f
	.long	4f - 1f
	.long	5
0:
	.string	"GNU"
1:
	.align 8
	.long	0xc0000002		# GNU_PROPERTY_X86_FEATURE_1_AND
	.long	3f - 2f
2:
	.long	0x3			# IBT | SHSTK supported
3:
	.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include<cstdlib>
#include<stdio.h>
// CPU-side fill: mtx[0..nx*ny) gets rand()/RAND_MAX, i.e. values in [0, 1].
void initialize(float* mtx, int const nx, int const ny){
    int tmp = nx*ny;
    for(int i=0; i<tmp; i++){
        mtx[i] = rand()/(float)RAND_MAX;
    }
};
// Element-wise d_c = d_a + d_b, one thread per element (2D grid / 2D block).
// Flattened index mp uses ny as the stride; nx is unused in the body.
// NOTE(review): no bounds check — the launch must tile nx*ny exactly.
__global__ void sumMatrix2D2D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int mp = j*ny+i;
    d_c[mp] = d_a[mp] + d_b[mp];
};
// Same sum with a 1D grid / 1D block: blocks stride over i by gridDim.x,
// threads stride over j by blockDim.x, so any geometry covers the domain.
__global__ void sumMatrix1D1D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x;
    for (; i < nx; i += gridDim.x){
        int j = threadIdx.x;
        for (; j < ny; j += blockDim.x){
            int mp = i*ny + j;
            d_c[mp] = d_a[mp] + d_b[mp];
        }
    }
};
// Host driver: three 16384x16384 float matrices; runs both kernels and
// spot-checks each GPU result against a CPU sum with a 1e-4 tolerance.
// NOTE(review): malloc/cudaMalloc results unchecked; no free/cudaFree.
int main(int argc, char **argv){
    int const nx = 1<<14;
    int const ny = 1<<14;
    size_t mSize = nx*ny*sizeof(float);   // 1 GiB per matrix
    float* h_a;
    h_a = (float*)malloc(mSize);
    float* h_b;
    h_b = (float*)malloc(mSize);
    float* h_c;
    h_c = (float*)malloc(mSize);
    initialize(h_a, nx, ny);
    initialize(h_b, nx, ny);
    float* d_a;
    float* d_b;
    float* d_c;
    cudaMalloc((void**)&d_a, mSize);
    cudaMalloc((void**)&d_b, mSize);
    cudaMalloc((void**)&d_c, mSize);
    cudaMemcpy(d_a, h_a, mSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, mSize, cudaMemcpyHostToDevice);
    // Block shape overridable from the command line: argv[1]=x, argv[2]=y.
    int xBlock = 32;
    int yBlock = 16;
    if(argc > 1) xBlock = atoi(argv[1]);
    if(argc > 2) yBlock = atoi(argv[2]);
    dim3 block(xBlock, yBlock);
    dim3 grid(nx/xBlock, ny/yBlock);      // assumes the block dims divide 16384
    printf("run with block %d, %d", xBlock, yBlock);
    sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        // NOTE(review): abs() on a float relies on the C++ overloads being
        // visible via <cstdlib>; prefer fabsf/<cmath> — confirm resolution.
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("2D2D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    sumMatrix1D1D<<<128, 128>>>(d_a, d_b, d_c, nx, ny);
    cudaMemcpy(h_c, d_c, mSize, cudaMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("1D1D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    return 0;
}
|
#include <hip/hip_runtime.h>
#include<cstdlib>
#include<stdio.h>
// CPU-side fill: mtx[0..nx*ny) gets rand()/RAND_MAX, i.e. values in [0, 1].
void initialize(float* mtx, int const nx, int const ny){
    int tmp = nx*ny;
    for(int i=0; i<tmp; i++){
        mtx[i] = rand()/(float)RAND_MAX;
    }
};
// Element-wise d_c = d_a + d_b, one thread per element (2D grid / 2D block).
// Flattened index mp uses ny as the stride; nx is unused in the body.
// NOTE(review): no bounds check — the launch must tile nx*ny exactly.
__global__ void sumMatrix2D2D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int mp = j*ny+i;
    d_c[mp] = d_a[mp] + d_b[mp];
};
// Same sum with a 1D grid / 1D block: blocks stride over i by gridDim.x,
// threads stride over j by blockDim.x, so any geometry covers the domain.
__global__ void sumMatrix1D1D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x;
    for (; i < nx; i += gridDim.x){
        int j = threadIdx.x;
        for (; j < ny; j += blockDim.x){
            int mp = i*ny + j;
            d_c[mp] = d_a[mp] + d_b[mp];
        }
    }
};
// Host driver (HIP port of the CUDA version): three 16384x16384 float
// matrices; runs both kernels and spot-checks each GPU result against a
// CPU sum with a 1e-4 tolerance.
// NOTE(review): malloc/hipMalloc results unchecked; no free/hipFree.
int main(int argc, char **argv){
    int const nx = 1<<14;
    int const ny = 1<<14;
    size_t mSize = nx*ny*sizeof(float);   // 1 GiB per matrix
    float* h_a;
    h_a = (float*)malloc(mSize);
    float* h_b;
    h_b = (float*)malloc(mSize);
    float* h_c;
    h_c = (float*)malloc(mSize);
    initialize(h_a, nx, ny);
    initialize(h_b, nx, ny);
    float* d_a;
    float* d_b;
    float* d_c;
    hipMalloc((void**)&d_a, mSize);
    hipMalloc((void**)&d_b, mSize);
    hipMalloc((void**)&d_c, mSize);
    hipMemcpy(d_a, h_a, mSize, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, mSize, hipMemcpyHostToDevice);
    // Block shape overridable from the command line: argv[1]=x, argv[2]=y.
    int xBlock = 32;
    int yBlock = 16;
    if(argc > 1) xBlock = atoi(argv[1]);
    if(argc > 2) yBlock = atoi(argv[2]);
    dim3 block(xBlock, yBlock);
    dim3 grid(nx/xBlock, ny/yBlock);      // assumes the block dims divide 16384
    printf("run with block %d, %d", xBlock, yBlock);
    sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
    hipMemcpy(h_c, d_c, mSize, hipMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        // NOTE(review): abs() on a float relies on the C++ overloads being
        // visible via <cstdlib>; prefer fabsf/<cmath> — confirm resolution.
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("2D2D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    sumMatrix1D1D<<<128, 128>>>(d_a, d_b, d_c, nx, ny);
    hipMemcpy(h_c, d_c, mSize, hipMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("1D1D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include<cstdlib>
#include<stdio.h>
// CPU-side fill: mtx[0..nx*ny) gets rand()/RAND_MAX, i.e. values in [0, 1].
void initialize(float* mtx, int const nx, int const ny){
    int tmp = nx*ny;
    for(int i=0; i<tmp; i++){
        mtx[i] = rand()/(float)RAND_MAX;
    }
};
// Element-wise d_c = d_a + d_b, one thread per element (2D grid / 2D block).
// Flattened index mp uses ny as the stride; nx is unused in the body.
// NOTE(review): no bounds check — the launch must tile nx*ny exactly.
__global__ void sumMatrix2D2D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int mp = j*ny+i;
    d_c[mp] = d_a[mp] + d_b[mp];
};
// Same sum with a 1D grid / 1D block: blocks stride over i by gridDim.x,
// threads stride over j by blockDim.x, so any geometry covers the domain.
__global__ void sumMatrix1D1D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x;
    for (; i < nx; i += gridDim.x){
        int j = threadIdx.x;
        for (; j < ny; j += blockDim.x){
            int mp = i*ny + j;
            d_c[mp] = d_a[mp] + d_b[mp];
        }
    }
};
// Host driver (HIP port of the CUDA version): three 16384x16384 float
// matrices; runs both kernels and spot-checks each GPU result against a
// CPU sum with a 1e-4 tolerance.
// NOTE(review): malloc/hipMalloc results unchecked; no free/hipFree.
int main(int argc, char **argv){
    int const nx = 1<<14;
    int const ny = 1<<14;
    size_t mSize = nx*ny*sizeof(float);   // 1 GiB per matrix
    float* h_a;
    h_a = (float*)malloc(mSize);
    float* h_b;
    h_b = (float*)malloc(mSize);
    float* h_c;
    h_c = (float*)malloc(mSize);
    initialize(h_a, nx, ny);
    initialize(h_b, nx, ny);
    float* d_a;
    float* d_b;
    float* d_c;
    hipMalloc((void**)&d_a, mSize);
    hipMalloc((void**)&d_b, mSize);
    hipMalloc((void**)&d_c, mSize);
    hipMemcpy(d_a, h_a, mSize, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, mSize, hipMemcpyHostToDevice);
    // Block shape overridable from the command line: argv[1]=x, argv[2]=y.
    int xBlock = 32;
    int yBlock = 16;
    if(argc > 1) xBlock = atoi(argv[1]);
    if(argc > 2) yBlock = atoi(argv[2]);
    dim3 block(xBlock, yBlock);
    dim3 grid(nx/xBlock, ny/yBlock);      // assumes the block dims divide 16384
    printf("run with block %d, %d", xBlock, yBlock);
    sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
    hipMemcpy(h_c, d_c, mSize, hipMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        // NOTE(review): abs() on a float relies on the C++ overloads being
        // visible via <cstdlib>; prefer fabsf/<cmath> — confirm resolution.
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("2D2D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    sumMatrix1D1D<<<128, 128>>>(d_a, d_b, d_c, nx, ny);
    hipMemcpy(h_c, d_c, mSize, hipMemcpyDeviceToHost);
    for (int i=0; i<nx*ny; i++){
        if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
            printf("1D1D");
            printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
            break;
        }
    }
    return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13sumMatrix2D2DPfS_S_ii
.globl _Z13sumMatrix2D2DPfS_S_ii
.p2align 8
.type _Z13sumMatrix2D2DPfS_S_ii,@function
_Z13sumMatrix2D2DPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x1c
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s2, 16
s_and_b32 s2, s2, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s4, v[1:2]
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s14, s14, s2
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v2, s3
v_add3_u32 v0, s14, v0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sumMatrix2D2DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13sumMatrix2D2DPfS_S_ii, .Lfunc_end0-_Z13sumMatrix2D2DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13sumMatrix1D1DPfS_S_ii
.globl _Z13sumMatrix1D1DPfS_S_ii
.p2align 8
.type _Z13sumMatrix1D1DPfS_S_ii,@function
_Z13sumMatrix1D1DPfS_S_ii:
s_load_b32 s12, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s15, s12
s_cbranch_scc1 .LBB1_6
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x1c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
s_add_u32 s10, s0, 32
s_addc_u32 s11, s1, 0
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_mul_i32 s13, s15, s2
s_mul_i32 s14, s3, s2
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s16
s_add_i32 s15, s3, s15
s_add_i32 s13, s13, s14
s_cmp_lt_i32 s15, s12
s_cbranch_scc0 .LBB1_6
.LBB1_3:
s_and_saveexec_b32 s16, vcc_lo
s_cbranch_execz .LBB1_2
s_load_b32 s0, s[10:11], 0xc
v_mov_b32_e32 v1, v0
s_mov_b32 s18, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s17, s0, 0xffff
.p2align 6
.LBB1_5:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, s13, v1
v_add_nc_u32_e32 v1, s17, v1
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v4, s0, s4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s0, s5, v3, s0
v_add_co_u32 v6, s0, s6, v2
v_add_co_ci_u32_e64 v7, s0, s7, v3, s0
v_cmp_le_i32_e64 s0, s2, v1
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[6:7], off
v_add_co_u32 v2, s1, s8, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s1, s9, v3, s1
s_or_b32 s18, s0, s18
s_waitcnt vmcnt(0)
v_add_f32_e32 v4, v4, v5
global_store_b32 v[2:3], v4, off
s_and_not1_b32 exec_lo, exec_lo, s18
s_cbranch_execnz .LBB1_5
s_branch .LBB1_2
.LBB1_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sumMatrix1D1DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 19
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13sumMatrix1D1DPfS_S_ii, .Lfunc_end1-_Z13sumMatrix1D1DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sumMatrix2D2DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sumMatrix2D2DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sumMatrix1D1DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 21
.sgpr_spill_count: 0
.symbol: _Z13sumMatrix1D1DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include<cstdlib>
#include<stdio.h>
// Fill an nx*ny matrix with pseudo-random values in [0, 1] drawn from rand().
// Determinism follows the C library PRNG state (seed via srand if needed).
void initialize(float* mtx, int const nx, int const ny){
    int const total = nx * ny;
    for (int idx = 0; idx < total; ++idx) {
        mtx[idx] = rand() / (float)RAND_MAX;
    }
};
// Element-wise matrix add, one thread per element: d_c[mp] = d_a[mp] + d_b[mp].
// (i, j) are the global x/y thread coordinates. The bounds guard makes the
// kernel safe for launch configurations whose thread count overshoots the
// matrix extent (the original wrote out of bounds in that case).
// NOTE(review): the row stride is ny; this is correct for the nx == ny
// launches in this file, but for nx != ny the stride should likely be the
// column count nx — confirm intended layout before reusing.
__global__ void sumMatrix2D2D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i < nx && j < ny) {
        int mp = j*ny+i;
        d_c[mp] = d_a[mp] + d_b[mp];
    }
};
// Element-wise matrix add with strided loops: each block walks rows in steps
// of gridDim.x and each thread walks columns in steps of blockDim.x, so any
// 1D launch shape covers the whole nx*ny matrix.
__global__ void sumMatrix1D1D(float* d_a, float* d_b, float* d_c, int const nx, int const ny){
    for (int row = blockIdx.x; row < nx; row += gridDim.x) {
        for (int col = threadIdx.x; col < ny; col += blockDim.x) {
            int const idx = row*ny + col;
            d_c[idx] = d_a[idx] + d_b[idx];
        }
    }
};
int main(int argc, char **argv){
int const nx = 1<<14;
int const ny = 1<<14;
size_t mSize = nx*ny*sizeof(float);
float* h_a;
h_a = (float*)malloc(mSize);
float* h_b;
h_b = (float*)malloc(mSize);
float* h_c;
h_c = (float*)malloc(mSize);
initialize(h_a, nx, ny);
initialize(h_b, nx, ny);
float* d_a;
float* d_b;
float* d_c;
hipMalloc((void**)&d_a, mSize);
hipMalloc((void**)&d_b, mSize);
hipMalloc((void**)&d_c, mSize);
hipMemcpy(d_a, h_a, mSize, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, mSize, hipMemcpyHostToDevice);
int xBlock = 32;
int yBlock = 16;
if(argc > 1) xBlock = atoi(argv[1]);
if(argc > 2) yBlock = atoi(argv[2]);
dim3 block(xBlock, yBlock);
dim3 grid(nx/xBlock, ny/yBlock);
printf("run with block %d, %d", xBlock, yBlock);
sumMatrix2D2D<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
hipMemcpy(h_c, d_c, mSize, hipMemcpyDeviceToHost);
for (int i=0; i<nx*ny; i++){
if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
printf("2D2D");
printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
break;
}
}
sumMatrix1D1D<<<128, 128>>>(d_a, d_b, d_c, nx, ny);
hipMemcpy(h_c, d_c, mSize, hipMemcpyDeviceToHost);
for (int i=0; i<nx*ny; i++){
if ( abs(h_c[i] - (h_a[i] + h_b[i])) > 1e-4 ) {
printf("1D1D");
printf("%8.5f, %8.5f, %8.5f, %d \n", h_a[i], h_b[i], h_c[i], i);
break;
}
}
return 0;
}
|
.text
.file "sumMatrix.hip"
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z10initializePfii
.LCPI0_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z10initializePfii
.p2align 4, 0x90
.type _Z10initializePfii,@function
_Z10initializePfii: # @_Z10initializePfii
.cfi_startproc
# %bb.0:
imull %edx, %esi
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI0_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z10initializePfii, .Lfunc_end0-_Z10initializePfii
.cfi_endproc
# -- End function
.globl _Z28__device_stub__sumMatrix2D2DPfS_S_ii # -- Begin function _Z28__device_stub__sumMatrix2D2DPfS_S_ii
.p2align 4, 0x90
.type _Z28__device_stub__sumMatrix2D2DPfS_S_ii,@function
_Z28__device_stub__sumMatrix2D2DPfS_S_ii: # @_Z28__device_stub__sumMatrix2D2DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13sumMatrix2D2DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z28__device_stub__sumMatrix2D2DPfS_S_ii, .Lfunc_end1-_Z28__device_stub__sumMatrix2D2DPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z28__device_stub__sumMatrix1D1DPfS_S_ii # -- Begin function _Z28__device_stub__sumMatrix1D1DPfS_S_ii
.p2align 4, 0x90
.type _Z28__device_stub__sumMatrix1D1DPfS_S_ii,@function
_Z28__device_stub__sumMatrix1D1DPfS_S_ii: # @_Z28__device_stub__sumMatrix1D1DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13sumMatrix1D1DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z28__device_stub__sumMatrix1D1DPfS_S_ii, .Lfunc_end2-_Z28__device_stub__sumMatrix1D1DPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x30000000 # float 4.65661287E-10
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_2:
.quad 0x3f1a36e2eb1c432d # double 1.0E-4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r12
movl %edi, %ebp
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %rbx
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r14
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r15
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbx,%r13,4)
incq %r13
cmpq $268435456, %r13 # imm = 0x10000000
jne .LBB3_1
# %bb.2: # %.lr.ph.i70.preheader
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i70
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%r14,%r13,4)
incq %r13
cmpq $268435456, %r13 # imm = 0x10000000
jne .LBB3_3
# %bb.4: # %_Z10initializePfii.exit74
leaq 32(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl $32, %r13d
cmpl $2, %ebp
jl .LBB3_6
# %bb.5:
movq 8(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r13
.LBB3_6:
movl $16, %ecx
cmpl $3, %ebp
jl .LBB3_8
# %bb.7:
movq 16(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
.LBB3_8:
movl %r13d, %eax
movq %rcx, %r12
shlq $32, %r12
orq %rax, %r12
movl $16384, %eax # imm = 0x4000
xorl %edx, %edx
idivl %r13d
movl %eax, %ebp
movl $16384, %eax # imm = 0x4000
xorl %edx, %edx
idivl %ecx
# kill: def $eax killed $eax def $rax
shlq $32, %rax
orq %rax, %rbp
movl $.L.str, %edi
movl %r13d, %esi
movl %ecx, %edx
xorl %eax, %eax
callq printf
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $16384, 12(%rsp) # imm = 0x4000
movl $16384, 8(%rsp) # imm = 0x4000
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13sumMatrix2D2DPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq 16(%rsp), %rsi
movl $1073741824, %edx # imm = 0x40000000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_11: # =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rbx,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm1
subss %xmm1, %xmm0
andps .LCPI3_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
ucomisd .LCPI3_2(%rip), %xmm0
ja .LBB3_12
# %bb.13: # in Loop: Header=BB3_11 Depth=1
incq %r12
cmpq $268435456, %r12 # imm = 0x10000000
jne .LBB3_11
jmp .LBB3_14
.LBB3_12:
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss (%r14,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movss (%r15,%r12,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movl $.L.str.2, %edi
movl %r12d, %esi
movb $3, %al
callq printf
.LBB3_14: # %.loopexit87
movabsq $4294967424, %rdi # imm = 0x100000080
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_16
# %bb.15:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $16384, 12(%rsp) # imm = 0x4000
movl $16384, 8(%rsp) # imm = 0x4000
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13sumMatrix1D1DPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_16:
movq 16(%rsp), %rsi
movl $1073741824, %edx # imm = 0x40000000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
movaps .LCPI3_1(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN]
movsd .LCPI3_2(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB3_17: # =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rbx,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm1
subss %xmm1, %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
ucomisd %xmm3, %xmm0
ja .LBB3_18
# %bb.19: # in Loop: Header=BB3_17 Depth=1
incq %r12
cmpq $268435456, %r12 # imm = 0x10000000
jne .LBB3_17
jmp .LBB3_20
.LBB3_18:
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss (%r14,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movss (%r15,%r12,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movl $.L.str.2, %edi
movl %r12d, %esi
movb $3, %al
callq printf
.LBB3_20: # %.loopexit
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13sumMatrix2D2DPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13sumMatrix1D1DPfS_S_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13sumMatrix2D2DPfS_S_ii,@object # @_Z13sumMatrix2D2DPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z13sumMatrix2D2DPfS_S_ii
.p2align 3, 0x0
_Z13sumMatrix2D2DPfS_S_ii:
.quad _Z28__device_stub__sumMatrix2D2DPfS_S_ii
.size _Z13sumMatrix2D2DPfS_S_ii, 8
.type _Z13sumMatrix1D1DPfS_S_ii,@object # @_Z13sumMatrix1D1DPfS_S_ii
.globl _Z13sumMatrix1D1DPfS_S_ii
.p2align 3, 0x0
_Z13sumMatrix1D1DPfS_S_ii:
.quad _Z28__device_stub__sumMatrix1D1DPfS_S_ii
.size _Z13sumMatrix1D1DPfS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "run with block %d, %d"
.size .L.str, 22
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "2D2D"
.size .L.str.1, 5
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%8.5f, %8.5f, %8.5f, %d \n"
.size .L.str.2, 26
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "1D1D"
.size .L.str.3, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13sumMatrix2D2DPfS_S_ii"
.size .L__unnamed_1, 26
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13sumMatrix1D1DPfS_S_ii"
.size .L__unnamed_2, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__sumMatrix2D2DPfS_S_ii
.addrsig_sym _Z28__device_stub__sumMatrix1D1DPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13sumMatrix2D2DPfS_S_ii
.addrsig_sym _Z13sumMatrix1D1DPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z13sumMatrix1D1DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e240000002500 */
/*0020*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */
/* 0x000e220000002100 */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ ISETP.GE.AND P0, PT, R8, c[0x0][0x17c], PT ; /* 0x00005f0008007a0c */
/* 0x001fe20003f06270 */
/*0070*/ BSSY B0, 0x170 ; /* 0x000000f000007945 */
/* 0x000fd80003800000 */
/*0080*/ @P0 BRA 0x160 ; /* 0x000000d000000947 */
/* 0x000fea0003800000 */
/*0090*/ MOV R9, R8 ; /* 0x0000000800097202 */
/* 0x000fc60000000f00 */
/*00a0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x001fe400078e00ff */
/*00b0*/ IMAD R6, R0, c[0x0][0x17c], R9 ; /* 0x00005f0000067a24 */
/* 0x000fc800078e0209 */
/*00c0*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*00d0*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00e0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00f0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0100*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0110*/ IADD3 R9, R9, c[0x0][0x0], RZ ; /* 0x0000000009097a10 */
/* 0x000fc80007ffe0ff */
/*0120*/ ISETP.GE.AND P0, PT, R9, c[0x0][0x17c], PT ; /* 0x00005f0009007a0c */
/* 0x000fe20003f06270 */
/*0130*/ FADD R11, R4, R3 ; /* 0x00000003040b7221 */
/* 0x004fca0000000000 */
/*0140*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0001ee000c101904 */
/*0150*/ @!P0 BRA 0xa0 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0160*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0170*/ IADD3 R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a10 */
/* 0x000fc80007ffe0ff */
/*0180*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0190*/ @!P0 BRA 0x60 ; /* 0xfffffec000008947 */
/* 0x000fea000383ffff */
/*01a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13sumMatrix2D2DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0050*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0060*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc400078e0203 */
/*0080*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fc800078e0205 */
/*0090*/ IMAD R0, R3, c[0x0][0x17c], R0 ; /* 0x00005f0003007a24 */
/* 0x000fc800078e0200 */
/*00a0*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*00b0*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*00f0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*0100*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13sumMatrix2D2DPfS_S_ii
.globl _Z13sumMatrix2D2DPfS_S_ii
.p2align 8
.type _Z13sumMatrix2D2DPfS_S_ii,@function
_Z13sumMatrix2D2DPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x1c
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s2, 16
s_and_b32 s2, s2, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s4, v[1:2]
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s14, s14, s2
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v2, s3
v_add3_u32 v0, s14, v0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sumMatrix2D2DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13sumMatrix2D2DPfS_S_ii, .Lfunc_end0-_Z13sumMatrix2D2DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13sumMatrix1D1DPfS_S_ii
.globl _Z13sumMatrix1D1DPfS_S_ii
.p2align 8
.type _Z13sumMatrix1D1DPfS_S_ii,@function
_Z13sumMatrix1D1DPfS_S_ii:
s_load_b32 s12, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_ge_i32 s15, s12
s_cbranch_scc1 .LBB1_6
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x1c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
s_add_u32 s10, s0, 32
s_addc_u32 s11, s1, 0
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_mul_i32 s13, s15, s2
s_mul_i32 s14, s3, s2
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s16
s_add_i32 s15, s3, s15
s_add_i32 s13, s13, s14
s_cmp_lt_i32 s15, s12
s_cbranch_scc0 .LBB1_6
.LBB1_3:
s_and_saveexec_b32 s16, vcc_lo
s_cbranch_execz .LBB1_2
s_load_b32 s0, s[10:11], 0xc
v_mov_b32_e32 v1, v0
s_mov_b32 s18, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s17, s0, 0xffff
.p2align 6
.LBB1_5:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, s13, v1
v_add_nc_u32_e32 v1, s17, v1
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v4, s0, s4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s0, s5, v3, s0
v_add_co_u32 v6, s0, s6, v2
v_add_co_ci_u32_e64 v7, s0, s7, v3, s0
v_cmp_le_i32_e64 s0, s2, v1
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[6:7], off
v_add_co_u32 v2, s1, s8, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s1, s9, v3, s1
s_or_b32 s18, s0, s18
s_waitcnt vmcnt(0)
v_add_f32_e32 v4, v4, v5
global_store_b32 v[2:3], v4, off
s_and_not1_b32 exec_lo, exec_lo, s18
s_cbranch_execnz .LBB1_5
s_branch .LBB1_2
.LBB1_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sumMatrix1D1DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 19
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13sumMatrix1D1DPfS_S_ii, .Lfunc_end1-_Z13sumMatrix1D1DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sumMatrix2D2DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sumMatrix2D2DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sumMatrix1D1DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 21
.sgpr_spill_count: 0
.symbol: _Z13sumMatrix1D1DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_001bcada_00000000-6_sumMatrix.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10initializePfii
.type _Z10initializePfii, @function
_Z10initializePfii:
.LFB2057:
.cfi_startproc
endbr64
imull %edx, %esi
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2057:
.size _Z10initializePfii, .-_Z10initializePfii
.globl _Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
.type _Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii, @function
_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13sumMatrix2D2DPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
.globl _Z13sumMatrix2D2DPfS_S_ii
.type _Z13sumMatrix2D2DPfS_S_ii, @function
_Z13sumMatrix2D2DPfS_S_ii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z13sumMatrix2D2DPfS_S_ii, .-_Z13sumMatrix2D2DPfS_S_ii
.globl _Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
.type _Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii, @function
_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13sumMatrix1D1DPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii, .-_Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
.globl _Z13sumMatrix1D1DPfS_S_ii
.type _Z13sumMatrix1D1DPfS_S_ii, @function
_Z13sumMatrix1D1DPfS_S_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z13sumMatrix1D1DPfS_S_ii, .-_Z13sumMatrix1D1DPfS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "run with block %d, %d"
.LC4:
.string "2D2D"
.LC5:
.string "%8.5f, %8.5f, %8.5f, %d \n"
.LC6:
.string "1D1D"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, %r13d
movq %rsi, %r14
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1073741824, %edi
call malloc@PLT
movq %rax, %rbp
movl $1073741824, %edi
call malloc@PLT
movq %rax, %rbx
movl $1073741824, %edi
call malloc@PLT
movq %rax, 8(%rsp)
movl $16384, %edx
movl $16384, %esi
movq %rbp, %rdi
call _Z10initializePfii
movl $16384, %edx
movl $16384, %esi
movq %rbx, %rdi
call _Z10initializePfii
leaq 16(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $32, %r12d
movl $16, %esi
cmpl $1, %r13d
jg .L49
.L28:
movl %r12d, 40(%rsp)
movl %esi, 44(%rsp)
movl $1, 48(%rsp)
movl $16384, %ecx
movl %ecx, %eax
cltd
idivl %r12d
movl %eax, 52(%rsp)
movl %ecx, %eax
cltd
idivl %esi
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
movl %esi, %ecx
movl %r12d, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L29:
movl $2, %ecx
movl $1073741824, %edx
movq 32(%rsp), %rsi
movq 8(%rsp), %r15
movq %r15, %rdi
call cudaMemcpy@PLT
movq %r15, %r13
movq %rbp, %r14
movq %rbx, %r12
movl $0, 4(%rsp)
movss .LC2(%rip), %xmm3
movsd .LC3(%rip), %xmm2
.L33:
movss 0(%rbp), %xmm1
addss (%rbx), %xmm1
movss (%r15), %xmm0
subss %xmm1, %xmm0
andps %xmm3, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm2, %xmm0
ja .L51
addl $1, 4(%rsp)
movl 4(%rsp), %eax
addq $4, %r15
addq $4, %rbp
addq $4, %rbx
cmpl $268435456, %eax
jne .L33
jmp .L32
.L49:
movq 8(%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r12d
movl $16, %esi
cmpl $2, %r13d
jle .L28
movq 16(%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %esi
jmp .L28
.L50:
movl $16384, %r8d
movl $16384, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z39__device_stub__Z13sumMatrix2D2DPfS_S_iiPfS_S_ii
jmp .L29
.L51:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp), %xmm0
movl 4(%rsp), %edx
pxor %xmm2, %xmm2
cvtss2sd (%r15), %xmm2
pxor %xmm1, %xmm1
cvtss2sd (%rbx), %xmm1
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
.L32:
movl $128, 76(%rsp)
movl $1, 80(%rsp)
movl $128, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L34:
movl $2, %ecx
movl $1073741824, %edx
movq 32(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebx
movss .LC2(%rip), %xmm3
movsd .LC3(%rip), %xmm2
.L38:
movss (%r14), %xmm1
addss (%r12), %xmm1
movss 0(%r13), %xmm0
subss %xmm1, %xmm0
andps %xmm3, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm2, %xmm0
ja .L53
addl $1, %ebx
addq $4, %r13
addq $4, %r12
addq $4, %r14
cmpl $268435456, %ebx
jne .L38
jmp .L37
.L52:
movl $16384, %r8d
movl $16384, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z39__device_stub__Z13sumMatrix1D1DPfS_S_iiPfS_S_ii
jmp .L34
.L53:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd (%r14), %xmm0
movl %ebx, %edx
pxor %xmm2, %xmm2
cvtss2sd 0(%r13), %xmm2
pxor %xmm1, %xmm1
cvtss2sd (%r12), %xmm1
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
.L37:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L54
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z13sumMatrix1D1DPfS_S_ii"
.LC8:
.string "_Z13sumMatrix2D2DPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z13sumMatrix1D1DPfS_S_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z13sumMatrix2D2DPfS_S_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC2:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long -350469331
.long 1058682594
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "sumMatrix.hip"
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z10initializePfii
.LCPI0_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z10initializePfii
.p2align 4, 0x90
.type _Z10initializePfii,@function
_Z10initializePfii: # @_Z10initializePfii
.cfi_startproc
# %bb.0:
imull %edx, %esi
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI0_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z10initializePfii, .Lfunc_end0-_Z10initializePfii
.cfi_endproc
# -- End function
.globl _Z28__device_stub__sumMatrix2D2DPfS_S_ii # -- Begin function _Z28__device_stub__sumMatrix2D2DPfS_S_ii
.p2align 4, 0x90
.type _Z28__device_stub__sumMatrix2D2DPfS_S_ii,@function
_Z28__device_stub__sumMatrix2D2DPfS_S_ii: # @_Z28__device_stub__sumMatrix2D2DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13sumMatrix2D2DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z28__device_stub__sumMatrix2D2DPfS_S_ii, .Lfunc_end1-_Z28__device_stub__sumMatrix2D2DPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z28__device_stub__sumMatrix1D1DPfS_S_ii # -- Begin function _Z28__device_stub__sumMatrix1D1DPfS_S_ii
.p2align 4, 0x90
.type _Z28__device_stub__sumMatrix1D1DPfS_S_ii,@function
_Z28__device_stub__sumMatrix1D1DPfS_S_ii: # @_Z28__device_stub__sumMatrix1D1DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13sumMatrix1D1DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z28__device_stub__sumMatrix1D1DPfS_S_ii, .Lfunc_end2-_Z28__device_stub__sumMatrix1D1DPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x30000000 # float 4.65661287E-10
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_2:
.quad 0x3f1a36e2eb1c432d # double 1.0E-4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r12
movl %edi, %ebp
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %rbx
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r14
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r15
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbx,%r13,4)
incq %r13
cmpq $268435456, %r13 # imm = 0x10000000
jne .LBB3_1
# %bb.2: # %.lr.ph.i70.preheader
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i70
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%r14,%r13,4)
incq %r13
cmpq $268435456, %r13 # imm = 0x10000000
jne .LBB3_3
# %bb.4: # %_Z10initializePfii.exit74
leaq 32(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl $32, %r13d
cmpl $2, %ebp
jl .LBB3_6
# %bb.5:
movq 8(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r13
.LBB3_6:
movl $16, %ecx
cmpl $3, %ebp
jl .LBB3_8
# %bb.7:
movq 16(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
.LBB3_8:
movl %r13d, %eax
movq %rcx, %r12
shlq $32, %r12
orq %rax, %r12
movl $16384, %eax # imm = 0x4000
xorl %edx, %edx
idivl %r13d
movl %eax, %ebp
movl $16384, %eax # imm = 0x4000
xorl %edx, %edx
idivl %ecx
# kill: def $eax killed $eax def $rax
shlq $32, %rax
orq %rax, %rbp
movl $.L.str, %edi
movl %r13d, %esi
movl %ecx, %edx
xorl %eax, %eax
callq printf
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $16384, 12(%rsp) # imm = 0x4000
movl $16384, 8(%rsp) # imm = 0x4000
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13sumMatrix2D2DPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq 16(%rsp), %rsi
movl $1073741824, %edx # imm = 0x40000000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_11: # =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rbx,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm1
subss %xmm1, %xmm0
andps .LCPI3_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
ucomisd .LCPI3_2(%rip), %xmm0
ja .LBB3_12
# %bb.13: # in Loop: Header=BB3_11 Depth=1
incq %r12
cmpq $268435456, %r12 # imm = 0x10000000
jne .LBB3_11
jmp .LBB3_14
.LBB3_12:
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss (%r14,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movss (%r15,%r12,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movl $.L.str.2, %edi
movl %r12d, %esi
movb $3, %al
callq printf
.LBB3_14: # %.loopexit87
movabsq $4294967424, %rdi # imm = 0x100000080
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_16
# %bb.15:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $16384, 12(%rsp) # imm = 0x4000
movl $16384, 8(%rsp) # imm = 0x4000
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13sumMatrix1D1DPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_16:
movq 16(%rsp), %rsi
movl $1073741824, %edx # imm = 0x40000000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
movaps .LCPI3_1(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN]
movsd .LCPI3_2(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB3_17: # =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rbx,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm1
subss %xmm1, %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
ucomisd %xmm3, %xmm0
ja .LBB3_18
# %bb.19: # in Loop: Header=BB3_17 Depth=1
incq %r12
cmpq $268435456, %r12 # imm = 0x10000000
jne .LBB3_17
jmp .LBB3_20
.LBB3_18:
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss (%r14,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movss (%r15,%r12,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movl $.L.str.2, %edi
movl %r12d, %esi
movb $3, %al
callq printf
.LBB3_20: # %.loopexit
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
#-----------------------------------------------------------------------
# __hip_module_ctor — HIP module constructor, run from .init_array.
# Registers the embedded fat binary once (handle cached in
# __hip_gpubin_handle) and registers both sumMatrix kernels so
# hipLaunchKernel can map their host stub addresses to device code.
# rbx (callee-saved) keeps the module handle live across the two
# __hipRegisterFunction calls. Tail-calls atexit(__hip_module_dtor).
#-----------------------------------------------------------------------
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Register the fat binary only on the first invocation.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
# Zero the four stack-passed (unused) registration pointer arguments.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# __hipRegisterFunction(handle, stub, name, name, -1, 0, ...stack zeros)
movl $_Z13sumMatrix2D2DPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Same registration for the 1D1D kernel variant.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13sumMatrix1D1DPfS_S_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Schedule teardown at process exit; tail call keeps the stack balanced.
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
#-----------------------------------------------------------------------
# __hip_module_dtor — atexit handler registered by __hip_module_ctor.
# Unregisters the fat binary iff a handle was ever created, then clears
# the cached handle so the teardown is idempotent.
#-----------------------------------------------------------------------
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax                              # realign rsp to 16 before the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)      # mark as unregistered
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13sumMatrix2D2DPfS_S_ii,@object # @_Z13sumMatrix2D2DPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z13sumMatrix2D2DPfS_S_ii
.p2align 3, 0x0
_Z13sumMatrix2D2DPfS_S_ii:
.quad _Z28__device_stub__sumMatrix2D2DPfS_S_ii
.size _Z13sumMatrix2D2DPfS_S_ii, 8
.type _Z13sumMatrix1D1DPfS_S_ii,@object # @_Z13sumMatrix1D1DPfS_S_ii
.globl _Z13sumMatrix1D1DPfS_S_ii
.p2align 3, 0x0
_Z13sumMatrix1D1DPfS_S_ii:
.quad _Z28__device_stub__sumMatrix1D1DPfS_S_ii
.size _Z13sumMatrix1D1DPfS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "run with block %d, %d"
.size .L.str, 22
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "2D2D"
.size .L.str.1, 5
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%8.5f, %8.5f, %8.5f, %d \n"
.size .L.str.2, 26
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "1D1D"
.size .L.str.3, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13sumMatrix2D2DPfS_S_ii"
.size .L__unnamed_1, 26
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13sumMatrix1D1DPfS_S_ii"
.size .L__unnamed_2, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__sumMatrix2D2DPfS_S_ii
.addrsig_sym _Z28__device_stub__sumMatrix1D1DPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13sumMatrix2D2DPfS_S_ii
.addrsig_sym _Z13sumMatrix1D1DPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include "includes.h"
// Scales one batch element of `data` by `scaling`.
// Grid mapping: blockIdx.x selects the batch element; the block's threads
// stride over the inputDimX*inputDimY*inputDimZ elements in steps of
// blockDim.x (assumes a 1-D thread block — TODO confirm at launch site).
// tickOutputsTraces is overwritten with scaling*value, while
// tickOutputsTracesLearning accumulates (+=) the same product.
__global__ void cudaNoConversion_kernel(float * data, float * tickOutputsTraces, float * tickOutputsTracesLearning, float scaling, unsigned int inputDimX, unsigned int inputDimY, unsigned int inputDimZ)
{
// Flattened element count of one (X,Y,Z) input volume.
const unsigned int inputSize = inputDimX * inputDimY * inputDimZ;
// Start of this block's batch element in the flat arrays.
const unsigned int batchOffset = blockIdx.x * inputSize;
for (unsigned int idx = threadIdx.x; idx < inputSize; idx += blockDim.x) {
float value = data[idx + batchOffset];
tickOutputsTraces[idx + batchOffset] = scaling*value;          // overwrite
tickOutputsTracesLearning[idx + batchOffset] += scaling*value; // accumulate
}
}
|
code for sm_80
Function : _Z23cudaNoConversion_kernelPfS_S_fjjj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ ULDC UR6, c[0x0][0x17c] ; /* 0x00005f0000067ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC.64 UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ UIMAD UR4, UR4, UR6, URZ ; /* 0x00000006040472a4 */
/* 0x000fc8000f8e023f */
/*0050*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fcc000f8e023f */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x001fda000bf06070 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ S2UR UR5, SR_CTAID.X ; /* 0x00000000000579c3 */
/* 0x000e220000002500 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ UIMAD UR5, UR4, UR5, URZ ; /* 0x00000005040572a4 */
/* 0x001fca000f8e023f */
/*00b0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x001fe200000001ff */
/*00c0*/ IADD3 R6, R0, UR5, RZ ; /* 0x0000000500067c10 */
/* 0x000fd2000fffe0ff */
/*00d0*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fcc00078e0007 */
/*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0007 */
/*0100*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*0110*/ FMUL R9, R2, c[0x0][0x178] ; /* 0x00005e0002097a20 */
/* 0x004fca0000400000 */
/*0120*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e8000c101906 */
/*0130*/ LDG.E R8, [R6.64] ; /* 0x0000000606087981 */
/* 0x000ea2000c1e1900 */
/*0140*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*0150*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06070 */
/*0160*/ FADD R11, R9, R8 ; /* 0x00000008090b7221 */
/* 0x004fca0000000000 */
/*0170*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0001ee000c101906 */
/*0180*/ @!P0 BRA 0xb0 ; /* 0xffffff2000008947 */
/* 0x000fea000383ffff */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include "includes.h"
// Scales one batch element of `data` by `scaling`.
// blockIdx.x selects the batch element; threads stride over the
// inputDimX*inputDimY*inputDimZ elements in steps of blockDim.x.
// tickOutputsTraces is overwritten; tickOutputsTracesLearning accumulates.
__global__ void cudaNoConversion_kernel(float * data, float * tickOutputsTraces, float * tickOutputsTracesLearning, float scaling, unsigned int inputDimX, unsigned int inputDimY, unsigned int inputDimZ)
{
const unsigned int inputSize = inputDimX * inputDimY * inputDimZ;   // elements per batch item
const unsigned int batchOffset = blockIdx.x * inputSize;            // this block's base index
for (unsigned int idx = threadIdx.x; idx < inputSize; idx += blockDim.x) {
float value = data[idx + batchOffset];
tickOutputsTraces[idx + batchOffset] = scaling*value;               // overwrite
tickOutputsTracesLearning[idx + batchOffset] += scaling*value;      // accumulate
}
}
|
.file "tmpxft_001a26a4_00000000-6_cudaNoConversion_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
#-----------------------------------------------------------------------
# __cudaUnregisterBinaryUtil (local) — atexit handler that unregisters
# the CUDA fat binary using the handle cached at registration time.
#-----------------------------------------------------------------------
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                  # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2029:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj
.type _Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj, @function
#-----------------------------------------------------------------------
# Device stub for cudaNoConversion_kernel (GCC/nvcc host side).
# Spills the 7 kernel arguments to the stack, builds the argument
# pointer array at 112..160(%rsp), pops the <<<>>> launch configuration,
# and forwards everything to cudaLaunchKernel. Stack-protector guarded.
#-----------------------------------------------------------------------
_Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj:
.LFB2051:
	.cfi_startproc
	endbr64
	subq	$184, %rsp
	.cfi_def_cfa_offset 192
	# Spill incoming SysV args (3 pointers, 1 float, 3 uints) to locals.
	movq	%rdi, 40(%rsp)
	movq	%rsi, 32(%rsp)
	movq	%rdx, 24(%rsp)
	movss	%xmm0, 20(%rsp)
	movl	%ecx, 16(%rsp)
	movl	%r8d, 12(%rsp)
	movl	%r9d, 8(%rsp)
	# Stack-protector canary.
	movq	%fs:40, %rax
	movq	%rax, 168(%rsp)
	xorl	%eax, %eax
	# Argument pointer array (void* args[7]) at 112..160(%rsp).
	leaq	40(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	32(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	24(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	20(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 160(%rsp)
	# Default gridDim/blockDim of {1,1,1} before popping the real config.
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	movl	$1, 72(%rsp)
	movl	$1, 76(%rsp)
	movl	$1, 80(%rsp)
	movl	$1, 84(%rsp)
	# __cudaPopCallConfiguration(&gridDim, &blockDim, &sharedMem, &stream)
	leaq	56(%rsp), %rcx
	leaq	48(%rsp), %rdx
	leaq	76(%rsp), %rsi
	leaq	64(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7
.L3:
	# Verify the stack canary on every exit path.
	movq	168(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L8
	addq	$184, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	# Launch path: push stream and sharedMem, then
	# cudaLaunchKernel(func, gridDim, blockDim, args, sharedMem, stream).
	# Offsets below are +16 relative to the labels above (two pushes).
	pushq	56(%rsp)
	.cfi_def_cfa_offset 200
	pushq	56(%rsp)
	.cfi_def_cfa_offset 208
	leaq	128(%rsp), %r9
	movq	92(%rsp), %rcx
	movl	100(%rsp), %r8d
	movq	80(%rsp), %rsi
	movl	88(%rsp), %edx
	leaq	_Z23cudaNoConversion_kernelPfS_S_fjjj(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 192
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2051:
	.size	_Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj, .-_Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj
.globl _Z23cudaNoConversion_kernelPfS_S_fjjj
.type _Z23cudaNoConversion_kernelPfS_S_fjjj, @function
#-----------------------------------------------------------------------
# Host-side entry symbol for the kernel: a thin trampoline to the
# device stub. This is the address passed to cudaLaunchKernel and
# __cudaRegisterFunction.
#-----------------------------------------------------------------------
_Z23cudaNoConversion_kernelPfS_S_fjjj:
.LFB2052:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                  # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	call	_Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2052:
	.size	_Z23cudaNoConversion_kernelPfS_S_fjjj, .-_Z23cudaNoConversion_kernelPfS_S_fjjj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23cudaNoConversion_kernelPfS_S_fjjj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
#-----------------------------------------------------------------------
# __sti____cudaRegisterAll (local) — static initializer run from
# .init_array. Registers the embedded fatbin, registers the kernel
# against its host entry symbol, finalizes registration, and schedules
# the unregister handler via atexit.
#-----------------------------------------------------------------------
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi                # handle is both next arg and cached
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	# Four NULL stack args for __cudaRegisterFunction (tid/bid/dims/wSize).
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d                 # thread_limit = -1 (no limit)
	leaq	.LC0(%rip), %rdx          # device-side mangled name
	movq	%rdx, %rcx
	leaq	_Z23cudaNoConversion_kernelPfS_S_fjjj(%rip), %rsi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2054:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include "includes.h"
// Scales one batch element of `data` by `scaling`.
// blockIdx.x selects the batch element; threads stride over the
// inputDimX*inputDimY*inputDimZ elements in steps of blockDim.x.
// tickOutputsTraces is overwritten; tickOutputsTracesLearning accumulates.
__global__ void cudaNoConversion_kernel(float * data, float * tickOutputsTraces, float * tickOutputsTracesLearning, float scaling, unsigned int inputDimX, unsigned int inputDimY, unsigned int inputDimZ)
{
const unsigned int inputSize = inputDimX * inputDimY * inputDimZ;   // elements per batch item
const unsigned int batchOffset = blockIdx.x * inputSize;            // this block's base index
for (unsigned int idx = threadIdx.x; idx < inputSize; idx += blockDim.x) {
float value = data[idx + batchOffset];
tickOutputsTraces[idx + batchOffset] = scaling*value;               // overwrite
tickOutputsTracesLearning[idx + batchOffset] += scaling*value;      // accumulate
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
// HIP port of the CUDA kernel: scales one batch element of `data`.
// blockIdx.x selects the batch element; threads stride over the
// inputDimX*inputDimY*inputDimZ elements in steps of blockDim.x.
// tickOutputsTraces is overwritten; tickOutputsTracesLearning accumulates.
__global__ void cudaNoConversion_kernel(float * data, float * tickOutputsTraces, float * tickOutputsTracesLearning, float scaling, unsigned int inputDimX, unsigned int inputDimY, unsigned int inputDimZ)
{
const unsigned int inputSize = inputDimX * inputDimY * inputDimZ;   // elements per batch item
const unsigned int batchOffset = blockIdx.x * inputSize;            // this block's base index
for (unsigned int idx = threadIdx.x; idx < inputSize; idx += blockDim.x) {
float value = data[idx + batchOffset];
tickOutputsTraces[idx + batchOffset] = scaling*value;               // overwrite
tickOutputsTracesLearning[idx + batchOffset] += scaling*value;      // accumulate
}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// Scales one batch element of `data` by `scaling`.
// blockIdx.x selects the batch element; threads stride over the
// inputDimX*inputDimY*inputDimZ elements in steps of blockDim.x.
// tickOutputsTraces is overwritten; tickOutputsTracesLearning accumulates.
__global__ void cudaNoConversion_kernel(float * data, float * tickOutputsTraces, float * tickOutputsTracesLearning, float scaling, unsigned int inputDimX, unsigned int inputDimY, unsigned int inputDimZ)
{
const unsigned int inputSize = inputDimX * inputDimY * inputDimZ;   // elements per batch item
const unsigned int batchOffset = blockIdx.x * inputSize;            // this block's base index
for (unsigned int idx = threadIdx.x; idx < inputSize; idx += blockDim.x) {
float value = data[idx + batchOffset];
tickOutputsTraces[idx + batchOffset] = scaling*value;               // overwrite
tickOutputsTracesLearning[idx + batchOffset] += scaling*value;      // accumulate
}
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23cudaNoConversion_kernelPfS_S_fjjj
.globl _Z23cudaNoConversion_kernelPfS_S_fjjj
.p2align 8
.type _Z23cudaNoConversion_kernelPfS_S_fjjj,@function
; gfx1100 (wave32) code for cudaNoConversion_kernel.
; Kernarg layout (s[0:1] = kernarg base): 0x00 data*, 0x08
; tickOutputsTraces*, 0x10 tickOutputsTracesLearning*, 0x18 scaling,
; 0x1c/0x20/0x24 inputDimX/Y/Z, 0x34 hidden group-size-x (16-bit).
; s15 = workgroup id x (blockIdx.x); v0 = workitem id x (threadIdx.x).
_Z23cudaNoConversion_kernelPfS_S_fjjj:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x1c        ; s2 = inputDimX, s3 = inputDimY
s_load_b32 s4, s[0:1], 0x24            ; s4 = inputDimZ
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s3, s2                   ; X * Y
s_mov_b32 s3, exec_lo
s_mul_i32 s2, s2, s4                   ; inputSize = X * Y * Z
s_delay_alu instid0(SALU_CYCLE_1)
v_cmpx_gt_u32_e64 s2, v0               ; disable lanes with tid >= inputSize
s_cbranch_execz .LBB0_3                ; all lanes out of range: exit
s_clause 0x3
s_load_b32 s9, s[0:1], 0x34            ; hidden group size (blockDim.x)
s_load_b32 s3, s[0:1], 0x18            ; scaling
s_load_b128 s[4:7], s[0:1], 0x0        ; data*, tickOutputsTraces*
s_load_b64 s[0:1], s[0:1], 0x10        ; tickOutputsTracesLearning*
v_mov_b32_e32 v2, 0                    ; zero high half for 64-bit shift
s_mul_i32 s8, s2, s15                  ; batchOffset = inputSize * blockIdx.x
s_mov_b32 s10, 0                       ; accumulated "lane done" mask
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s9, 0xffff               ; group size is a 16-bit field
.p2align 6
.LBB0_2:                               ; block-stride loop over the batch item
v_add_nc_u32_e32 v1, s8, v0            ; element index = idx + batchOffset
v_add_nc_u32_e32 v0, s9, v0            ; idx += blockDim.x
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]        ; byte offset = element index * 4
v_add_co_u32 v5, vcc_lo, s4, v3        ; &data[i]
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo
global_load_b32 v1, v[5:6], off        ; value = data[i]
v_add_co_u32 v5, vcc_lo, s6, v3        ; &tickOutputsTraces[i]
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v4, vcc_lo
v_add_co_u32 v3, vcc_lo, s0, v3        ; &tickOutputsTracesLearning[i]
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s2, v0        ; lane finishes when idx >= inputSize
s_or_b32 s10, vcc_lo, s10
s_waitcnt vmcnt(0)
v_mul_f32_e32 v7, s3, v1               ; scaling * value
global_store_b32 v[5:6], v7, off       ; tickOutputsTraces[i] = product
global_load_b32 v5, v[3:4], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v5, s3, v1              ; learning[i] += scaling * value
global_store_b32 v[3:4], v5, off
s_and_not1_b32 exec_lo, exec_lo, s10   ; retire finished lanes
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23cudaNoConversion_kernelPfS_S_fjjj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23cudaNoConversion_kernelPfS_S_fjjj, .Lfunc_end0-_Z23cudaNoConversion_kernelPfS_S_fjjj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23cudaNoConversion_kernelPfS_S_fjjj
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23cudaNoConversion_kernelPfS_S_fjjj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// Scales one batch element of `data` by `scaling`.
// blockIdx.x selects the batch element; threads stride over the
// inputDimX*inputDimY*inputDimZ elements in steps of blockDim.x.
// tickOutputsTraces is overwritten; tickOutputsTracesLearning accumulates.
__global__ void cudaNoConversion_kernel(float * data, float * tickOutputsTraces, float * tickOutputsTracesLearning, float scaling, unsigned int inputDimX, unsigned int inputDimY, unsigned int inputDimZ)
{
const unsigned int inputSize = inputDimX * inputDimY * inputDimZ;   // elements per batch item
const unsigned int batchOffset = blockIdx.x * inputSize;            // this block's base index
for (unsigned int idx = threadIdx.x; idx < inputSize; idx += blockDim.x) {
float value = data[idx + batchOffset];
tickOutputsTraces[idx + batchOffset] = scaling*value;               // overwrite
tickOutputsTracesLearning[idx + batchOffset] += scaling*value;      // accumulate
}
}
|
.text
.file "cudaNoConversion_kernel.hip"
.globl _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj # -- Begin function _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.p2align 4, 0x90
.type _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj,@function
#-----------------------------------------------------------------------
# Device stub for cudaNoConversion_kernel (clang/HIP host side).
# Spills the 7 kernel arguments, builds the void* args[7] array at
# 96..144(%rsp), pops the <<<>>> launch configuration, and forwards
# everything to hipLaunchKernel.
#-----------------------------------------------------------------------
_Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj: # @_Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill incoming SysV args (3 pointers, 1 float, 3 uints) to locals.
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
# Argument pointer array (void* args[7]) at 96..144(%rsp).
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
# __hipPopCallConfiguration(&gridDim, &blockDim, &sharedMem, &stream)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
# hipLaunchKernel(func, gridDim, blockDim, args, sharedMem, stream);
# sharedMem and stream go on the stack (offsets are pre-push values).
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z23cudaNoConversion_kernelPfS_S_fjjj, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj, .Lfunc_end0-_Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
#-----------------------------------------------------------------------
# HIP module constructor (.init_array): registers the fat binary once
# (handle cached in __hip_gpubin_handle) and the single kernel, then
# tail-calls atexit(__hip_module_dtor).
#-----------------------------------------------------------------------
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Register the fat binary only on the first invocation.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the four stack-passed (unused) registration pointer arguments.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# __hipRegisterFunction(handle, stub, name, name, -1, 0, ...stack zeros)
movl $_Z23cudaNoConversion_kernelPfS_S_fjjj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
#-----------------------------------------------------------------------
# HIP module destructor (atexit): unregisters the fat binary iff a
# handle exists, then clears it so teardown is idempotent.
#-----------------------------------------------------------------------
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax                              # realign rsp to 16 before the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)      # mark as unregistered
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23cudaNoConversion_kernelPfS_S_fjjj,@object # @_Z23cudaNoConversion_kernelPfS_S_fjjj
.section .rodata,"a",@progbits
.globl _Z23cudaNoConversion_kernelPfS_S_fjjj
.p2align 3, 0x0
_Z23cudaNoConversion_kernelPfS_S_fjjj:
.quad _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.size _Z23cudaNoConversion_kernelPfS_S_fjjj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23cudaNoConversion_kernelPfS_S_fjjj"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23cudaNoConversion_kernelPfS_S_fjjj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z23cudaNoConversion_kernelPfS_S_fjjj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ ULDC UR6, c[0x0][0x17c] ; /* 0x00005f0000067ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC.64 UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ UIMAD UR4, UR4, UR6, URZ ; /* 0x00000006040472a4 */
/* 0x000fc8000f8e023f */
/*0050*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fcc000f8e023f */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x001fda000bf06070 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ S2UR UR5, SR_CTAID.X ; /* 0x00000000000579c3 */
/* 0x000e220000002500 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ UIMAD UR5, UR4, UR5, URZ ; /* 0x00000005040572a4 */
/* 0x001fca000f8e023f */
/*00b0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x001fe200000001ff */
/*00c0*/ IADD3 R6, R0, UR5, RZ ; /* 0x0000000500067c10 */
/* 0x000fd2000fffe0ff */
/*00d0*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fcc00078e0007 */
/*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0007 */
/*0100*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*0110*/ FMUL R9, R2, c[0x0][0x178] ; /* 0x00005e0002097a20 */
/* 0x004fca0000400000 */
/*0120*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e8000c101906 */
/*0130*/ LDG.E R8, [R6.64] ; /* 0x0000000606087981 */
/* 0x000ea2000c1e1900 */
/*0140*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*0150*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06070 */
/*0160*/ FADD R11, R9, R8 ; /* 0x00000008090b7221 */
/* 0x004fca0000000000 */
/*0170*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0001ee000c101906 */
/*0180*/ @!P0 BRA 0xb0 ; /* 0xffffff2000008947 */
/* 0x000fea000383ffff */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23cudaNoConversion_kernelPfS_S_fjjj
.globl _Z23cudaNoConversion_kernelPfS_S_fjjj
.p2align 8
.type _Z23cudaNoConversion_kernelPfS_S_fjjj,@function
; gfx1100 (wave32) code for cudaNoConversion_kernel — identical to the
; earlier listing. s[0:1] = kernarg base; s15 = blockIdx.x; v0 = tid.
_Z23cudaNoConversion_kernelPfS_S_fjjj:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x1c        ; inputDimX, inputDimY
s_load_b32 s4, s[0:1], 0x24            ; inputDimZ
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s3, s2
s_mov_b32 s3, exec_lo
s_mul_i32 s2, s2, s4                   ; inputSize = X * Y * Z
s_delay_alu instid0(SALU_CYCLE_1)
v_cmpx_gt_u32_e64 s2, v0               ; disable lanes with tid >= inputSize
s_cbranch_execz .LBB0_3
s_clause 0x3
s_load_b32 s9, s[0:1], 0x34            ; hidden group size (blockDim.x)
s_load_b32 s3, s[0:1], 0x18            ; scaling
s_load_b128 s[4:7], s[0:1], 0x0        ; data*, tickOutputsTraces*
s_load_b64 s[0:1], s[0:1], 0x10        ; tickOutputsTracesLearning*
v_mov_b32_e32 v2, 0
s_mul_i32 s8, s2, s15                  ; batchOffset = inputSize * blockIdx.x
s_mov_b32 s10, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s9, 0xffff
.p2align 6
.LBB0_2:                               ; block-stride loop body
v_add_nc_u32_e32 v1, s8, v0
v_add_nc_u32_e32 v0, s9, v0            ; idx += blockDim.x
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v5, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo
global_load_b32 v1, v[5:6], off        ; value = data[i]
v_add_co_u32 v5, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v4, vcc_lo
v_add_co_u32 v3, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s2, v0
s_or_b32 s10, vcc_lo, s10
s_waitcnt vmcnt(0)
v_mul_f32_e32 v7, s3, v1               ; scaling * value
global_store_b32 v[5:6], v7, off
global_load_b32 v5, v[3:4], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v5, s3, v1              ; learning[i] += scaling * value
global_store_b32 v[3:4], v5, off
s_and_not1_b32 exec_lo, exec_lo, s10   ; retire finished lanes
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23cudaNoConversion_kernelPfS_S_fjjj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23cudaNoConversion_kernelPfS_S_fjjj, .Lfunc_end0-_Z23cudaNoConversion_kernelPfS_S_fjjj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23cudaNoConversion_kernelPfS_S_fjjj
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23cudaNoConversion_kernelPfS_S_fjjj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_001a26a4_00000000-6_cudaNoConversion_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj
.type _Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj, @function
_Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z23cudaNoConversion_kernelPfS_S_fjjj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj, .-_Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj
.globl _Z23cudaNoConversion_kernelPfS_S_fjjj
.type _Z23cudaNoConversion_kernelPfS_S_fjjj, @function
_Z23cudaNoConversion_kernelPfS_S_fjjj:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z23cudaNoConversion_kernelPfS_S_fjjjPfS_S_fjjj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23cudaNoConversion_kernelPfS_S_fjjj, .-_Z23cudaNoConversion_kernelPfS_S_fjjj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23cudaNoConversion_kernelPfS_S_fjjj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z23cudaNoConversion_kernelPfS_S_fjjj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "cudaNoConversion_kernel.hip"
.globl _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj # -- Begin function _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.p2align 4, 0x90
.type _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj,@function
_Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj: # @_Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z23cudaNoConversion_kernelPfS_S_fjjj, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj, .Lfunc_end0-_Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23cudaNoConversion_kernelPfS_S_fjjj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23cudaNoConversion_kernelPfS_S_fjjj,@object # @_Z23cudaNoConversion_kernelPfS_S_fjjj
.section .rodata,"a",@progbits
.globl _Z23cudaNoConversion_kernelPfS_S_fjjj
.p2align 3, 0x0
_Z23cudaNoConversion_kernelPfS_S_fjjj:
.quad _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.size _Z23cudaNoConversion_kernelPfS_S_fjjj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23cudaNoConversion_kernelPfS_S_fjjj"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__cudaNoConversion_kernelPfS_S_fjjj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23cudaNoConversion_kernelPfS_S_fjjj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
int main(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << std::endl;
// print contents of H
for(int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << std::endl;
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << std::endl;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
// H and D are automatically deleted when the function returns
return 0;
}
|
code for sm_80
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
int main(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << std::endl;
// print contents of H
for(int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << std::endl;
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << std::endl;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
// H and D are automatically deleted when the function returns
return 0;
}
|
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
int main(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << std::endl;
// print contents of H
for(int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << std::endl;
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << std::endl;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
// H and D are automatically deleted when the function returns
return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
int main(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << std::endl;
// print contents of H
for(int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << std::endl;
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << std::endl;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
// H and D are automatically deleted when the function returns
return 0;
}
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-7-11
*/
#include "../../XTensor.h"
#include "../../XDevice.h"
#include "ConvertDataType.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
__global__
void KernelFloatToFloat16(float * s, __half * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __float2half(s[i]);
}
}
__global__
void KernelFloat16ToFloat(__half * s, float * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __half2float(s[i]);
}
}
__global__
void KernelFloatToInt(float * inputData, int * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (int)(inputData[i]);
}
}
__global__
void KernelIntToFloat(int * inputData, float * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (float)(inputData[i]);
}
}
/*
data conversion (cuda code)
>> devID - device id
>> s - source data array
>> typeS - source data type
>> t - target data array
>> typeT - target data type
>> size - number of the items in s (and t)
*/
void _CudaConvertDataType(int devID, void * s, TENSOR_DATA_TYPE typeS, void * t, TENSOR_DATA_TYPE typeT, int size)
{
CheckNTErrors((devID >= 0), "This code must be run on GPUs!");
if(typeS == typeT)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(devID, size, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
if(typeS == X_FLOAT && typeT == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>((float*)s, (__half*)t, size);
else if(typeS == X_FLOAT16 && typeT == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>((__half*)s, (float*)t, size);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(devID, devIDBackup);
}
/*
convert data type (cuda code)
>> input - input tensor
>> output - output tensor
*/
void _CudaConvertDataType(const XTensor * input, XTensor * output)
{
if (input->dataType == output->dataType)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(input->devID, input->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(input->devID, devIDBackup);
if(input->dataType == X_FLOAT && output->dataType == X_INT)
KernelFloatToInt<<<blocks, threads>>>
((float*)input->data, (int*)output->data, input->unitNum);
else if(input->dataType == X_INT && output->dataType == X_FLOAT)
KernelIntToFloat<<<blocks, threads>>>
((int*)input->data, (float*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT && output->dataType == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>
((float*)input->data, (__half*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT16 && output->dataType == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>
((__half*)input->data, (float*)output->data, input->unitNum);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(input->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
code for sm_80
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-7-11
*/
#include "../../XTensor.h"
#include "../../XDevice.h"
#include "ConvertDataType.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
__global__
void KernelFloatToFloat16(float * s, __half * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __float2half(s[i]);
}
}
__global__
void KernelFloat16ToFloat(__half * s, float * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __half2float(s[i]);
}
}
__global__
void KernelFloatToInt(float * inputData, int * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (int)(inputData[i]);
}
}
__global__
void KernelIntToFloat(int * inputData, float * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (float)(inputData[i]);
}
}
/*
data conversion (cuda code)
>> devID - device id
>> s - source data array
>> typeS - source data type
>> t - target data array
>> typeT - target data type
>> size - number of the items in s (and t)
*/
void _CudaConvertDataType(int devID, void * s, TENSOR_DATA_TYPE typeS, void * t, TENSOR_DATA_TYPE typeT, int size)
{
CheckNTErrors((devID >= 0), "This code must be run on GPUs!");
if(typeS == typeT)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(devID, size, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
if(typeS == X_FLOAT && typeT == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>((float*)s, (__half*)t, size);
else if(typeS == X_FLOAT16 && typeT == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>((__half*)s, (float*)t, size);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(devID, devIDBackup);
}
/*
convert data type (cuda code)
>> input - input tensor
>> output - output tensor
*/
void _CudaConvertDataType(const XTensor * input, XTensor * output)
{
if (input->dataType == output->dataType)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(input->devID, input->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(input->devID, devIDBackup);
if(input->dataType == X_FLOAT && output->dataType == X_INT)
KernelFloatToInt<<<blocks, threads>>>
((float*)input->data, (int*)output->data, input->unitNum);
else if(input->dataType == X_INT && output->dataType == X_FLOAT)
KernelIntToFloat<<<blocks, threads>>>
((int*)input->data, (float*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT && output->dataType == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>
((float*)input->data, (__half*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT16 && output->dataType == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>
((__half*)input->data, (float*)output->data, input->unitNum);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(input->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
.file "tmpxft_000c4f9c_00000000-6_ConvertDataType.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3094:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3117:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3117:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-7-11
*/
#include "../../XTensor.h"
#include "../../XDevice.h"
#include "ConvertDataType.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
__global__
void KernelFloatToFloat16(float * s, __half * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __float2half(s[i]);
}
}
__global__
void KernelFloat16ToFloat(__half * s, float * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __half2float(s[i]);
}
}
__global__
void KernelFloatToInt(float * inputData, int * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (int)(inputData[i]);
}
}
__global__
void KernelIntToFloat(int * inputData, float * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (float)(inputData[i]);
}
}
/*
data conversion (cuda code)
>> devID - device id
>> s - source data array
>> typeS - source data type
>> t - target data array
>> typeT - target data type
>> size - number of the items in s (and t)
*/
void _CudaConvertDataType(int devID, void * s, TENSOR_DATA_TYPE typeS, void * t, TENSOR_DATA_TYPE typeT, int size)
{
CheckNTErrors((devID >= 0), "This code must be run on GPUs!");
if(typeS == typeT)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(devID, size, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
if(typeS == X_FLOAT && typeT == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>((float*)s, (__half*)t, size);
else if(typeS == X_FLOAT16 && typeT == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>((__half*)s, (float*)t, size);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(devID, devIDBackup);
}
/*
convert data type (cuda code)
>> input - input tensor
>> output - output tensor
*/
void _CudaConvertDataType(const XTensor * input, XTensor * output)
{
if (input->dataType == output->dataType)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(input->devID, input->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(input->devID, devIDBackup);
if(input->dataType == X_FLOAT && output->dataType == X_INT)
KernelFloatToInt<<<blocks, threads>>>
((float*)input->data, (int*)output->data, input->unitNum);
else if(input->dataType == X_INT && output->dataType == X_FLOAT)
KernelIntToFloat<<<blocks, threads>>>
((int*)input->data, (float*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT && output->dataType == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>
((float*)input->data, (__half*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT16 && output->dataType == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>
((__half*)input->data, (float*)output->data, input->unitNum);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(input->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-7-11
*/
#include "../../XTensor.h"
#include "../../XDevice.h"
#include "ConvertDataType.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include <hip/hip_fp16.h>
__global__
void KernelFloatToFloat16(float * s, __half * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __float2half(s[i]);
}
}
__global__
void KernelFloat16ToFloat(__half * s, float * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __half2float(s[i]);
}
}
__global__
void KernelFloatToInt(float * inputData, int * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (int)(inputData[i]);
}
}
__global__
void KernelIntToFloat(int * inputData, float * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (float)(inputData[i]);
}
}
/*
data conversion (cuda code)
>> devID - device id
>> s - source data array
>> typeS - source data type
>> t - target data array
>> typeT - target data type
>> size - number of the items in s (and t)
*/
void _CudaConvertDataType(int devID, void * s, TENSOR_DATA_TYPE typeS, void * t, TENSOR_DATA_TYPE typeT, int size)
{
CheckNTErrors((devID >= 0), "This code must be run on GPUs!");
if(typeS == typeT)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(devID, size, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
if(typeS == X_FLOAT && typeT == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>((float*)s, (__half*)t, size);
else if(typeS == X_FLOAT16 && typeT == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>((__half*)s, (float*)t, size);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(devID, devIDBackup);
}
/*
convert data type (cuda code)
>> input - input tensor
>> output - output tensor
*/
void _CudaConvertDataType(const XTensor * input, XTensor * output)
{
if (input->dataType == output->dataType)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(input->devID, input->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(input->devID, devIDBackup);
if(input->dataType == X_FLOAT && output->dataType == X_INT)
KernelFloatToInt<<<blocks, threads>>>
((float*)input->data, (int*)output->data, input->unitNum);
else if(input->dataType == X_INT && output->dataType == X_FLOAT)
KernelIntToFloat<<<blocks, threads>>>
((int*)input->data, (float*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT && output->dataType == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>
((float*)input->data, (__half*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT16 && output->dataType == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>
((__half*)input->data, (float*)output->data, input->unitNum);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(input->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-7-11
*/
#include "../../XTensor.h"
#include "../../XDevice.h"
#include "ConvertDataType.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include <hip/hip_fp16.h>
__global__
void KernelFloatToFloat16(float * s, __half * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __float2half(s[i]);
}
}
__global__
void KernelFloat16ToFloat(__half * s, float * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __half2float(s[i]);
}
}
__global__
void KernelFloatToInt(float * inputData, int * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (int)(inputData[i]);
}
}
__global__
void KernelIntToFloat(int * inputData, float * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (float)(inputData[i]);
}
}
/*
data conversion (cuda code)
>> devID - device id
>> s - source data array
>> typeS - source data type
>> t - target data array
>> typeT - target data type
>> size - number of the items in s (and t)
*/
void _CudaConvertDataType(int devID, void * s, TENSOR_DATA_TYPE typeS, void * t, TENSOR_DATA_TYPE typeT, int size)
{
CheckNTErrors((devID >= 0), "This code must be run on GPUs!");
if(typeS == typeT)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(devID, size, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
if(typeS == X_FLOAT && typeT == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>((float*)s, (__half*)t, size);
else if(typeS == X_FLOAT16 && typeT == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>((__half*)s, (float*)t, size);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(devID, devIDBackup);
}
/*
convert data type (cuda code)
>> input - input tensor
>> output - output tensor
*/
void _CudaConvertDataType(const XTensor * input, XTensor * output)
{
if (input->dataType == output->dataType)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(input->devID, input->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(input->devID, devIDBackup);
if(input->dataType == X_FLOAT && output->dataType == X_INT)
KernelFloatToInt<<<blocks, threads>>>
((float*)input->data, (int*)output->data, input->unitNum);
else if(input->dataType == X_INT && output->dataType == X_FLOAT)
KernelIntToFloat<<<blocks, threads>>>
((int*)input->data, (float*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT && output->dataType == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>
((float*)input->data, (__half*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT16 && output->dataType == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>
((__half*)input->data, (float*)output->data, input->unitNum);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(input->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-7-11
*/
#include "../../XTensor.h"
#include "../../XDevice.h"
#include "ConvertDataType.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include <hip/hip_fp16.h>
__global__
void KernelFloatToFloat16(float * s, __half * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __float2half(s[i]);
}
}
__global__
void KernelFloat16ToFloat(__half * s, float * t, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
t[i] = __half2float(s[i]);
}
}
__global__
void KernelFloatToInt(float * inputData, int * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (int)(inputData[i]);
}
}
__global__
void KernelIntToFloat(int * inputData, float * outputData, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
outputData[i] = (float)(inputData[i]);
}
}
/*
data conversion (cuda code)
>> devID - device id
>> s - source data array
>> typeS - source data type
>> t - target data array
>> typeT - target data type
>> size - number of the items in s (and t)
*/
void _CudaConvertDataType(int devID, void * s, TENSOR_DATA_TYPE typeS, void * t, TENSOR_DATA_TYPE typeT, int size)
{
CheckNTErrors((devID >= 0), "This code must be run on GPUs!");
if(typeS == typeT)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(devID, size, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
if(typeS == X_FLOAT && typeT == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>((float*)s, (__half*)t, size);
else if(typeS == X_FLOAT16 && typeT == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>((__half*)s, (float*)t, size);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(devID, devIDBackup);
}
/*
convert data type (cuda code)
>> input - input tensor
>> output - output tensor
*/
void _CudaConvertDataType(const XTensor * input, XTensor * output)
{
if (input->dataType == output->dataType)
return;
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(input->devID, input->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(input->devID, devIDBackup);
if(input->dataType == X_FLOAT && output->dataType == X_INT)
KernelFloatToInt<<<blocks, threads>>>
((float*)input->data, (int*)output->data, input->unitNum);
else if(input->dataType == X_INT && output->dataType == X_FLOAT)
KernelIntToFloat<<<blocks, threads>>>
((int*)input->data, (float*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT && output->dataType == X_FLOAT16)
KernelFloatToFloat16<<<blocks, threads>>>
((float*)input->data, (__half*)output->data, input->unitNum);
else if(input->dataType == X_FLOAT16 && output->dataType == X_FLOAT)
KernelFloat16ToFloat<<<blocks, threads>>>
((__half*)input->data, (float*)output->data, input->unitNum);
else{
ShowNTErrors("Unsupported data types for conversion!");
}
ProtectCudaDev(input->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
.text
.file "ConvertDataType.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_000c4f9c_00000000-6_ConvertDataType.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3094:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3117:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3117:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "ConvertDataType.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include "includes.h"
__global__ void CUDAkernel_multiply( float* sourceA, float* sourceB, float* destination, int size )
{
int index = CUDASTDOFFSET;
float a = sourceA[index];
float b = sourceB[index];
if( index < size )
{
destination[index] = a * b;
}
}
|
code for sm_80
Function : _Z19CUDAkernel_multiplyPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Z ; /* 0x0000000000007919 */
/* 0x000e280000002700 */
/*0020*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0030*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e680000002500 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000ea20000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x10], R3 ; /* 0x0000040000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD R0, R0, c[0x0][0xc], R5 ; /* 0x0000030000007a24 */
/* 0x002fc800078e0205 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0207 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00c0*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*00d0*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00f0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0100*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*0110*/ FMUL R9, R2, R5 ; /* 0x0000000502097220 */
/* 0x004fca0000400000 */
/*0120*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include "includes.h"
__global__ void CUDAkernel_multiply( float* sourceA, float* sourceB, float* destination, int size )
{
int index = CUDASTDOFFSET;
float a = sourceA[index];
float b = sourceB[index];
if( index < size )
{
destination[index] = a * b;
}
}
|
.file "tmpxft_00080c3c_00000000-6_CUDAkernel_multiply.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z44__device_stub__Z19CUDAkernel_multiplyPfS_S_iPfS_S_i
.type _Z44__device_stub__Z19CUDAkernel_multiplyPfS_S_iPfS_S_i, @function
_Z44__device_stub__Z19CUDAkernel_multiplyPfS_S_iPfS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19CUDAkernel_multiplyPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z44__device_stub__Z19CUDAkernel_multiplyPfS_S_iPfS_S_i, .-_Z44__device_stub__Z19CUDAkernel_multiplyPfS_S_iPfS_S_i
.globl _Z19CUDAkernel_multiplyPfS_S_i
.type _Z19CUDAkernel_multiplyPfS_S_i, @function
_Z19CUDAkernel_multiplyPfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z19CUDAkernel_multiplyPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19CUDAkernel_multiplyPfS_S_i, .-_Z19CUDAkernel_multiplyPfS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19CUDAkernel_multiplyPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19CUDAkernel_multiplyPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include "includes.h"
__global__ void CUDAkernel_multiply( float* sourceA, float* sourceB, float* destination, int size )
{
int index = CUDASTDOFFSET;
float a = sourceA[index];
float b = sourceB[index];
if( index < size )
{
destination[index] = a * b;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void CUDAkernel_multiply( float* sourceA, float* sourceB, float* destination, int size )
{
int index = CUDASTDOFFSET;
float a = sourceA[index];
float b = sourceB[index];
if( index < size )
{
destination[index] = a * b;
}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void CUDAkernel_multiply( float* sourceA, float* sourceB, float* destination, int size )
{
int index = CUDASTDOFFSET;
float a = sourceA[index];
float b = sourceB[index];
if( index < size )
{
destination[index] = a * b;
}
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19CUDAkernel_multiplyPfS_S_i
.globl _Z19CUDAkernel_multiplyPfS_S_i
.p2align 8
.type _Z19CUDAkernel_multiplyPfS_S_i,@function
_Z19CUDAkernel_multiplyPfS_S_i:
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x20
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s5, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_mul_i32 s3, s3, s15
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s3, s3, s14
s_mul_i32 s2, s3, s2
s_and_b32 s3, s4, 0xffff
s_add_i32 s2, s2, s13
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s5, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_mul_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19CUDAkernel_multiplyPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19CUDAkernel_multiplyPfS_S_i, .Lfunc_end0-_Z19CUDAkernel_multiplyPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19CUDAkernel_multiplyPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19CUDAkernel_multiplyPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.