system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z7takeLogPfS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.SHL.U32 R6, R0, 0x2, RZ ; /* 0x0000000200067824 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc800078e00ff */ /*0090*/ IMAD.WIDE R6, R6, R3, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fca00078e0203 */ /*00a0*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x000ea2000c1e1900 */ /*00b0*/ BSSY B0, 0x4c0 ; /* 0x0000040000007945 */ /* 0x000fe20003800000 */ /*00c0*/ F2F.F64.F32 R4, R2 ; /* 0x0000000200047310 */ /* 0x0040620000201800 */ /*00d0*/ FSETP.GT.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720b */ /* 0x000fe20003f24000 */ /*00e0*/ IMAD.MOV.U32 R2, RZ, RZ, -0x3ff ; /* 0xfffffc01ff027424 */ /* 0x001fc600078e00ff */ /*00f0*/ FSEL R5, R5, 0.006091669667512178421, P1 ; /* 0x3bc79ca105057808 */ /* 0x002fe40000800000 */ /*0100*/ FSEL R4, R4, 2.2534666340150459063e-31, P1 ; /* 0x0c92422304047808 */ /* 0x000fe40000800000 */ /*0110*/ ISETP.GT.AND P0, PT, R5, 0xfffff, PT ; /* 0x000fffff0500780c */ /* 0x000fe20003f04270 */ /*0120*/ IMAD.MOV.U32 R9, RZ, RZ, R5 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0005 */ /*0130*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */ /* 0x000fd400078e0004 */ /*0140*/ @!P0 DMUL R8, R8, 1.80143985094819840000e+16 ; /* 
0x4350000008088828 */ /* 0x000e220000000000 */ /*0150*/ @!P0 MOV R2, 0xfffffbcb ; /* 0xfffffbcb00028802 */ /* 0x000fd20000000f00 */ /*0160*/ @!P0 MOV R5, R9 ; /* 0x0000000900058202 */ /* 0x001fe20000000f00 */ /*0170*/ @!P0 IMAD.MOV.U32 R4, RZ, RZ, R8 ; /* 0x000000ffff048224 */ /* 0x000fc600078e0008 */ /*0180*/ IADD3 R10, R5, -0x1, RZ ; /* 0xffffffff050a7810 */ /* 0x000fc80007ffe0ff */ /*0190*/ ISETP.GE.U32.AND P1, PT, R10, 0x7fefffff, PT ; /* 0x7fefffff0a00780c */ /* 0x000fda0003f26070 */ /*01a0*/ @P1 IMAD.MOV.U32 R6, RZ, RZ, 0x0 ; /* 0x00000000ff061424 */ /* 0x000fe200078e00ff */ /*01b0*/ @P1 FSETP.NEU.AND P2, PT, R9, RZ, PT ; /* 0x000000ff0900120b */ /* 0x000fe20003f4d000 */ /*01c0*/ @P1 IMAD.MOV.U32 R7, RZ, RZ, 0x7ff00000 ; /* 0x7ff00000ff071424 */ /* 0x000fcc00078e00ff */ /*01d0*/ @P1 DFMA R6, R8, R6, +INF ; /* 0x7ff000000806142b */ /* 0x000e140000000006 */ /*01e0*/ @P1 FSEL R6, R6, RZ, P2 ; /* 0x000000ff06061208 */ /* 0x001fe40001000000 */ /*01f0*/ @P1 FSEL R7, R7, -QNAN , P2 ; /* 0xfff0000007071808 */ /* 0x000fe20001000000 */ /*0200*/ @P1 BRA 0x4b0 ; /* 0x000002a000001947 */ /* 0x000fea0003800000 */ /*0210*/ LOP3.LUT R6, R5.reuse, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff05067812 */ /* 0x040fe200078ec0ff */ /*0220*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */ /* 0x000fe200078e00ff */ /*0230*/ MOV R16, 0x3ae80f1e ; /* 0x3ae80f1e00107802 */ /* 0x000fe20000000f00 */ /*0240*/ IMAD.MOV.U32 R17, RZ, RZ, 0x3eb1380b ; /* 0x3eb1380bff117424 */ /* 0x000fe200078e00ff */ /*0250*/ LOP3.LUT R7, R6, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000006077812 */ /* 0x000fe200078efcff */ /*0260*/ IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff067224 */ /* 0x000fe200078e0004 */ /*0270*/ LEA.HI R5, R5, R2, RZ, 0xc ; /* 0x0000000205057211 */ /* 0x000fe400078f60ff */ /*0280*/ ISETP.GE.AND P0, PT, R7, 0x3ff6a09f, PT ; /* 0x3ff6a09f0700780c */ /* 0x000fda0003f06270 */ /*0290*/ @P0 IADD3 R9, R7, -0x100000, RZ ; /* 0xfff0000007090810 */ /* 0x000fe40007ffe0ff */ /*02a0*/ @P0 IADD3 R5, 
R5, 0x1, RZ ; /* 0x0000000105050810 */ /* 0x000fc60007ffe0ff */ /*02b0*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, R9 ; /* 0x000000ffff070224 */ /* 0x000fe200078e0009 */ /*02c0*/ LOP3.LUT R4, R5, 0x80000000, RZ, 0x3c, !PT ; /* 0x8000000005047812 */ /* 0x000fe200078e3cff */ /*02d0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x43300000 ; /* 0x43300000ff057424 */ /* 0x000fc800078e00ff */ /*02e0*/ DADD R14, R6, 1 ; /* 0x3ff00000060e7429 */ /* 0x000e080000000000 */ /*02f0*/ DADD R6, R6, -1 ; /* 0xbff0000006067429 */ /* 0x000fe40000000000 */ /*0300*/ MUFU.RCP64H R9, R15 ; /* 0x0000000f00097308 */ /* 0x001e240000001800 */ /*0310*/ DADD R4, R4, c[0x2][0x38] ; /* 0x00800e0004047629 */ /* 0x000fc80000000000 */ /*0320*/ DFMA R10, -R14, R8, 1 ; /* 0x3ff000000e0a742b */ /* 0x001e0c0000000108 */ /*0330*/ DFMA R10, R10, R10, R10 ; /* 0x0000000a0a0a722b */ /* 0x001e0c000000000a */ /*0340*/ DFMA R8, R8, R10, R8 ; /* 0x0000000a0808722b */ /* 0x001e0c0000000008 */ /*0350*/ DMUL R10, R8, R6 ; /* 0x00000006080a7228 */ /* 0x001e0c0000000000 */ /*0360*/ DFMA R10, R8, R6, R10 ; /* 0x00000006080a722b */ /* 0x001e0c000000000a */ /*0370*/ DMUL R12, R10, R10 ; /* 0x0000000a0a0c7228 */ /* 0x001e080000000000 */ /*0380*/ DADD R14, R6, -R10 ; /* 0x00000000060e7229 */ /* 0x000e48000000080a */ /*0390*/ DFMA R16, R12, R16, c[0x2][0x0] ; /* 0x008000000c10762b */ /* 0x001e080000000010 */ /*03a0*/ DADD R18, R14, R14 ; /* 0x000000000e127229 */ /* 0x002fc8000000000e */ /*03b0*/ DFMA R16, R12, R16, c[0x2][0x8] ; /* 0x008002000c10762b */ /* 0x001e080000000010 */ /*03c0*/ DFMA R14, R4, c[0x2][0x40], R10 ; /* 0x00801000040e7a2b */ /* 0x000fc8000000000a */ /*03d0*/ DFMA R16, R12, R16, c[0x2][0x10] ; /* 0x008004000c10762b */ /* 0x001e080000000010 */ /*03e0*/ DFMA R18, R6, -R10, R18 ; /* 0x8000000a0612722b */ /* 0x000fc80000000012 */ /*03f0*/ DFMA R16, R12, R16, c[0x2][0x18] ; /* 0x008006000c10762b */ /* 0x001e080000000010 */ /*0400*/ DFMA R6, -R4, c[0x2][0x40], R14 ; /* 0x0080100004067a2b */ /* 0x000fc8000000010e */ /*0410*/ DFMA R16, R12, 
R16, c[0x2][0x20] ; /* 0x008008000c10762b */ /* 0x001e080000000010 */ /*0420*/ DMUL R18, R8, R18 ; /* 0x0000001208127228 */ /* 0x000fc80000000000 */ /*0430*/ DFMA R16, R12, R16, c[0x2][0x28] ; /* 0x00800a000c10762b */ /* 0x001e080000000010 */ /*0440*/ DADD R6, -R10, R6 ; /* 0x000000000a067229 */ /* 0x000fc80000000106 */ /*0450*/ DFMA R16, R12, R16, c[0x2][0x30] ; /* 0x00800c000c10762b */ /* 0x001e0c0000000010 */ /*0460*/ DMUL R16, R12, R16 ; /* 0x000000100c107228 */ /* 0x001e0c0000000000 */ /*0470*/ DFMA R16, R10, R16, R18 ; /* 0x000000100a10722b */ /* 0x001e0c0000000012 */ /*0480*/ DADD R6, R16, -R6 ; /* 0x0000000010067229 */ /* 0x001e0c0000000806 */ /*0490*/ DFMA R6, R4, c[0x2][0x48], R6 ; /* 0x0080120004067a2b */ /* 0x001e0c0000000006 */ /*04a0*/ DADD R6, R14, R6 ; /* 0x000000000e067229 */ /* 0x00104c0000000006 */ /*04b0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*04c0*/ F2F.F32.F64 R7, R6 ; /* 0x0000000600077310 */ /* 0x002e620000301000 */ /*04d0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fca00078e0203 */ /*04e0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x002fe2000c101904 */ /*04f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0500*/ BRA 0x500; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0510*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0520*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0530*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0540*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0550*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0560*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0570*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0580*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0590*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
/*05c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*05f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z7takeLogPfS_i .globl _Z7takeLogPfS_i .p2align 8 .type _Z7takeLogPfS_i,@function _Z7takeLogPfS_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshlrev_b32_e32 v2, 1, v1 s_mov_b32 s5, 0x3fc38538 s_mov_b32 s4, 0x6b47b09a s_mov_b32 s7, 0x3fc3ab76 s_mov_b32 s6, 0xbf559e2b v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo s_mov_b32 s1, 0x3fe55555 s_mov_b32 s0, 0x55555555 global_load_b32 v0, v[2:3], off s_waitcnt vmcnt(0) v_cvt_f64_f32_e32 v[2:3], v0 v_cmp_lt_f32_e32 vcc_lo, 0, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v3, 0x3bc79ca1, v3, vcc_lo v_cndmask_b32_e32 v2, 0xc924223, v2, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_frexp_mant_f64_e32 v[4:5], v[2:3] v_cmp_gt_f64_e32 vcc_lo, s[0:1], v[4:5] s_mov_b32 s0, 0x55555780 v_cndmask_b32_e64 v0, 0, 1, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_ldexp_f64 v[4:5], v[4:5], v0 v_frexp_exp_i32_f64_e32 v0, v[2:3] v_add_f64 v[6:7], v[4:5], 1.0 v_add_f64 v[12:13], v[4:5], -1.0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) v_subrev_co_ci_u32_e32 v0, vcc_lo, 0, v0, vcc_lo v_cmp_class_f64_e64 vcc_lo, v[2:3], 0x204 v_rcp_f64_e32 v[8:9], v[6:7] v_add_f64 v[14:15], v[6:7], -1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_f64 v[4:5], v[4:5], -v[14:15] s_waitcnt_depctr 0xfff v_fma_f64 v[10:11], 
-v[6:7], v[8:9], 1.0 v_fma_f64 v[8:9], v[10:11], v[8:9], v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0 v_fma_f64 v[8:9], v[10:11], v[8:9], v[8:9] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f64 v[10:11], v[12:13], v[8:9] v_mul_f64 v[16:17], v[6:7], v[10:11] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[6:7], v[10:11], v[6:7], -v[16:17] v_fma_f64 v[4:5], v[10:11], v[4:5], v[6:7] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f64 v[6:7], v[16:17], v[4:5] v_add_f64 v[14:15], v[12:13], -v[6:7] v_add_f64 v[16:17], v[6:7], -v[16:17] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[12:13], v[12:13], -v[14:15] v_add_f64 v[4:5], v[16:17], -v[4:5] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f64 v[6:7], v[12:13], -v[6:7] v_add_f64 v[4:5], v[4:5], v[6:7] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f64 v[4:5], v[14:15], v[4:5] v_mul_f64 v[4:5], v[8:9], v[4:5] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f64 v[6:7], v[10:11], v[4:5] v_mul_f64 v[8:9], v[6:7], v[6:7] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_fma_f64 v[12:13], v[8:9], s[6:7], s[4:5] s_mov_b32 s5, 0x3fc7474d s_mov_b32 s4, 0xd7f4df2e v_mul_f64 v[14:15], v[6:7], v[8:9] v_fma_f64 v[12:13], v[8:9], v[12:13], s[4:5] s_mov_b32 s5, 0x3fcc71c0 s_mov_b32 s4, 0x16291751 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_fma_f64 v[12:13], v[8:9], v[12:13], s[4:5] s_mov_b32 s5, 0x3fd24924 s_mov_b32 s4, 0x9b27acf1 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_fma_f64 v[12:13], v[8:9], v[12:13], s[4:5] s_mov_b32 s5, 0x3fd99999 s_mov_b32 s4, 0x998ef7b6 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_fma_f64 v[12:13], v[8:9], v[12:13], s[4:5] s_delay_alu 
instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3) v_fma_f64 v[8:9], v[8:9], v[12:13], s[0:1] v_ldexp_f64 v[12:13], v[6:7], 1 v_add_f64 v[6:7], v[6:7], -v[10:11] s_mov_b32 s1, 0x3fe62e42 s_mov_b32 s0, 0xfefa39ef v_mul_f64 v[8:9], v[14:15], v[8:9] v_cvt_f64_i32_e32 v[14:15], v0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_f64 v[4:5], v[4:5], -v[6:7] v_add_f64 v[10:11], v[12:13], v[8:9] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f64 v[16:17], v[14:15], s[0:1] v_ldexp_f64 v[4:5], v[4:5], 1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_f64 v[6:7], v[10:11], -v[12:13] v_fma_f64 v[12:13], v[14:15], s[0:1], -v[16:17] s_mov_b32 s1, 0x3c7abc9e s_mov_b32 s0, 0x3b39803f s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[6:7], v[8:9], -v[6:7] v_fma_f64 v[8:9], v[14:15], s[0:1], v[12:13] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[4:5], v[4:5], v[6:7] v_add_f64 v[6:7], v[16:17], v[8:9] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[12:13], v[10:11], v[4:5] v_add_f64 v[16:17], v[6:7], -v[16:17] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_f64 v[14:15], v[6:7], v[12:13] v_add_f64 v[10:11], v[12:13], -v[10:11] v_add_f64 v[8:9], v[8:9], -v[16:17] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_f64 v[18:19], v[14:15], -v[6:7] v_add_f64 v[4:5], v[4:5], -v[10:11] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_f64 v[20:21], v[14:15], -v[18:19] v_add_f64 v[10:11], v[12:13], -v[18:19] v_add_f64 v[12:13], v[8:9], v[4:5] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f64 v[6:7], v[6:7], -v[20:21] v_add_f64 v[6:7], v[10:11], v[6:7] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[10:11], v[12:13], -v[8:9] v_add_f64 v[6:7], 
v[12:13], v[6:7] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_add_f64 v[12:13], v[12:13], -v[10:11] v_add_f64 v[4:5], v[4:5], -v[10:11] v_add_f64 v[16:17], v[14:15], v[6:7] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[8:9], v[8:9], -v[12:13] v_add_f64 v[10:11], v[16:17], -v[14:15] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f64 v[4:5], v[4:5], v[8:9] v_add_f64 v[6:7], v[6:7], -v[10:11] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_f64 v[4:5], v[4:5], v[6:7] v_add_f64 v[4:5], v[16:17], v[4:5] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v0, v4, v2, vcc_lo v_cndmask_b32_e32 v4, v5, v3, vcc_lo v_cmp_ngt_f64_e32 vcc_lo, 0, v[2:3] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v5, 0x7ff80000, v4, vcc_lo v_cmp_nge_f64_e32 vcc_lo, 0, v[2:3] v_cndmask_b32_e32 v4, 0, v0, vcc_lo v_cmp_neq_f64_e32 vcc_lo, 0, v[2:3] v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] v_cndmask_b32_e32 v5, 0xfff00000, v5, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_cvt_f32_f64_e32 v3, v[4:5] global_store_b32 v[0:1], v3, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7takeLogPfS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 
.amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 22 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z7takeLogPfS_i, .Lfunc_end0-_Z7takeLogPfS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 
.value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7takeLogPfS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7takeLogPfS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 22 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001b2b24_00000000-6_takeLog.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z29__device_stub__Z7takeLogPfS_iPfS_i .type _Z29__device_stub__Z7takeLogPfS_iPfS_i, @function _Z29__device_stub__Z7takeLogPfS_iPfS_i: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z7takeLogPfS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z29__device_stub__Z7takeLogPfS_iPfS_i, .-_Z29__device_stub__Z7takeLogPfS_iPfS_i .globl _Z7takeLogPfS_i .type _Z7takeLogPfS_i, @function _Z7takeLogPfS_i: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z7takeLogPfS_iPfS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z7takeLogPfS_i, .-_Z7takeLogPfS_i 
.section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z7takeLogPfS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z7takeLogPfS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "takeLog.hip" .globl _Z22__device_stub__takeLogPfS_i # -- Begin function _Z22__device_stub__takeLogPfS_i .p2align 4, 0x90 .type _Z22__device_stub__takeLogPfS_i,@function _Z22__device_stub__takeLogPfS_i: # @_Z22__device_stub__takeLogPfS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z7takeLogPfS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z22__device_stub__takeLogPfS_i, .Lfunc_end0-_Z22__device_stub__takeLogPfS_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7takeLogPfS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi 
je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z7takeLogPfS_i,@object # @_Z7takeLogPfS_i .section .rodata,"a",@progbits .globl _Z7takeLogPfS_i .p2align 3, 0x0 _Z7takeLogPfS_i: .quad _Z22__device_stub__takeLogPfS_i .size _Z7takeLogPfS_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z7takeLogPfS_i" .size .L__unnamed_1, 16 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__takeLogPfS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7takeLogPfS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "Atomic.cuh" __device__ bool tryAtomicStore(volatile int* ref, int oldValue, int newValue) { int replaced = atomicCAS((int*)ref, oldValue, newValue); return replaced == oldValue; } __device__ bool tryAtomicStore(volatile float* ref, float oldValue, float newValue) { return tryAtomicStore((volatile int*)ref, __float_as_int(oldValue), __float_as_int(newValue)); }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "Atomic.cuh" __device__ bool tryAtomicStore(volatile int* ref, int oldValue, int newValue) { int replaced = atomicCAS((int*)ref, oldValue, newValue); return replaced == oldValue; } __device__ bool tryAtomicStore(volatile float* ref, float oldValue, float newValue) { return tryAtomicStore((volatile int*)ref, __float_as_int(oldValue), __float_as_int(newValue)); }
.file "tmpxft_0003abb8_00000000-6_Atomic.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2031: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2031: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14tryAtomicStorePViii .type _Z14tryAtomicStorePViii, @function _Z14tryAtomicStorePViii: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z14tryAtomicStorePViii, .-_Z14tryAtomicStorePViii .globl _Z14tryAtomicStorePVfff .type _Z14tryAtomicStorePVfff, @function _Z14tryAtomicStorePVfff: .LFB2028: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2028: .size _Z14tryAtomicStorePVfff, .-_Z14tryAtomicStorePVfff .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm 
_ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "Atomic.cuh" __device__ bool tryAtomicStore(volatile int* ref, int oldValue, int newValue) { int replaced = atomicCAS((int*)ref, oldValue, newValue); return replaced == oldValue; } __device__ bool tryAtomicStore(volatile float* ref, float oldValue, float newValue) { return tryAtomicStore((volatile int*)ref, __float_as_int(oldValue), __float_as_int(newValue)); }
#pragma once #include <hip/hip_runtime.h> __device__ bool tryAtomicStore(volatile int* ref, int oldValue, int newValue); __device__ bool tryAtomicStore(volatile float* ref, float oldValue, float newValue);
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#pragma once #include <hip/hip_runtime.h> __device__ bool tryAtomicStore(volatile int* ref, int oldValue, int newValue); __device__ bool tryAtomicStore(volatile float* ref, float oldValue, float newValue);
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// Atomic.cuh — device-side compare-and-swap "try store" helpers (HIP port).
#pragma once
#include <hip/hip_runtime.h>

// Atomically stores newValue into *ref iff *ref currently equals oldValue;
// returns true on success (the CAS observed oldValue).
__device__ bool tryAtomicStore(volatile int* ref, int oldValue, int newValue);
// Float overload; equality is decided on the raw bit pattern (see the .hip impl).
__device__ bool tryAtomicStore(volatile float* ref, float oldValue, float newValue);
# x86-64 host assembly (GAS/AT&T) emitted by ROCm clang for Atomic.hip: the file
# defines only device functions, so the host side carries just the __hip_cuid_
# marker byte plus linker-option/ident sections. Machine-generated — do not hand-edit.
.text .file "Atomic.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
; AMDGPU (gfx1100) device assembly emitted by ROCm clang for a translation unit
; with no kernels: only the padded .text preamble, the __hip_cuid_ marker byte,
; and empty amdhsa metadata. Machine-generated — do not hand-edit.
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
# x86-64 host assembly (GAS/AT&T) generated by nvcc+GCC from Atomic.cu.
# _ZL24__sti____cudaRegisterAllv (via .init_array) registers the fatbinary at
# startup; _ZL26__cudaUnregisterBinaryUtilv unregisters it at exit. The two
# exported _Z14tryAtomicStore* symbols are host stubs that call exit(1) — the
# real implementations are device-only. Machine-generated — do not hand-edit.
.file "tmpxft_0003abb8_00000000-6_Atomic.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2031: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2031: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14tryAtomicStorePViii .type _Z14tryAtomicStorePViii, @function _Z14tryAtomicStorePViii: .LFB2027: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2027: .size _Z14tryAtomicStorePViii, .-_Z14tryAtomicStorePViii .globl _Z14tryAtomicStorePVfff .type _Z14tryAtomicStorePVfff, @function _Z14tryAtomicStorePVfff: .LFB2028: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2028: .size _Z14tryAtomicStorePVfff, .-_Z14tryAtomicStorePVfff .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm 
_ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
# x86-64 host assembly (GAS/AT&T) emitted by ROCm clang for Atomic.hip: the file
# defines only device functions, so the host side carries just the __hip_cuid_
# marker byte plus linker-option/ident sections. Machine-generated — do not hand-edit.
.text .file "Atomic.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// Vector addition demo: fills two host vectors with random floats, adds them
// element-wise on the GPU (kernel Vadd), and verifies the result on the host.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

// KERNEL
// C[i] = A[i] + B[i] for i in [0, N); one element per thread.
__global__ void Vadd(float* A, float* B, float* C, int N) {
/*
    int tx = threadIdx.x;
    int i = 4*(blockIdx.x * blockDim.x) + tx;
    int count = 0;
    while (i < N && count < 4) {
        C[i] = A[i] + B[i];
        i += blockDim.x;
        count ++;
    }
*/
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i]; // guard: grid may overshoot N
}

// HOST
int main(int argc, char** argv) {
    // Variables
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 5000000; // Number of floats per vector
    size_t size = N * sizeof(float);

    // Allocate vectors in host memory and device memory
    // NOTE(review): allocation results are not checked — failures surface later.
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    // Initialize input vectors
    for (int i = 0; i < N; ++i){
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    int threadsPerBlock = 256;
    // FILL IN KERNEL SETUP AND INVOCATION
    // int blocks = N / (4*threadsPerBlock);
    // if (N % (4*threadsPerBlock) != 0) blocks++;
    // Round the block count up so every element gets a thread.
    int blocks = N / threadsPerBlock;
    if (N % threadsPerBlock != 0) blocks++;
    Vadd <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, N);
    cudaDeviceSynchronize();

    // Copy result from device memory to host memory
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    bool correct = true;
    // Calculate solution on the host and compare
    // (exact float equality is valid here: both sides do one IEEE addition)
    for (int i = 0; i < N; i++) {
        if (h_C[i] != (h_A[i] + h_B[i])) {
            printf("ERROR: expected h_C[%i] = %f but received %f\n", i, h_A[i] + h_B[i], h_C[i]);
            correct = false;
            break;
        }
    }
    if (correct) printf("---PASSED---\n");

    // Free host and device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
// cuobjdump SASS listing (sm_80) for kernel _Z4VaddPfS_S_i: computes the global
// thread index, exits if it is >= N, otherwise loads A[i] and B[i], adds them,
// and stores C[i]. Machine-generated listing — do not hand-edit.
code for sm_80 Function : _Z4VaddPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0207 */ /*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ 
NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// Vector addition demo: fills two host vectors with random floats, adds them
// element-wise on the GPU (kernel Vadd), and verifies the result on the host.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

// KERNEL
// C[i] = A[i] + B[i] for i in [0, N); one element per thread.
__global__ void Vadd(float* A, float* B, float* C, int N) {
/*
    int tx = threadIdx.x;
    int i = 4*(blockIdx.x * blockDim.x) + tx;
    int count = 0;
    while (i < N && count < 4) {
        C[i] = A[i] + B[i];
        i += blockDim.x;
        count ++;
    }
*/
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i]; // guard: grid may overshoot N
}

// HOST
int main(int argc, char** argv) {
    // Variables
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 5000000; // Number of floats per vector
    size_t size = N * sizeof(float);

    // Allocate vectors in host memory and device memory
    // NOTE(review): allocation results are not checked — failures surface later.
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    // Initialize input vectors
    for (int i = 0; i < N; ++i){
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    int threadsPerBlock = 256;
    // FILL IN KERNEL SETUP AND INVOCATION
    // int blocks = N / (4*threadsPerBlock);
    // if (N % (4*threadsPerBlock) != 0) blocks++;
    // Round the block count up so every element gets a thread.
    int blocks = N / threadsPerBlock;
    if (N % threadsPerBlock != 0) blocks++;
    Vadd <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, N);
    cudaDeviceSynchronize();

    // Copy result from device memory to host memory
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    bool correct = true;
    // Calculate solution on the host and compare
    // (exact float equality is valid here: both sides do one IEEE addition)
    for (int i = 0; i < N; i++) {
        if (h_C[i] != (h_A[i] + h_B[i])) {
            printf("ERROR: expected h_C[%i] = %f but received %f\n", i, h_A[i] + h_B[i], h_C[i]);
            correct = false;
            break;
        }
    }
    if (correct) printf("---PASSED---\n");

    // Free host and device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
# x86-64 host assembly (GAS/AT&T) generated by nvcc+GCC from vadd.cu: the kernel
# launch stub (_Z28__device_stub__Z4VaddPfS_S_i), main (alloc/init/copy/launch/
# verify/free), and the fatbinary registration machinery run from .init_array.
# .LC0 (805306368 = 0x30000000) is 1/RAND_MAX as a float; the 19532 immediate is
# the rounded-up block count for N=5000000, threadsPerBlock=256.
# Machine-generated — do not hand-edit.
.file "tmpxft_0017e1e2_00000000-6_vadd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i .type _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i, @function _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4VaddPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i, .-_Z28__device_stub__Z4VaddPfS_S_iPfS_S_i .globl _Z4VaddPfS_S_i .type _Z4VaddPfS_S_i, @function _Z4VaddPfS_S_i: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2083: .size _Z4VaddPfS_S_i, .-_Z4VaddPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "ERROR: expected h_C[%i] = %f but received %f\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "---PASSED---\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $72, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $20000000, %edi call malloc@PLT movq %rax, %r12 movl $20000000, %edi call malloc@PLT movq %rax, %rbp movl $20000000, %edi call malloc@PLT movq %rax, %r13 leaq 8(%rsp), %rdi movl $20000000, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $20000000, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $20000000, %esi call cudaMalloc@PLT movl $0, %ebx .L12: call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 movss %xmm0, (%r12,%rbx) call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 movss %xmm0, 0(%rbp,%rbx) addq $4, %rbx cmpq $20000000, %rbx jne .L12 movl $1, %ecx movl $20000000, %edx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $20000000, %edx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $256, 44(%rsp) movl $1, 48(%rsp) movl $19532, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L23 .L13: call cudaDeviceSynchronize@PLT movl $2, %ecx movl $20000000, %edx movq 24(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movl $0, %edx .L17: movss 0(%r13,%rdx,4), %xmm1 movss (%r12,%rdx,4), %xmm0 addss 0(%rbp,%rdx,4), %xmm0 ucomiss %xmm0, %xmm1 jp .L19 jne .L19 addq $1, %rdx cmpq $5000000, %rdx jne .L17 leaq .LC2(%rip), %rsi 
movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L16 .L23: movl $5000000, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i jmp .L13 .L19: cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 leaq .LC1(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT .L16: movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %r12, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %r13, %rdi call free@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L24 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z4VaddPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z4VaddPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 
1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 805306368 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// Vector addition demo: fills two host vectors with random floats, adds them
// element-wise on the GPU (kernel Vadd), and verifies the result on the host.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

// KERNEL
// C[i] = A[i] + B[i] for i in [0, N); one element per thread.
__global__ void Vadd(float* A, float* B, float* C, int N) {
/*
    int tx = threadIdx.x;
    int i = 4*(blockIdx.x * blockDim.x) + tx;
    int count = 0;
    while (i < N && count < 4) {
        C[i] = A[i] + B[i];
        i += blockDim.x;
        count ++;
    }
*/
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i]; // guard: grid may overshoot N
}

// HOST
int main(int argc, char** argv) {
    // Variables
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 5000000; // Number of floats per vector
    size_t size = N * sizeof(float);

    // Allocate vectors in host memory and device memory
    // NOTE(review): allocation results are not checked — failures surface later.
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    // Initialize input vectors
    for (int i = 0; i < N; ++i){
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    int threadsPerBlock = 256;
    // FILL IN KERNEL SETUP AND INVOCATION
    // int blocks = N / (4*threadsPerBlock);
    // if (N % (4*threadsPerBlock) != 0) blocks++;
    // Round the block count up so every element gets a thread.
    int blocks = N / threadsPerBlock;
    if (N % threadsPerBlock != 0) blocks++;
    Vadd <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, N);
    cudaDeviceSynchronize();

    // Copy result from device memory to host memory
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    bool correct = true;
    // Calculate solution on the host and compare
    // (exact float equality is valid here: both sides do one IEEE addition)
    for (int i = 0; i < N; i++) {
        if (h_C[i] != (h_A[i] + h_B[i])) {
            printf("ERROR: expected h_C[%i] = %f but received %f\n", i, h_A[i] + h_B[i], h_C[i]);
            correct = false;
            break;
        }
    }
    if (correct) printf("---PASSED---\n");

    // Free host and device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
// HIP port of the CUDA vector-addition demo: random host vectors are added
// element-wise on the GPU (kernel Vadd) and the result is verified on the host.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>

// KERNEL
// C[i] = A[i] + B[i] for i in [0, N); one element per thread.
__global__ void Vadd(float* A, float* B, float* C, int N) {
/*
    int tx = threadIdx.x;
    int i = 4*(blockIdx.x * blockDim.x) + tx;
    int count = 0;
    while (i < N && count < 4) {
        C[i] = A[i] + B[i];
        i += blockDim.x;
        count ++;
    }
*/
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i]; // guard: grid may overshoot N
}

// HOST
int main(int argc, char** argv) {
    // Variables
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 5000000; // Number of floats per vector
    size_t size = N * sizeof(float);

    // Allocate vectors in host memory and device memory
    // NOTE(review): allocation results are not checked — failures surface later.
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    hipMalloc((void**)&d_A, size);
    hipMalloc((void**)&d_B, size);
    hipMalloc((void**)&d_C, size);

    // Initialize input vectors
    for (int i = 0; i < N; ++i){
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    // Copy vectors from host memory to device memory
    hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);

    int threadsPerBlock = 256;
    // FILL IN KERNEL SETUP AND INVOCATION
    // int blocks = N / (4*threadsPerBlock);
    // if (N % (4*threadsPerBlock) != 0) blocks++;
    // Round the block count up so every element gets a thread.
    int blocks = N / threadsPerBlock;
    if (N % threadsPerBlock != 0) blocks++;
    Vadd <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, N);
    hipDeviceSynchronize();

    // Copy result from device memory to host memory
    hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);

    bool correct = true;
    // Calculate solution on the host and compare
    // (exact float equality is valid here: both sides do one IEEE addition)
    for (int i = 0; i < N; i++) {
        if (h_C[i] != (h_A[i] + h_B[i])) {
            printf("ERROR: expected h_C[%i] = %f but received %f\n", i, h_A[i] + h_B[i], h_C[i]);
            correct = false;
            break;
        }
    }
    if (correct) printf("---PASSED---\n");

    // Free host and device memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
// HIP port of the CUDA vector-addition demo: random host vectors are added
// element-wise on the GPU (kernel Vadd) and the result is verified on the host.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>

// KERNEL
// C[i] = A[i] + B[i] for i in [0, N); one element per thread.
__global__ void Vadd(float* A, float* B, float* C, int N) {
/*
    int tx = threadIdx.x;
    int i = 4*(blockIdx.x * blockDim.x) + tx;
    int count = 0;
    while (i < N && count < 4) {
        C[i] = A[i] + B[i];
        i += blockDim.x;
        count ++;
    }
*/
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i]; // guard: grid may overshoot N
}

// HOST
int main(int argc, char** argv) {
    // Variables
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 5000000; // Number of floats per vector
    size_t size = N * sizeof(float);

    // Allocate vectors in host memory and device memory
    // NOTE(review): allocation results are not checked — failures surface later.
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    hipMalloc((void**)&d_A, size);
    hipMalloc((void**)&d_B, size);
    hipMalloc((void**)&d_C, size);

    // Initialize input vectors
    for (int i = 0; i < N; ++i){
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    // Copy vectors from host memory to device memory
    hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);

    int threadsPerBlock = 256;
    // FILL IN KERNEL SETUP AND INVOCATION
    // int blocks = N / (4*threadsPerBlock);
    // if (N % (4*threadsPerBlock) != 0) blocks++;
    // Round the block count up so every element gets a thread.
    int blocks = N / threadsPerBlock;
    if (N % threadsPerBlock != 0) blocks++;
    Vadd <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, N);
    hipDeviceSynchronize();

    // Copy result from device memory to host memory
    hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);

    bool correct = true;
    // Calculate solution on the host and compare
    // (exact float equality is valid here: both sides do one IEEE addition)
    for (int i = 0; i < N; i++) {
        if (h_C[i] != (h_A[i] + h_B[i])) {
            printf("ERROR: expected h_C[%i] = %f but received %f\n", i, h_A[i] + h_B[i], h_C[i]);
            correct = false;
            break;
        }
    }
    if (correct) printf("---PASSED---\n");

    // Free host and device memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
; AMDGPU (gfx1100) device assembly emitted by ROCm clang for kernel
; _Z4VaddPfS_S_i: v_mad_u64_u32 forms the global thread index, v_cmpx masks off
; lanes with index >= N, the remaining lanes load A[i] and B[i], add, and store
; C[i]. Followed by the .amdhsa_kernel descriptor and amdhsa metadata.
; Machine-generated — do not hand-edit.
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4VaddPfS_S_i .globl _Z4VaddPfS_S_i .p2align 8 .type _Z4VaddPfS_S_i,@function _Z4VaddPfS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4VaddPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 
1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4VaddPfS_S_i, .Lfunc_end0-_Z4VaddPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 
.kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4VaddPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4VaddPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> // KERNEL __global__ void Vadd(float* A, float* B, float* C, int N) { /* int tx = threadIdx.x; int i = 4*(blockIdx.x * blockDim.x) + tx; int count = 0; while (i < N && count < 4) { C[i] = A[i] + B[i]; i += blockDim.x; count ++; } */ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } // HOST int main(int argc, char** argv) { // Variables float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C; int N = 5000000; // Number of floats per vector size_t size = N * sizeof(float); // Allocate vectors in host memory and device memory h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_C = (float*)malloc(size); hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_B, size); hipMalloc((void**)&d_C, size); // Initialize input vectors for (int i = 0; i < N; ++i){ h_A[i] = rand() / (float)RAND_MAX; h_B[i] = rand() / (float)RAND_MAX; } // Copy vectors from host memory to device memory hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); int threadsPerBlock = 256; // FILL IN KERNEL SETUP AND INVOCATION // int blocks = N / (4*threadsPerBlock); // if (N % (4*threadsPerBlock) != 0) blocks++; int blocks = N / threadsPerBlock; if (N % threadsPerBlock != 0) blocks++; Vadd <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, N); hipDeviceSynchronize(); // Copy result from device memory to host memory hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); bool correct = true; // Calculate solution on the host and compare for (int i = 0; i < N; i++) { if (h_C[i] != (h_A[i] + h_B[i])) { printf("ERROR: expected h_C[%i] = %f but received %f\n", i, h_A[i] + h_B[i], h_C[i]); correct = false; break; } } if (correct) printf("---PASSED---\n"); // Free host and device memory hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); }
.text .file "vadd.hip" .globl _Z19__device_stub__VaddPfS_S_i # -- Begin function _Z19__device_stub__VaddPfS_S_i .p2align 4, 0x90 .type _Z19__device_stub__VaddPfS_S_i,@function _Z19__device_stub__VaddPfS_S_i: # @_Z19__device_stub__VaddPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4VaddPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z19__device_stub__VaddPfS_S_i, .Lfunc_end0-_Z19__device_stub__VaddPfS_S_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI1_0: .long 0x30000000 # float 4.65661287E-10 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $152, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $20000000, %edi # imm = 0x1312D00 callq malloc movq %rax, %rbx movl $20000000, %edi # imm = 0x1312D00 callq malloc movq %rax, %r14 movl $20000000, %edi # imm = 0x1312D00 callq malloc movq %rax, %r15 leaq 24(%rsp), %rdi movl $20000000, %esi # imm = 0x1312D00 callq hipMalloc leaq 16(%rsp), %rdi movl $20000000, %esi # imm = 0x1312D00 callq hipMalloc leaq 8(%rsp), %rdi movl $20000000, %esi # imm = 0x1312D00 callq hipMalloc xorl %r12d, %r12d .p2align 
4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm1, %xmm0 movss %xmm0, (%rbx,%r12,4) callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI1_0(%rip), %xmm0 movss %xmm0, (%r14,%r12,4) incq %r12 cmpq $5000000, %r12 # imm = 0x4C4B40 jne .LBB1_1 # %bb.2: movq 24(%rsp), %rdi movl $20000000, %edx # imm = 0x1312D00 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movl $20000000, %edx # imm = 0x1312D00 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movabsq $4294967552, %rdx # imm = 0x100000100 leaq 19276(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $5000000, 36(%rsp) # imm = 0x4C4B40 leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 36(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z4VaddPfS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: callq hipDeviceSynchronize movq 8(%rsp), %rsi movl $20000000, %edx # imm = 0x1312D00 movq %r15, %rdi movl $2, %ecx callq hipMemcpy xorl %esi, %esi .p2align 4, 0x90 .LBB1_5: # =>This Inner Loop Header: Depth=1 movss (%r15,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero movss (%rbx,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero addss (%r14,%rsi,4), %xmm0 ucomiss %xmm0, %xmm1 jne .LBB1_6 jp .LBB1_6 # %bb.7: # in Loop: Header=BB1_5 Depth=1 incq %rsi cmpq $5000000, %rsi # imm = 0x4C4B40 jne 
.LBB1_5 # %bb.8: # %.critedge movl $.Lstr, %edi callq puts@PLT jmp .LBB1_9 .LBB1_6: cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 movl $.L.str, %edi # kill: def $esi killed $esi killed $rsi movb $2, %al callq printf .LBB1_9: movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4VaddPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z4VaddPfS_S_i,@object # @_Z4VaddPfS_S_i .section 
.rodata,"a",@progbits .globl _Z4VaddPfS_S_i .p2align 3, 0x0 _Z4VaddPfS_S_i: .quad _Z19__device_stub__VaddPfS_S_i .size _Z4VaddPfS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "ERROR: expected h_C[%i] = %f but received %f\n" .size .L.str, 46 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4VaddPfS_S_i" .size .L__unnamed_1, 15 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "---PASSED---" .size .Lstr, 13 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__VaddPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4VaddPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z4VaddPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x0c0fe400078e0207 */ /*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0207 */ /*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ 
NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4VaddPfS_S_i .globl _Z4VaddPfS_S_i .p2align 8 .type _Z4VaddPfS_S_i,@function _Z4VaddPfS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4VaddPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 
1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4VaddPfS_S_i, .Lfunc_end0-_Z4VaddPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 
.kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4VaddPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4VaddPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0017e1e2_00000000-6_vadd.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i .type _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i, @function _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z4VaddPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i, .-_Z28__device_stub__Z4VaddPfS_S_iPfS_S_i .globl _Z4VaddPfS_S_i .type _Z4VaddPfS_S_i, @function _Z4VaddPfS_S_i: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2083: .size _Z4VaddPfS_S_i, .-_Z4VaddPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "ERROR: expected h_C[%i] = %f but received %f\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "---PASSED---\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $72, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $20000000, %edi call malloc@PLT movq %rax, %r12 movl $20000000, %edi call malloc@PLT movq %rax, %rbp movl $20000000, %edi call malloc@PLT movq %rax, %r13 leaq 8(%rsp), %rdi movl $20000000, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $20000000, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $20000000, %esi call cudaMalloc@PLT movl $0, %ebx .L12: call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 movss %xmm0, (%r12,%rbx) call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 movss %xmm0, 0(%rbp,%rbx) addq $4, %rbx cmpq $20000000, %rbx jne .L12 movl $1, %ecx movl $20000000, %edx movq %r12, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $20000000, %edx movq %rbp, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT movl $256, 44(%rsp) movl $1, 48(%rsp) movl $19532, 32(%rsp) movl $1, 36(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L23 .L13: call cudaDeviceSynchronize@PLT movl $2, %ecx movl $20000000, %edx movq 24(%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movl $0, %edx .L17: movss 0(%r13,%rdx,4), %xmm1 movss (%r12,%rdx,4), %xmm0 addss 0(%rbp,%rdx,4), %xmm0 ucomiss %xmm0, %xmm1 jp .L19 jne .L19 addq $1, %rdx cmpq $5000000, %rdx jne .L17 leaq .LC2(%rip), %rsi 
movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L16 .L23: movl $5000000, %ecx movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z28__device_stub__Z4VaddPfS_S_iPfS_S_i jmp .L13 .L19: cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 leaq .LC1(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT .L16: movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq %r12, %rdi call free@PLT movq %rbp, %rdi call free@PLT movq %r13, %rdi call free@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L24 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z4VaddPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z4VaddPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 
1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 805306368 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "vadd.hip" .globl _Z19__device_stub__VaddPfS_S_i # -- Begin function _Z19__device_stub__VaddPfS_S_i .p2align 4, 0x90 .type _Z19__device_stub__VaddPfS_S_i,@function _Z19__device_stub__VaddPfS_S_i: # @_Z19__device_stub__VaddPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z4VaddPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z19__device_stub__VaddPfS_S_i, .Lfunc_end0-_Z19__device_stub__VaddPfS_S_i .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI1_0: .long 0x30000000 # float 4.65661287E-10 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $152, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $20000000, %edi # imm = 0x1312D00 callq malloc movq %rax, %rbx movl $20000000, %edi # imm = 0x1312D00 callq malloc movq %rax, %r14 movl $20000000, %edi # imm = 0x1312D00 callq malloc movq %rax, %r15 leaq 24(%rsp), %rdi movl $20000000, %esi # imm = 0x1312D00 callq hipMalloc leaq 16(%rsp), %rdi movl $20000000, %esi # imm = 0x1312D00 callq hipMalloc leaq 8(%rsp), %rdi movl $20000000, %esi # imm = 0x1312D00 callq hipMalloc xorl %r12d, %r12d .p2align 
4, 0x90 .LBB1_1: # =>This Inner Loop Header: Depth=1 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm1, %xmm0 movss %xmm0, (%rbx,%r12,4) callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI1_0(%rip), %xmm0 movss %xmm0, (%r14,%r12,4) incq %r12 cmpq $5000000, %r12 # imm = 0x4C4B40 jne .LBB1_1 # %bb.2: movq 24(%rsp), %rdi movl $20000000, %edx # imm = 0x1312D00 movq %rbx, %rsi movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movl $20000000, %edx # imm = 0x1312D00 movq %r14, %rsi movl $1, %ecx callq hipMemcpy movabsq $4294967552, %rdx # imm = 0x100000100 leaq 19276(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 8(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $5000000, 36(%rsp) # imm = 0x4C4B40 leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 36(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z4VaddPfS_S_i, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: callq hipDeviceSynchronize movq 8(%rsp), %rsi movl $20000000, %edx # imm = 0x1312D00 movq %r15, %rdi movl $2, %ecx callq hipMemcpy xorl %esi, %esi .p2align 4, 0x90 .LBB1_5: # =>This Inner Loop Header: Depth=1 movss (%r15,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero movss (%rbx,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero addss (%r14,%rsi,4), %xmm0 ucomiss %xmm0, %xmm1 jne .LBB1_6 jp .LBB1_6 # %bb.7: # in Loop: Header=BB1_5 Depth=1 incq %rsi cmpq $5000000, %rsi # imm = 0x4C4B40 jne 
.LBB1_5 # %bb.8: # %.critedge movl $.Lstr, %edi callq puts@PLT jmp .LBB1_9 .LBB1_6: cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 movl $.L.str, %edi # kill: def $esi killed $esi killed $rsi movb $2, %al callq printf .LBB1_9: movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4VaddPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z4VaddPfS_S_i,@object # @_Z4VaddPfS_S_i .section 
.rodata,"a",@progbits .globl _Z4VaddPfS_S_i .p2align 3, 0x0 _Z4VaddPfS_S_i: .quad _Z19__device_stub__VaddPfS_S_i .size _Z4VaddPfS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "ERROR: expected h_C[%i] = %f but received %f\n" .size .L.str, 46 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4VaddPfS_S_i" .size .L__unnamed_1, 15 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "---PASSED---" .size .Lstr, 13 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__VaddPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4VaddPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
//#include "caffe/layers/cosine_loss_layer.hpp" // //namespace caffe { // // template<typename Dtype> // __global__ void channels_gpu_l2_norm(const int n, const int channels, const Dtype* bottom, // Dtype *norm_data) { // CUDA_KERNEL_LOOP(index, n) { // caffe_gpu_l2norm(channels, bottom + index * channels, norm_data); // } // } // // template <typename Dtype> // __global__ void calc_loss(const int n, Dtype *inner_product_data, Dtype *norm_data, Dtype *loss) { // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, // const vector<Blob<Dtype>*>& top) { // // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // // const Dtype *bottom_data = bottom[0]->gpu_data(); // Dtype *norm_data = norm_.mutable_gpu_data(); // Dtype *inner_product_data = inner_product.mutable_gpu_data(); // Dtype loss = Dtype(0.0); // const Dtype* label = bottom[1]->gpu_data(); // for (size_t i = 0; i < batch; i++) { // caffe_gpu_l2norm(channels, bottom_data + i * channels, norm_data + i); // } // caffe_gpu_gemm(CblasNoTrans, CblasTrans, batch, batch, channels, Dtype(1.0), // bottom_data, bottom_data, Dtype(0.0), inner_product_data); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = i; j < batch; j++) { // //inner_product_data[i * batch + j] = caffe_cpu_dot(channels, bottom_data + i * channels, bottom_data + j * channels); // inner_product_data[i * batch + j] / (norm_data[i] * norm_data[j] + Dtype(1.0)); // if (label[i] == label[j]) { // loss += (1 - inner_product_data[i * batch + j]); // } // else { // loss += inner_product_data[i * batch + j]; // } // } // } // // top[0]->mutable_cpu_data()[0] = loss / bottom[0]->count(); // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, // const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // int batch = 
bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // const Dtype* label = bottom[1]->gpu_data(); // const Dtype *norm_data = norm_.gpu_data(); // const Dtype *bottom_data = bottom[0]->mutable_gpu_data(); // // if (propagate_down[0]) { // // Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = 0; j < batch; j++) { // if (i == j) { // continue; // } // else { // bool reverse = label[i] == label[j]; // accu_assign(batch, channels, reverse, bottom_data + j * channels, bottom_diff + i * channels, norm_data[i]); // } // } // } // } // // } // // // INSTANTIATE_LAYER_GPU_FUNCS(CosineLossLayer); // //}
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
//#include "caffe/layers/cosine_loss_layer.hpp" // //namespace caffe { // // template<typename Dtype> // __global__ void channels_gpu_l2_norm(const int n, const int channels, const Dtype* bottom, // Dtype *norm_data) { // CUDA_KERNEL_LOOP(index, n) { // caffe_gpu_l2norm(channels, bottom + index * channels, norm_data); // } // } // // template <typename Dtype> // __global__ void calc_loss(const int n, Dtype *inner_product_data, Dtype *norm_data, Dtype *loss) { // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, // const vector<Blob<Dtype>*>& top) { // // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // // const Dtype *bottom_data = bottom[0]->gpu_data(); // Dtype *norm_data = norm_.mutable_gpu_data(); // Dtype *inner_product_data = inner_product.mutable_gpu_data(); // Dtype loss = Dtype(0.0); // const Dtype* label = bottom[1]->gpu_data(); // for (size_t i = 0; i < batch; i++) { // caffe_gpu_l2norm(channels, bottom_data + i * channels, norm_data + i); // } // caffe_gpu_gemm(CblasNoTrans, CblasTrans, batch, batch, channels, Dtype(1.0), // bottom_data, bottom_data, Dtype(0.0), inner_product_data); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = i; j < batch; j++) { // //inner_product_data[i * batch + j] = caffe_cpu_dot(channels, bottom_data + i * channels, bottom_data + j * channels); // inner_product_data[i * batch + j] / (norm_data[i] * norm_data[j] + Dtype(1.0)); // if (label[i] == label[j]) { // loss += (1 - inner_product_data[i * batch + j]); // } // else { // loss += inner_product_data[i * batch + j]; // } // } // } // // top[0]->mutable_cpu_data()[0] = loss / bottom[0]->count(); // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, // const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // int batch = 
bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // const Dtype* label = bottom[1]->gpu_data(); // const Dtype *norm_data = norm_.gpu_data(); // const Dtype *bottom_data = bottom[0]->mutable_gpu_data(); // // if (propagate_down[0]) { // // Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = 0; j < batch; j++) { // if (i == j) { // continue; // } // else { // bool reverse = label[i] == label[j]; // accu_assign(batch, channels, reverse, bottom_data + j * channels, bottom_diff + i * channels, norm_data[i]); // } // } // } // } // // } // // // INSTANTIATE_LAYER_GPU_FUNCS(CosineLossLayer); // //}
# CUDA host-side boilerplate emitted by cudafe/GCC for cosine_loss_layer.cu.
# All layer code in the original .cu file is commented out, so the only
# generated host code is the fatbin registration/unregistration machinery.
# ABI: System V AMD64; both functions are leaf-callers that keep rsp
# 16-aligned via the subq $8 adjustment.
        .file   "tmpxft_001648b6_00000000-6_cosine_loss_layer.cudafe1.cpp"
        .text
#APP
#NO_APP
        .type   _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
        .cfi_startproc
        endbr64
        subq    $8, %rsp
        .cfi_def_cfa_offset 16
        movq    _ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = saved fatbin handle
        call    __cudaUnregisterFatBinary@PLT
        addq    $8, %rsp
        .cfi_def_cfa_offset 8
        ret
        .cfi_endproc
.LFE2029:
        .size   _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
        .type   _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
        .cfi_startproc
        endbr64
        subq    $8, %rsp
        .cfi_def_cfa_offset 16
        leaq    _ZL15__fatDeviceText(%rip), %rdi        # arg0 = fatbin wrapper descriptor
        call    __cudaRegisterFatBinary@PLT
        movq    %rax, %rdi
        movq    %rax, _ZL20__cudaFatCubinHandle(%rip)   # stash handle for later unregister
        call    __cudaRegisterFatBinaryEnd@PLT
        leaq    _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
        call    atexit@PLT                              # unregister at process exit
        addq    $8, %rsp
        .cfi_def_cfa_offset 8
        ret
        .cfi_endproc
.LFE2052:
        .size   _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
        .section        .init_array,"aw"
        .align 8
        .quad   _ZL24__sti____cudaRegisterAllv          # run registration before main()
        .section        .nvFatBinSegment,"aw"
        .align 8
        .type   _ZL15__fatDeviceText, @object
        .size   _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
        .long   1180844977                              # fatbin wrapper magic 0x466243b1
        .long   1
        .quad   fatbinData
        .quad   0
        .local  _ZL20__cudaFatCubinHandle
        .comm   _ZL20__cudaFatCubinHandle,8,8
        .ident  "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
        .section        .note.GNU-stack,"",@progbits
        .section        .note.gnu.property,"a"
        .align 8
        .long   1f - 0f
        .long   4f - 1f
        .long   5
0:
        .string "GNU"
1:
        .align 8
        .long   0xc0000002
        .long   3f - 2f
2:
        .long   0x3
3:
        .align 8
4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
//#include "caffe/layers/cosine_loss_layer.hpp" // //namespace caffe { // // template<typename Dtype> // __global__ void channels_gpu_l2_norm(const int n, const int channels, const Dtype* bottom, // Dtype *norm_data) { // CUDA_KERNEL_LOOP(index, n) { // caffe_gpu_l2norm(channels, bottom + index * channels, norm_data); // } // } // // template <typename Dtype> // __global__ void calc_loss(const int n, Dtype *inner_product_data, Dtype *norm_data, Dtype *loss) { // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, // const vector<Blob<Dtype>*>& top) { // // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // // const Dtype *bottom_data = bottom[0]->gpu_data(); // Dtype *norm_data = norm_.mutable_gpu_data(); // Dtype *inner_product_data = inner_product.mutable_gpu_data(); // Dtype loss = Dtype(0.0); // const Dtype* label = bottom[1]->gpu_data(); // for (size_t i = 0; i < batch; i++) { // caffe_gpu_l2norm(channels, bottom_data + i * channels, norm_data + i); // } // caffe_gpu_gemm(CblasNoTrans, CblasTrans, batch, batch, channels, Dtype(1.0), // bottom_data, bottom_data, Dtype(0.0), inner_product_data); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = i; j < batch; j++) { // //inner_product_data[i * batch + j] = caffe_cpu_dot(channels, bottom_data + i * channels, bottom_data + j * channels); // inner_product_data[i * batch + j] / (norm_data[i] * norm_data[j] + Dtype(1.0)); // if (label[i] == label[j]) { // loss += (1 - inner_product_data[i * batch + j]); // } // else { // loss += inner_product_data[i * batch + j]; // } // } // } // // top[0]->mutable_cpu_data()[0] = loss / bottom[0]->count(); // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, // const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // int batch = 
bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // const Dtype* label = bottom[1]->gpu_data(); // const Dtype *norm_data = norm_.gpu_data(); // const Dtype *bottom_data = bottom[0]->mutable_gpu_data(); // // if (propagate_down[0]) { // // Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = 0; j < batch; j++) { // if (i == j) { // continue; // } // else { // bool reverse = label[i] == label[j]; // accu_assign(batch, channels, reverse, bottom_data + j * channels, bottom_diff + i * channels, norm_data[i]); // } // } // } // } // // } // // // INSTANTIATE_LAYER_GPU_FUNCS(CosineLossLayer); // //}
#include <hip/hip_runtime.h> //#include "caffe/layers/cosine_loss_layer.hpp" // //namespace caffe { // // template<typename Dtype> // __global__ void channels_gpu_l2_norm(const int n, const int channels, const Dtype* bottom, // Dtype *norm_data) { // CUDA_KERNEL_LOOP(index, n) { // caffe_gpu_l2norm(channels, bottom + index * channels, norm_data); // } // } // // template <typename Dtype> // __global__ void calc_loss(const int n, Dtype *inner_product_data, Dtype *norm_data, Dtype *loss) { // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, // const vector<Blob<Dtype>*>& top) { // // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // // const Dtype *bottom_data = bottom[0]->gpu_data(); // Dtype *norm_data = norm_.mutable_gpu_data(); // Dtype *inner_product_data = inner_product.mutable_gpu_data(); // Dtype loss = Dtype(0.0); // const Dtype* label = bottom[1]->gpu_data(); // for (size_t i = 0; i < batch; i++) { // caffe_gpu_l2norm(channels, bottom_data + i * channels, norm_data + i); // } // caffe_gpu_gemm(CblasNoTrans, CblasTrans, batch, batch, channels, Dtype(1.0), // bottom_data, bottom_data, Dtype(0.0), inner_product_data); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = i; j < batch; j++) { // //inner_product_data[i * batch + j] = caffe_cpu_dot(channels, bottom_data + i * channels, bottom_data + j * channels); // inner_product_data[i * batch + j] / (norm_data[i] * norm_data[j] + Dtype(1.0)); // if (label[i] == label[j]) { // loss += (1 - inner_product_data[i * batch + j]); // } // else { // loss += inner_product_data[i * batch + j]; // } // } // } // // top[0]->mutable_cpu_data()[0] = loss / bottom[0]->count(); // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, // const vector<bool>& propagate_down, const 
vector<Blob<Dtype>*>& bottom) { // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // const Dtype* label = bottom[1]->gpu_data(); // const Dtype *norm_data = norm_.gpu_data(); // const Dtype *bottom_data = bottom[0]->mutable_gpu_data(); // // if (propagate_down[0]) { // // Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = 0; j < batch; j++) { // if (i == j) { // continue; // } // else { // bool reverse = label[i] == label[j]; // accu_assign(batch, channels, reverse, bottom_data + j * channels, bottom_diff + i * channels, norm_data[i]); // } // } // } // } // // } // // // INSTANTIATE_LAYER_GPU_FUNCS(CosineLossLayer); // //}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> //#include "caffe/layers/cosine_loss_layer.hpp" // //namespace caffe { // // template<typename Dtype> // __global__ void channels_gpu_l2_norm(const int n, const int channels, const Dtype* bottom, // Dtype *norm_data) { // CUDA_KERNEL_LOOP(index, n) { // caffe_gpu_l2norm(channels, bottom + index * channels, norm_data); // } // } // // template <typename Dtype> // __global__ void calc_loss(const int n, Dtype *inner_product_data, Dtype *norm_data, Dtype *loss) { // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, // const vector<Blob<Dtype>*>& top) { // // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // // const Dtype *bottom_data = bottom[0]->gpu_data(); // Dtype *norm_data = norm_.mutable_gpu_data(); // Dtype *inner_product_data = inner_product.mutable_gpu_data(); // Dtype loss = Dtype(0.0); // const Dtype* label = bottom[1]->gpu_data(); // for (size_t i = 0; i < batch; i++) { // caffe_gpu_l2norm(channels, bottom_data + i * channels, norm_data + i); // } // caffe_gpu_gemm(CblasNoTrans, CblasTrans, batch, batch, channels, Dtype(1.0), // bottom_data, bottom_data, Dtype(0.0), inner_product_data); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = i; j < batch; j++) { // //inner_product_data[i * batch + j] = caffe_cpu_dot(channels, bottom_data + i * channels, bottom_data + j * channels); // inner_product_data[i * batch + j] / (norm_data[i] * norm_data[j] + Dtype(1.0)); // if (label[i] == label[j]) { // loss += (1 - inner_product_data[i * batch + j]); // } // else { // loss += inner_product_data[i * batch + j]; // } // } // } // // top[0]->mutable_cpu_data()[0] = loss / bottom[0]->count(); // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, // const vector<bool>& propagate_down, const 
vector<Blob<Dtype>*>& bottom) { // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // const Dtype* label = bottom[1]->gpu_data(); // const Dtype *norm_data = norm_.gpu_data(); // const Dtype *bottom_data = bottom[0]->mutable_gpu_data(); // // if (propagate_down[0]) { // // Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = 0; j < batch; j++) { // if (i == j) { // continue; // } // else { // bool reverse = label[i] == label[j]; // accu_assign(batch, channels, reverse, bottom_data + j * channels, bottom_diff + i * channels, norm_data[i]); // } // } // } // } // // } // // // INSTANTIATE_LAYER_GPU_FUNCS(CosineLossLayer); // //}
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> //#include "caffe/layers/cosine_loss_layer.hpp" // //namespace caffe { // // template<typename Dtype> // __global__ void channels_gpu_l2_norm(const int n, const int channels, const Dtype* bottom, // Dtype *norm_data) { // CUDA_KERNEL_LOOP(index, n) { // caffe_gpu_l2norm(channels, bottom + index * channels, norm_data); // } // } // // template <typename Dtype> // __global__ void calc_loss(const int n, Dtype *inner_product_data, Dtype *norm_data, Dtype *loss) { // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, // const vector<Blob<Dtype>*>& top) { // // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // // const Dtype *bottom_data = bottom[0]->gpu_data(); // Dtype *norm_data = norm_.mutable_gpu_data(); // Dtype *inner_product_data = inner_product.mutable_gpu_data(); // Dtype loss = Dtype(0.0); // const Dtype* label = bottom[1]->gpu_data(); // for (size_t i = 0; i < batch; i++) { // caffe_gpu_l2norm(channels, bottom_data + i * channels, norm_data + i); // } // caffe_gpu_gemm(CblasNoTrans, CblasTrans, batch, batch, channels, Dtype(1.0), // bottom_data, bottom_data, Dtype(0.0), inner_product_data); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = i; j < batch; j++) { // //inner_product_data[i * batch + j] = caffe_cpu_dot(channels, bottom_data + i * channels, bottom_data + j * channels); // inner_product_data[i * batch + j] / (norm_data[i] * norm_data[j] + Dtype(1.0)); // if (label[i] == label[j]) { // loss += (1 - inner_product_data[i * batch + j]); // } // else { // loss += inner_product_data[i * batch + j]; // } // } // } // // top[0]->mutable_cpu_data()[0] = loss / bottom[0]->count(); // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, // const vector<bool>& propagate_down, const 
vector<Blob<Dtype>*>& bottom) { // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // const Dtype* label = bottom[1]->gpu_data(); // const Dtype *norm_data = norm_.gpu_data(); // const Dtype *bottom_data = bottom[0]->mutable_gpu_data(); // // if (propagate_down[0]) { // // Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = 0; j < batch; j++) { // if (i == j) { // continue; // } // else { // bool reverse = label[i] == label[j]; // accu_assign(batch, channels, reverse, bottom_data + j * channels, bottom_diff + i * channels, norm_data[i]); // } // } // } // } // // } // // // INSTANTIATE_LAYER_GPU_FUNCS(CosineLossLayer); // //}
.text .file "cosine_loss_layer.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001648b6_00000000-6_cosine_loss_layer.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cosine_loss_layer.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// Device kernel: stores the grid's x-dimension at index gridDim.x of a.
__global__ void kernel(int *a) {
    int i = gridDim.x;   // with the <<<1,1>>> launch below, i == 1
    a[i] = i;
}

// Launches the kernel on a single thread/block and returns 0 on success,
// 1 if allocation or device synchronization fails.
int main(void) {
    int *a;
    // The kernel writes a[gridDim.x] == a[1], so two ints are required.
    // The original allocated only 4 bytes, making that store out of bounds;
    // the allocation result was also unchecked and the buffer never freed.
    if (cudaSuccess != cudaMalloc(&a, 2 * sizeof(int))) {
        return 1;
    }
    kernel<<<1,1>>>(a);
    cudaError_t err = cudaDeviceSynchronize();
    cudaFree(a);  // release device memory before exiting
    if (cudaSuccess != err) {
        return 1;
    }
    return 0;
}
code for sm_80 Function : _Z6kernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */ /* 0x000fe200000001ff */ /*0020*/ MOV R5, c[0x0][0xc] ; /* 0x0000030000057a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*0040*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fca00078e0202 */ /*0050*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0060*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0070*/ BRA 0x70; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// Device kernel: stores the grid's x-dimension at index gridDim.x of a.
__global__ void kernel(int *a) {
    int i = gridDim.x;  // with the <<<1,1>>> launch below, i == 1
    a[i] = i;           // NOTE(review): writes a[1], but main allocates only 4 bytes -- confirm intent
}

// Launches the kernel on a single thread/block and returns 1 if device
// synchronization reports an error, 0 otherwise.
int main(void) {
    int *a;
    cudaMalloc(&a, 4);          // room for one int; allocation result is not checked
    kernel<<<1,1>>>(a);
    cudaError_t err = cudaDeviceSynchronize();
    if (cudaSuccess != err) {
        return 1;
    }
    return 0;
}
.file "tmpxft_000a2242_00000000-6_main.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z25__device_stub__Z6kernelPiPi .type _Z25__device_stub__Z6kernelPiPi, @function _Z25__device_stub__Z6kernelPiPi: .LFB2052: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z6kernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size _Z25__device_stub__Z6kernelPiPi, .-_Z25__device_stub__Z6kernelPiPi .globl _Z6kernelPi .type _Z6kernelPi, @function _Z6kernelPi: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z6kernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size _Z6kernelPi, .-_Z6kernelPi .globl main .type main, @function main: .LFB2027: .cfi_startproc endbr64 subq $56, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $4, %esi 
call cudaMalloc@PLT movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: call cudaDeviceSynchronize@PLT testl %eax, %eax setne %al movzbl %al, %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L16 addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 8(%rsp), %rdi call _Z25__device_stub__Z6kernelPiPi jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2027: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6kernelPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f 
.long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// Device kernel: stores the grid's x-dimension at index gridDim.x of a.
__global__ void kernel(int *a) {
    int i = gridDim.x;  // with the <<<1,1>>> launch below, i == 1
    a[i] = i;           // NOTE(review): writes a[1], but main allocates only 4 bytes -- confirm intent
}

// Launches the kernel on a single thread/block and returns 1 if device
// synchronization reports an error, 0 otherwise.
int main(void) {
    int *a;
    cudaMalloc(&a, 4);          // room for one int; allocation result is not checked
    kernel<<<1,1>>>(a);
    cudaError_t err = cudaDeviceSynchronize();
    if (cudaSuccess != err) {
        return 1;
    }
    return 0;
}
#include <hip/hip_runtime.h>

// Device kernel: stores the grid's x-dimension at index gridDim.x of a.
__global__ void kernel(int *a) {
    int i = gridDim.x;   // with the <<<1,1>>> launch below, i == 1
    a[i] = i;
}

// Launches the kernel on a single thread/block and returns 0 on success,
// 1 if allocation or device synchronization fails.
int main(void) {
    int *a;
    // The kernel writes a[gridDim.x] == a[1], so two ints are required.
    // The original allocated only 4 bytes, making that store out of bounds;
    // the allocation result was also unchecked and the buffer never freed.
    if (hipSuccess != hipMalloc(&a, 2 * sizeof(int))) {
        return 1;
    }
    kernel<<<1,1>>>(a);
    hipError_t err = hipDeviceSynchronize();
    hipFree(a);  // release device memory before exiting
    if (hipSuccess != err) {
        return 1;
    }
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>

// Device kernel: stores the grid's x-dimension at index gridDim.x of a.
__global__ void kernel(int *a) {
    int i = gridDim.x;  // with the <<<1,1>>> launch below, i == 1
    a[i] = i;           // NOTE(review): writes a[1], but main allocates only 4 bytes -- confirm intent
}

// Launches the kernel on a single thread/block and returns 1 if device
// synchronization reports an error, 0 otherwise.
int main(void) {
    int *a;
    hipMalloc(&a, 4);           // room for one int; allocation result is not checked
    kernel<<<1,1>>>(a);
    hipError_t err = hipDeviceSynchronize();
    if (hipSuccess != err) {
        return 1;
    }
    return 0;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPi .globl _Z6kernelPi .p2align 8 .type _Z6kernelPi,@function _Z6kernelPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x8 s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 s_ashr_i32 s3, s2, 31 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_lshl_b64 s[4:5], s[2:3], 2 s_add_u32 s0, s0, s4 s_addc_u32 s1, s1, s5 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelPi, .Lfunc_end0-_Z6kernelPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type 
__hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelPi .private_segment_fixed_size: 0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z6kernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> __global__ void kernel(int *a) { int i = gridDim.x; a[i] = i; } int main(void) { int *a; hipMalloc(&a, 4); kernel<<<1,1>>>(a); hipError_t err = hipDeviceSynchronize(); if (hipSuccess != err) { return 1; } return 0; }
.text .file "main.hip" .globl _Z21__device_stub__kernelPi # -- Begin function _Z21__device_stub__kernelPi .p2align 4, 0x90 .type _Z21__device_stub__kernelPi,@function _Z21__device_stub__kernelPi: # @_Z21__device_stub__kernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z6kernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z21__device_stub__kernelPi, .Lfunc_end0-_Z21__device_stub__kernelPi .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 leaq 8(%rsp), %rdi movl $4, %esi callq hipMalloc movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z6kernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %ecx, %ecx testl %eax, %eax setne %cl movl %ecx, %eax addq $72, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function 
__hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPi,@object # @_Z6kernelPi .section .rodata,"a",@progbits .globl _Z6kernelPi .p2align 3, 0x0 _Z6kernelPi: .quad _Z21__device_stub__kernelPi .size _Z6kernelPi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6kernelPi" .size .L__unnamed_1, 12 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 
.size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6kernelPi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */ /* 0x000fe200000001ff */ /*0020*/ MOV R5, c[0x0][0xc] ; /* 0x0000030000057a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*0040*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fca00078e0202 */ /*0050*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0060*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0070*/ BRA 0x70; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPi .globl _Z6kernelPi .p2align 8 .type _Z6kernelPi,@function _Z6kernelPi: s_clause 0x1 s_load_b32 s2, s[0:1], 0x8 s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 s_ashr_i32 s3, s2, 31 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_lshl_b64 s[4:5], s[2:3], 2 s_add_u32 s0, s0, s4 s_addc_u32 s1, s1, s5 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 264 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 6 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelPi, .Lfunc_end0-_Z6kernelPi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type 
__hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: hidden_block_count_x - .offset: 12 .size: 4 .value_kind: hidden_block_count_y - .offset: 16 .size: 4 .value_kind: hidden_block_count_z - .offset: 20 .size: 2 .value_kind: hidden_group_size_x - .offset: 22 .size: 2 .value_kind: hidden_group_size_y - .offset: 24 .size: 2 .value_kind: hidden_group_size_z - .offset: 26 .size: 2 .value_kind: hidden_remainder_x - .offset: 28 .size: 2 .value_kind: hidden_remainder_y - .offset: 30 .size: 2 .value_kind: hidden_remainder_z - .offset: 48 .size: 8 .value_kind: hidden_global_offset_x - .offset: 56 .size: 8 .value_kind: hidden_global_offset_y - .offset: 64 .size: 8 .value_kind: hidden_global_offset_z - .offset: 72 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 264 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelPi .private_segment_fixed_size: 0 .sgpr_count: 6 .sgpr_spill_count: 0 .symbol: _Z6kernelPi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000a2242_00000000-6_main.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2030: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2030: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z25__device_stub__Z6kernelPiPi .type _Z25__device_stub__Z6kernelPiPi, @function _Z25__device_stub__Z6kernelPiPi: .LFB2052: .cfi_startproc endbr64 subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 8(%rsp) movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 88(%rsp), %rax subq %fs:40, %rax jne .L8 addq $104, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 120 pushq 24(%rsp) .cfi_def_cfa_offset 128 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z6kernelPi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 112 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2052: .size _Z25__device_stub__Z6kernelPiPi, .-_Z25__device_stub__Z6kernelPiPi .globl _Z6kernelPi .type _Z6kernelPi, @function _Z6kernelPi: .LFB2053: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z6kernelPiPi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2053: .size _Z6kernelPi, .-_Z6kernelPi .globl main .type main, @function main: .LFB2027: .cfi_startproc endbr64 subq $56, %rsp .cfi_def_cfa_offset 64 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $4, %esi 
call cudaMalloc@PLT movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: call cudaDeviceSynchronize@PLT testl %eax, %eax setne %al movzbl %al, %eax movq 40(%rsp), %rdx subq %fs:40, %rdx jne .L16 addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 8(%rsp), %rdi call _Z25__device_stub__Z6kernelPiPi jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2027: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z6kernelPi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2055: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2055: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f 
.long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "main.hip" .globl _Z21__device_stub__kernelPi # -- Begin function _Z21__device_stub__kernelPi .p2align 4, 0x90 .type _Z21__device_stub__kernelPi,@function _Z21__device_stub__kernelPi: # @_Z21__device_stub__kernelPi .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 movq %rdi, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z6kernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $88, %rsp .cfi_adjust_cfa_offset -88 retq .Lfunc_end0: .size _Z21__device_stub__kernelPi, .Lfunc_end0-_Z21__device_stub__kernelPi .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $72, %rsp .cfi_def_cfa_offset 80 leaq 8(%rsp), %rdi movl $4, %esi callq hipMalloc movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 64(%rsp) leaq 64(%rsp), %rax movq %rax, (%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movq %rsp, %r9 movl $_Z6kernelPi, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %ecx, %ecx testl %eax, %eax setne %cl movl %ecx, %eax addq $72, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function 
__hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPi,@object # @_Z6kernelPi .section .rodata,"a",@progbits .globl _Z6kernelPi .p2align 3, 0x0 _Z6kernelPi: .quad _Z21__device_stub__kernelPi .size _Z6kernelPi, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z6kernelPi" .size .L__unnamed_1, 12 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 
.size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "cuda.h" // Unix系统下调用CUDA使用"cuda.h",Win系统下调用CUDA使用"cuda.runtime" #include "stdio.h" // 标准输入输出,后面调用函数printf #define N 10000 __global__ void vectorAdd(float *A, float *B, float *C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; C[i*N+j] = A[i*N+j] + B[i*N+j]; printf("======================================================\n"); printf("blockIdx.x:%d\tblockIdx.y:%d\n", blockIdx.x, blockIdx.y); printf("blockDim.x:%d\tblockDim.y:%d\n", blockDim.x, blockDim.y); printf("threadIdx.x:%d\tthreadIdx.y:%d\n", threadIdx.x, threadIdx.y); printf("i:%d j:%d C[i*N+j]:C[%2d]:%f\n", i, j, i*N+j, C[i*N+j]); } int main() { // 设置使用的GPU下标(从0开始) // 多个GPU在终端用命令'nvidia-smi'查看下标 int gpuDeviceIdx = 0; cudaSetDevice(gpuDeviceIdx); // 初始化设备(CPU)端变量 // 使用cudaMalloc方法分配指定大小的空间 float *Ad, *Bd, *Cd; int size = N*N*sizeof(float); cudaMalloc((void**)&Ad, size); cudaMalloc((void**)&Bd, size); cudaMalloc((void**)&Cd, size); // 设置程序在GPU上运行参数 // 1.grid下的block数目 // 2.block下的thread数目 dim3 bpg(10, 10); // 每个网格下的线程块个数(block num. per grid) // bpg(blockDim.x, blockDim.y) dim3 tpb(10, 10); // 每个线程块下的线程个数(thread num. per block) // tpb(threadIdx.x, threadIdx.y) // 根据资源分配的参数 // 在GPU上执行核函数(__global__修饰的函数) vectorAdd<<<bpg, tpb>>>(Ad, Bd, Cd); // 释放空间 cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); return 0; }
code for sm_80 Function : _Z9vectorAddPfS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R25, SR_CTAID.Y ; /* 0x0000000000197919 */ /* 0x000e220000002600 */ /*0020*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff117424 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR36, c[0x0][0x118] ; /* 0x0000460000247ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IADD3 R1, R1, -0x18, RZ ; /* 0xffffffe801017810 */ /* 0x000fe20007ffe0ff */ /*0050*/ S2R R24, SR_CTAID.X ; /* 0x0000000000187919 */ /* 0x000e680000002500 */ /*0060*/ S2R R22, SR_TID.X ; /* 0x0000000000167919 */ /* 0x000e680000002100 */ /*0070*/ S2R R23, SR_TID.Y ; /* 0x0000000000177919 */ /* 0x000e220000002200 */ /*0080*/ IMAD R19, R24, c[0x0][0x0], R22 ; /* 0x0000000018137a24 */ /* 0x002fc400078e0216 */ /*0090*/ IMAD R18, R25, c[0x0][0x4], R23 ; /* 0x0000010019127a24 */ /* 0x001fc800078e0217 */ /*00a0*/ IMAD R26, R18, 0x2710, R19 ; /* 0x00002710121a7824 */ /* 0x000fc800078e0213 */ /*00b0*/ IMAD.WIDE R12, R26, R17, c[0x0][0x168] ; /* 0x00005a001a0c7625 */ /* 0x000fc800078e0211 */ /*00c0*/ IMAD.WIDE R10, R26.reuse, R17.reuse, c[0x0][0x160] ; /* 0x000058001a0a7625 */ /* 0x0c0fe400078e0211 */ /*00d0*/ LDG.E R12, [R12.64] ; /* 0x000000240c0c7981 */ /* 0x000ea8000c1e1900 */ /*00e0*/ LDG.E R3, [R10.64] ; /* 0x000000240a037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD.WIDE R16, R26, R17, c[0x0][0x170] ; /* 0x00005c001a107625 */ /* 0x000fe200078e0211 */ /*0100*/ MOV R2, 0x0 ; /* 0x0000000000027802 */ /* 0x000fc80000000f00 */ /*0110*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0000620000000a00 */ /*0120*/ IADD3 R27, P0, R1, c[0x0][0x20], RZ ; /* 0x00000800011b7a10 */ /* 0x000fe20007f1e0ff */ /*0130*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0140*/ CS2R R6, SRZ ; /* 0x0000000000067805 */ /* 
0x000fe2000001ff00 */ /*0150*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe400078e00ff */ /*0160*/ IMAD.X R28, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff1c7624 */ /* 0x000fe400000e06ff */ /*0170*/ FADD R3, R3, R12 ; /* 0x0000000c03037221 */ /* 0x004fca0000000000 */ /*0180*/ STG.E [R16.64], R3 ; /* 0x0000000310007986 */ /* 0x0001e4000c101924 */ /*0190*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x002fe40000000000 */ /*01a0*/ MOV R3, 0x210 ; /* 0x0000021000037802 */ /* 0x001fe40000000f00 */ /*01b0*/ MOV R20, 0x190 ; /* 0x0000019000147802 */ /* 0x000fe40000000f00 */ /*01c0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*01d0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*01e0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 0x000fc8000791e10a */ /*01f0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e250b */ /*0200*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x000fea0003c00000 */ /*0210*/ STL.64 [R1], R24 ; /* 0x0000001801007387 */ /* 0x0001e20000100a00 */ /*0220*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0000620000000a00 */ /*0230*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x10] ; /* 0x01000400ff047624 */ /* 0x000fe400078e00ff */ /*0240*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x14] ; /* 0x01000500ff057624 */ /* 0x000fe400078e00ff */ /*0250*/ IMAD.MOV.U32 R6, RZ, RZ, R27 ; /* 0x000000ffff067224 */ /* 0x000fe400078e001b */ /*0260*/ IMAD.MOV.U32 R7, RZ, RZ, R28 ; /* 0x000000ffff077224 */ /* 0x000fe400078e001c */ /*0270*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x000fca0000000000 */ /*0280*/ MOV R3, 0x2f0 ; /* 0x000002f000037802 */ /* 0x000fe40000000f00 */ /*0290*/ MOV R20, 0x270 ; /* 0x0000027000147802 */ /* 0x000fc40000000f00 */ /*02a0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*02b0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*02c0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 
0x000fc8000791e10a */ /*02d0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e250b */ /*02e0*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x003fea0003c00000 */ /*02f0*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0a7624 */ /* 0x000fe200078e00ff */ /*0300*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0000620000000a00 */ /*0310*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff0b7624 */ /* 0x000fe400078e00ff */ /*0320*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x18] ; /* 0x01000600ff047624 */ /* 0x000fe400078e00ff */ /*0330*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x1c] ; /* 0x01000700ff057624 */ /* 0x000fe200078e00ff */ /*0340*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */ /* 0x0001e20000100a00 */ /*0350*/ IMAD.MOV.U32 R6, RZ, RZ, R27 ; /* 0x000000ffff067224 */ /* 0x000fe400078e001b */ /*0360*/ IMAD.MOV.U32 R7, RZ, RZ, R28 ; /* 0x000000ffff077224 */ /* 0x000fc400078e001c */ /*0370*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x001fca0000000000 */ /*0380*/ MOV R3, 0x3f0 ; /* 0x000003f000037802 */ /* 0x000fe40000000f00 */ /*0390*/ MOV R20, 0x370 ; /* 0x0000037000147802 */ /* 0x000fc40000000f00 */ /*03a0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*03b0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*03c0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 0x000fc8000791e10a */ /*03d0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e250b */ /*03e0*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x002fea0003c00000 */ /*03f0*/ STL.64 [R1], R22 ; /* 0x0000001601007387 */ /* 0x0001e20000100a00 */ /*0400*/ LDC.64 R8, c[0x4][R2] ; /* 0x0100000002087b82 */ /* 0x0000620000000a00 */ /*0410*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x20] ; /* 0x01000800ff047624 */ /* 0x000fe400078e00ff */ /*0420*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x24] ; /* 0x01000900ff057624 */ /* 0x000fe400078e00ff */ /*0430*/ IMAD.MOV.U32 R6, RZ, RZ, R27 ; /* 0x000000ffff067224 */ /* 
0x000fe400078e001b */ /*0440*/ IMAD.MOV.U32 R7, RZ, RZ, R28 ; /* 0x000000ffff077224 */ /* 0x000fe400078e001c */ /*0450*/ LEPC R10 ; /* 0x00000000000a734e */ /* 0x000fca0000000000 */ /*0460*/ MOV R3, 0x4d0 ; /* 0x000004d000037802 */ /* 0x000fe40000000f00 */ /*0470*/ MOV R20, 0x450 ; /* 0x0000045000147802 */ /* 0x000fc40000000f00 */ /*0480*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*0490*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*04a0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */ /* 0x000fc8000791e10a */ /*04b0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e250b */ /*04c0*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */ /* 0x003fea0003c00000 */ /*04d0*/ LDG.E R16, [R16.64] ; /* 0x0000002410107981 */ /* 0x000ea2000c1e1900 */ /*04e0*/ LDC.64 R2, c[0x4][R2] ; /* 0x0100000002027b82 */ /* 0x000e220000000a00 */ /*04f0*/ IMAD.MOV.U32 R6, RZ, RZ, R27 ; /* 0x000000ffff067224 */ /* 0x000fe400078e001b */ /*0500*/ STL.64 [R1], R18 ; /* 0x0000001201007387 */ /* 0x0003e20000100a00 */ /*0510*/ IMAD.MOV.U32 R7, RZ, RZ, R28 ; /* 0x000000ffff077224 */ /* 0x000fe400078e001c */ /*0520*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x28] ; /* 0x01000a00ff047624 */ /* 0x000fe200078e00ff */ /*0530*/ STL [R1+0x8], R26 ; /* 0x0000081a01007387 */ /* 0x0003e20000100800 */ /*0540*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x2c] ; /* 0x01000b00ff057624 */ /* 0x000fe200078e00ff */ /*0550*/ F2F.F64.F32 R8, R16 ; /* 0x0000001000087310 */ /* 0x004ea40000201800 */ /*0560*/ STL.64 [R1+0x10], R8 ; /* 0x0000100801007387 */ /* 0x0043e80000100a00 */ /*0570*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x003fe40000000000 */ /*0580*/ MOV R11, 0x5f0 ; /* 0x000005f0000b7802 */ /* 0x000fe40000000f00 */ /*0590*/ MOV R20, 0x570 ; /* 0x0000057000147802 */ /* 0x000fc40000000f00 */ /*05a0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*05b0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*05c0*/ IADD3 R20, 
P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*05d0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*05e0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x000fea0003c00000 */ /*05f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0600*/ BRA 0x600; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0610*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0620*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0630*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0640*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0650*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0660*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0670*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0680*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0690*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*06f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "cuda.h" // Unix系统下调用CUDA使用"cuda.h",Win系统下调用CUDA使用"cuda.runtime" #include "stdio.h" // 标准输入输出,后面调用函数printf #define N 10000 __global__ void vectorAdd(float *A, float *B, float *C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; C[i*N+j] = A[i*N+j] + B[i*N+j]; printf("======================================================\n"); printf("blockIdx.x:%d\tblockIdx.y:%d\n", blockIdx.x, blockIdx.y); printf("blockDim.x:%d\tblockDim.y:%d\n", blockDim.x, blockDim.y); printf("threadIdx.x:%d\tthreadIdx.y:%d\n", threadIdx.x, threadIdx.y); printf("i:%d j:%d C[i*N+j]:C[%2d]:%f\n", i, j, i*N+j, C[i*N+j]); } int main() { // 设置使用的GPU下标(从0开始) // 多个GPU在终端用命令'nvidia-smi'查看下标 int gpuDeviceIdx = 0; cudaSetDevice(gpuDeviceIdx); // 初始化设备(CPU)端变量 // 使用cudaMalloc方法分配指定大小的空间 float *Ad, *Bd, *Cd; int size = N*N*sizeof(float); cudaMalloc((void**)&Ad, size); cudaMalloc((void**)&Bd, size); cudaMalloc((void**)&Cd, size); // 设置程序在GPU上运行参数 // 1.grid下的block数目 // 2.block下的thread数目 dim3 bpg(10, 10); // 每个网格下的线程块个数(block num. per grid) // bpg(blockDim.x, blockDim.y) dim3 tpb(10, 10); // 每个线程块下的线程个数(thread num. per block) // tpb(threadIdx.x, threadIdx.y) // 根据资源分配的参数 // 在GPU上执行核函数(__global__修饰的函数) vectorAdd<<<bpg, tpb>>>(Ad, Bd, Cd); // 释放空间 cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); return 0; }
.file "tmpxft_001ab0f4_00000000-6_addVector-V2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ .type _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_, @function _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9vectorAddPfS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_, .-_Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ .globl _Z9vectorAddPfS_S_ .type _Z9vectorAddPfS_S_, @function _Z9vectorAddPfS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: 
.size _Z9vectorAddPfS_S_, .-_Z9vectorAddPfS_S_ .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $72, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $0, %edi call cudaSetDevice@PLT leaq 8(%rsp), %rdi movl $400000000, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $400000000, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $400000000, %esi call cudaMalloc@PLT movl $10, 32(%rsp) movl $10, 36(%rsp) movl $10, 44(%rsp) movl $10, 48(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L16 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z9vectorAddPfS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z9vectorAddPfS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: 
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "cuda.h" // Unix系统下调用CUDA使用"cuda.h",Win系统下调用CUDA使用"cuda.runtime" #include "stdio.h" // 标准输入输出,后面调用函数printf #define N 10000 __global__ void vectorAdd(float *A, float *B, float *C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; C[i*N+j] = A[i*N+j] + B[i*N+j]; printf("======================================================\n"); printf("blockIdx.x:%d\tblockIdx.y:%d\n", blockIdx.x, blockIdx.y); printf("blockDim.x:%d\tblockDim.y:%d\n", blockDim.x, blockDim.y); printf("threadIdx.x:%d\tthreadIdx.y:%d\n", threadIdx.x, threadIdx.y); printf("i:%d j:%d C[i*N+j]:C[%2d]:%f\n", i, j, i*N+j, C[i*N+j]); } int main() { // 设置使用的GPU下标(从0开始) // 多个GPU在终端用命令'nvidia-smi'查看下标 int gpuDeviceIdx = 0; cudaSetDevice(gpuDeviceIdx); // 初始化设备(CPU)端变量 // 使用cudaMalloc方法分配指定大小的空间 float *Ad, *Bd, *Cd; int size = N*N*sizeof(float); cudaMalloc((void**)&Ad, size); cudaMalloc((void**)&Bd, size); cudaMalloc((void**)&Cd, size); // 设置程序在GPU上运行参数 // 1.grid下的block数目 // 2.block下的thread数目 dim3 bpg(10, 10); // 每个网格下的线程块个数(block num. per grid) // bpg(blockDim.x, blockDim.y) dim3 tpb(10, 10); // 每个线程块下的线程个数(thread num. per block) // tpb(threadIdx.x, threadIdx.y) // 根据资源分配的参数 // 在GPU上执行核函数(__global__修饰的函数) vectorAdd<<<bpg, tpb>>>(Ad, Bd, Cd); // 释放空间 cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); return 0; }
#include "hip/hip_runtime.h" // Unix系统下调用CUDA使用"cuda.h",Win系统下调用CUDA使用"cuda.runtime" #include "stdio.h" // 标准输入输出,后面调用函数printf #define N 10000 __global__ void vectorAdd(float *A, float *B, float *C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; C[i*N+j] = A[i*N+j] + B[i*N+j]; printf("======================================================\n"); printf("blockIdx.x:%d\tblockIdx.y:%d\n", blockIdx.x, blockIdx.y); printf("blockDim.x:%d\tblockDim.y:%d\n", blockDim.x, blockDim.y); printf("threadIdx.x:%d\tthreadIdx.y:%d\n", threadIdx.x, threadIdx.y); printf("i:%d j:%d C[i*N+j]:C[%2d]:%f\n", i, j, i*N+j, C[i*N+j]); } int main() { // 设置使用的GPU下标(从0开始) // 多个GPU在终端用命令'nvidia-smi'查看下标 int gpuDeviceIdx = 0; hipSetDevice(gpuDeviceIdx); // 初始化设备(CPU)端变量 // 使用cudaMalloc方法分配指定大小的空间 float *Ad, *Bd, *Cd; int size = N*N*sizeof(float); hipMalloc((void**)&Ad, size); hipMalloc((void**)&Bd, size); hipMalloc((void**)&Cd, size); // 设置程序在GPU上运行参数 // 1.grid下的block数目 // 2.block下的thread数目 dim3 bpg(10, 10); // 每个网格下的线程块个数(block num. per grid) // bpg(blockDim.x, blockDim.y) dim3 tpb(10, 10); // 每个线程块下的线程个数(thread num. per block) // tpb(threadIdx.x, threadIdx.y) // 根据资源分配的参数 // 在GPU上执行核函数(__global__修饰的函数) vectorAdd<<<bpg, tpb>>>(Ad, Bd, Cd); // 释放空间 hipFree(Ad); hipFree(Bd); hipFree(Cd); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include "hip/hip_runtime.h" // Unix系统下调用CUDA使用"cuda.h",Win系统下调用CUDA使用"cuda.runtime" #include "stdio.h" // 标准输入输出,后面调用函数printf #define N 10000 __global__ void vectorAdd(float *A, float *B, float *C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; C[i*N+j] = A[i*N+j] + B[i*N+j]; printf("======================================================\n"); printf("blockIdx.x:%d\tblockIdx.y:%d\n", blockIdx.x, blockIdx.y); printf("blockDim.x:%d\tblockDim.y:%d\n", blockDim.x, blockDim.y); printf("threadIdx.x:%d\tthreadIdx.y:%d\n", threadIdx.x, threadIdx.y); printf("i:%d j:%d C[i*N+j]:C[%2d]:%f\n", i, j, i*N+j, C[i*N+j]); } int main() { // 设置使用的GPU下标(从0开始) // 多个GPU在终端用命令'nvidia-smi'查看下标 int gpuDeviceIdx = 0; hipSetDevice(gpuDeviceIdx); // 初始化设备(CPU)端变量 // 使用cudaMalloc方法分配指定大小的空间 float *Ad, *Bd, *Cd; int size = N*N*sizeof(float); hipMalloc((void**)&Ad, size); hipMalloc((void**)&Bd, size); hipMalloc((void**)&Cd, size); // 设置程序在GPU上运行参数 // 1.grid下的block数目 // 2.block下的thread数目 dim3 bpg(10, 10); // 每个网格下的线程块个数(block num. per grid) // bpg(blockDim.x, blockDim.y) dim3 tpb(10, 10); // 每个线程块下的线程个数(thread num. per block) // tpb(threadIdx.x, threadIdx.y) // 根据资源分配的参数 // 在GPU上执行核函数(__global__修饰的函数) vectorAdd<<<bpg, tpb>>>(Ad, Bd, Cd); // 释放空间 hipFree(Ad); hipFree(Bd); hipFree(Cd); return 0; }
.text .file "addVector-V2.hip" .globl _Z24__device_stub__vectorAddPfS_S_ # -- Begin function _Z24__device_stub__vectorAddPfS_S_ .p2align 4, 0x90 .type _Z24__device_stub__vectorAddPfS_S_,@function _Z24__device_stub__vectorAddPfS_S_: # @_Z24__device_stub__vectorAddPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9vectorAddPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__vectorAddPfS_S_, .Lfunc_end0-_Z24__device_stub__vectorAddPfS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 xorl %edi, %edi callq hipSetDevice leaq 16(%rsp), %rdi movl $400000000, %esi # imm = 0x17D78400 callq hipMalloc leaq 8(%rsp), %rdi movl $400000000, %esi # imm = 0x17D78400 callq hipMalloc movq %rsp, %rdi movl $400000000, %esi # imm = 0x17D78400 callq hipMalloc movabsq $42949672970, %rdi # imm = 0xA0000000A movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq (%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration 
movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z9vectorAddPfS_S_, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree xorl %eax, %eax addq $120, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9vectorAddPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9vectorAddPfS_S_,@object # @_Z9vectorAddPfS_S_ .section .rodata,"a",@progbits .globl _Z9vectorAddPfS_S_ .p2align 3, 0x0 _Z9vectorAddPfS_S_: .quad _Z24__device_stub__vectorAddPfS_S_ .size _Z9vectorAddPfS_S_, 8 .type 
.L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9vectorAddPfS_S_" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__vectorAddPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9vectorAddPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001ab0f4_00000000-6_addVector-V2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ .type _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_, @function _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9vectorAddPfS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_, .-_Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ .globl _Z9vectorAddPfS_S_ .type _Z9vectorAddPfS_S_, @function _Z9vectorAddPfS_S_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: 
.size _Z9vectorAddPfS_S_, .-_Z9vectorAddPfS_S_ .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $72, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $0, %edi call cudaSetDevice@PLT leaq 8(%rsp), %rdi movl $400000000, %esi call cudaMalloc@PLT leaq 16(%rsp), %rdi movl $400000000, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $400000000, %esi call cudaMalloc@PLT movl $10, 32(%rsp) movl $10, 36(%rsp) movl $10, 44(%rsp) movl $10, 48(%rsp) movl $0, %r9d movl $0, %r8d movq 44(%rsp), %rdx movl $1, %ecx movq 32(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L16 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 24(%rsp), %rdx movq 16(%rsp), %rsi movq 8(%rsp), %rdi call _Z32__device_stub__Z9vectorAddPfS_S_PfS_S_ jmp .L12 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z9vectorAddPfS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z9vectorAddPfS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: 
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "addVector-V2.hip" .globl _Z24__device_stub__vectorAddPfS_S_ # -- Begin function _Z24__device_stub__vectorAddPfS_S_ .p2align 4, 0x90 .type _Z24__device_stub__vectorAddPfS_S_,@function _Z24__device_stub__vectorAddPfS_S_: # @_Z24__device_stub__vectorAddPfS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9vectorAddPfS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__vectorAddPfS_S_, .Lfunc_end0-_Z24__device_stub__vectorAddPfS_S_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 xorl %edi, %edi callq hipSetDevice leaq 16(%rsp), %rdi movl $400000000, %esi # imm = 0x17D78400 callq hipMalloc leaq 8(%rsp), %rdi movl $400000000, %esi # imm = 0x17D78400 callq hipMalloc movq %rsp, %rdi movl $400000000, %esi # imm = 0x17D78400 callq hipMalloc movabsq $42949672970, %rdi # imm = 0xA0000000A movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 16(%rsp), %rax movq 8(%rsp), %rcx movq (%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration 
movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z9vectorAddPfS_S_, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 16(%rsp), %rdi callq hipFree movq 8(%rsp), %rdi callq hipFree movq (%rsp), %rdi callq hipFree xorl %eax, %eax addq $120, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9vectorAddPfS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9vectorAddPfS_S_,@object # @_Z9vectorAddPfS_S_ .section .rodata,"a",@progbits .globl _Z9vectorAddPfS_S_ .p2align 3, 0x0 _Z9vectorAddPfS_S_: .quad _Z24__device_stub__vectorAddPfS_S_ .size _Z9vectorAddPfS_S_, 8 .type 
.L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9vectorAddPfS_S_" .size .L__unnamed_1, 19 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__vectorAddPfS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9vectorAddPfS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #define BLOCK_SIZE 8 #define GRID_SIZE 8 //struct timespec start, finish; //double elapsed; __global__ void ising_kernel(int *G,int *newG,double *w,int n){ int x,y; __shared__ double shared_w[25]; __shared__ int shared_G[(BLOCK_SIZE+4)*(BLOCK_SIZE+4)]; for(int i=0;i<25;i++){ shared_w[i]=w[i]; } unsigned int xBlock = blockDim.x * blockIdx.x; unsigned int yBlock = blockDim.y * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int tempX = xBlock + threadIdx.x; unsigned int tempY = yBlock + threadIdx.y; int iterations; if (n%(BLOCK_SIZE*GRID_SIZE)==0){ iterations=n/(BLOCK_SIZE*GRID_SIZE); }else{ iterations=n/(BLOCK_SIZE*GRID_SIZE)+1; } for(int i=0;i<iterations;i++){ xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i); for(int j=0;j<iterations;j++){ yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j); shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]=G[((xIndex+n)%n)*n+(yIndex+n)%n]; if(threadIdx.x==0){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex-k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex-k+n)%n)*n+(yIndex+l+n)%n]; } } } }else{ shared_G[(2-1)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-1+n)%n)*n+(yIndex+n)%n]; shared_G[(2-2)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-2+n)%n)*n+(yIndex+n)%n]; } }else if(threadIdx.x==BLOCK_SIZE-1){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex+k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex+k+n)%n)*n+(yIndex+l+n)%n]; } } } 
}else { shared_G[(2+1+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+1+n)%n)*n+(yIndex+n)%n]; shared_G[(2+2+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+2+n)%n)*n+(yIndex+n)%n]; } }else{ if(threadIdx.y==0){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-1)]=G[((xIndex+n)%n)*n+(yIndex-1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-2)]=G[((xIndex+n)%n)*n+(yIndex-2+n)%n]; }else if(threadIdx.y==BLOCK_SIZE-1){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+1+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+2+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+2+n)%n]; } } __syncthreads(); if(xIndex<n&&yIndex<n){ double weight=0; //printf("BLOCK IDX X: %d\n",blockIdx.x); //printf("BLOCK IDX Y: %d\n",blockIdx.y); for(int ibor=-2;ibor<3;ibor++){ for(int jbor=-2;jbor<3;jbor++){ weight+=shared_w[(ibor+2)*5+jbor+2]*shared_G[(threadIdx.x+2+ibor)*(BLOCK_SIZE+4) +(threadIdx.y+2+jbor) ]; } } if(weight<1e-4&&weight>-(1e-4)){ // newG[xIndex*n+yIndex]=G[xIndex*n+yIndex]; newG[xIndex*n+yIndex]=shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]; }else if(weight>0){ newG[xIndex*n+yIndex]=1; }else{ newG[xIndex*n+yIndex]=-1; } } } } } void ising( int *G, double *w, int k, int n){ int *newG,*swapG,*G2; double *w2; cudaMallocManaged(&newG,n*n*sizeof(int)); cudaMallocManaged(&G2,n*n*sizeof(int)); cudaMallocManaged(&w2,25*sizeof(double)); cudaMemcpy( w2, w, 25*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy( G2, G, n*n*sizeof(int),cudaMemcpyHostToDevice); double total_time=0; for(int iter=0;iter<k;iter++){ int grid_dimension; bool repeat=true; dim3 grid(GRID_SIZE, GRID_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE); // struct timeval startwtime, endwtime; // gettimeofday (&startwtime, NULL); ising_kernel<<<grid,block>>>(G2,newG,w2,n); cudaDeviceSynchronize(); // gettimeofday (&endwtime, NULL); // double time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec); // total_time+=time; for(int 
i=0;i<n;i++){ for(int j=0;j<n;j++){ if(repeat&&newG[i*n+j]!=G2[i*n+j]){ repeat=false; } int temp=newG[i*n+j]; newG[i*n+j]=G2[i*n+j]; G2[i*n+j]=temp; } } if(repeat){ break; } } cudaMemcpy(G, G2, n*n*sizeof(int),cudaMemcpyDeviceToHost); // printf("Seconds are %lf ",total_time); } int main() { printf("==========================START=============================\n"); double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004}; int n=517; int X[n*n]; size_t size; FILE *fp = fopen("conf-init.bin", "rb"); size = fread(X, sizeof(int), n * n, fp); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp); int k=1; ising(X,weight,k,n); int checkX[n*n]; printf("k=1:\n"); FILE *fp2 = fopen("conf-1.bin", "rb"); size = fread(checkX, sizeof(int), n * n, fp2); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp2); bool flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX[i*n+j]!=X[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=4:\n"); k=4 ; int X2[n*n]; FILE *fpA = fopen("conf-init.bin", "rb"); size = fread(X2, sizeof(int), n * n, fpA); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpA); int checkX2[n*n]; FILE *fp3 = fopen("conf-4.bin", "rb"); size = fread(checkX2, sizeof(int), n * n, fp3); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp3); ising(X2,weight,k,n); flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX2[i*n+j]!=X2[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=11:\n"); k=11 ; int X3[n*n]; FILE *fpB = fopen("conf-init.bin", "rb"); size = fread(X3, sizeof(int), n * n, fpB); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpB); int checkX3[n*n]; FILE *fp4 = fopen("conf-11.bin", "rb"); size = fread(checkX3, sizeof(int), n * n, fp4); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp4); 
flag=true; ising(X3,weight,k,n); for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX3[i*n+j]!=X3[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("\n=========================END=========================="); return 0; }
.file "tmpxft_00085fcc_00000000-6_ising-v3.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi .type _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi, @function _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi: .LFB2083: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12ising_kernelPiS_Pdi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi, .-_Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi .globl _Z12ising_kernelPiS_Pdi .type _Z12ising_kernelPiS_Pdi, @function _Z12ising_kernelPiS_Pdi: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z12ising_kernelPiS_Pdi, .-_Z12ising_kernelPiS_Pdi .globl _Z5isingPiPdii .type _Z5isingPiPdii, @function _Z5isingPiPdii: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %rdi, %r15 movq %rdi, 8(%rsp) movq %rsi, %rbp movl %edx, %r13d movl %ecx, %ebx movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl %ecx, %r14d imull %ecx, %r14d movslq %r14d, %r14 salq $2, %r14 leaq 24(%rsp), %rdi movl $1, %edx movq %r14, %rsi call cudaMallocManaged@PLT leaq 32(%rsp), %rdi movl $1, %edx movq %r14, %rsi call cudaMallocManaged@PLT leaq 40(%rsp), %rdi movl $1, %edx movl $200, %esi call cudaMallocManaged@PLT movl $1, %ecx movl $200, %edx movq %rbp, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r14, %rdx movq %r15, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT testl %r13d, %r13d jle .L12 movslq %ebx, %rbp salq $2, %rbp movl $0, %r12d movl $1, %r15d movq %r14, (%rsp) jmp .L19 .L28: movl %ebx, %ecx movq 40(%rsp), %rdx movq 24(%rsp), %rsi movq 32(%rsp), %rdi call _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi jmp .L13 .L16: movq %rax, %rsi addq 24(%rsp), %rsi movl (%rsi), %r11d movq 32(%rsp), %r14 movl (%r14,%rax), %r14d movl %r14d, (%rsi) movq 32(%rsp), %rsi movl %r11d, (%rsi,%rax) addq $4, %rax addl $1, %edx cmpl %r9d, %edx je .L26 .L17: testb %r8b, %r8b je .L16 movslq %edx, %rsi movq 24(%rsp), %r11 movq 32(%rsp), %r8 movl (%r8,%rsi,4), %r8d cmpl %r8d, (%r11,%rsi,4) sete %r8b jmp .L16 .L26: addl $1, %ecx addl %ebx, %edi addl %ebx, %r9d addq %rbp, %r10 cmpl %ecx, %ebx je .L18 .L15: movl %edi, %edx movq 
%r10, %rax jmp .L17 .L18: testb %r8b, %r8b jne .L24 addl $1, %r12d cmpl %r12d, %r13d je .L27 .L19: movl $8, 48(%rsp) movl $8, 52(%rsp) movl $1, 56(%rsp) movl $8, 60(%rsp) movl $8, 64(%rsp) movl $1, 68(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L28 .L13: call cudaDeviceSynchronize@PLT testl %ebx, %ebx jle .L23 movl %ebx, %r9d movl $0, %r10d movl $0, %edi movl $0, %ecx movl %r15d, %r8d jmp .L15 .L27: movq (%rsp), %r14 jmp .L12 .L23: movq (%rsp), %r14 .L12: movl $2, %ecx movq %r14, %rdx movq 32(%rsp), %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L29 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state movq (%rsp), %r14 jmp .L12 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z5isingPiPdii, .-_Z5isingPiPdii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "==========================START=============================\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC7: .string "rb" .LC8: .string "conf-init.bin" .LC9: .string "k=1:\n" .LC10: .string "conf-1.bin" .LC11: .string "\nWRONG IMPLEMENTATION\n" .LC12: .string "\nCORRECT IMPLEMENTATION\n" .LC13: .string "k=4:\n" .LC14: .string "conf-4.bin" .LC15: .string "k=11:\n" .LC16: .string "conf-11.bin" .section .rodata.str1.8 .align 8 .LC17: .string "\n=========================END==========================" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r13 pushq %r12 pushq %rbx subq $216, %rsp .cfi_offset 13, -24 .cfi_offset 12, -32 .cfi_offset 3, -40 movq %fs:40, %rax movq %rax, 
-40(%rbp) xorl %eax, %eax leaq .LC0(%rip), %rsi movl $2, %edi call __printf_chk@PLT movsd .LC1(%rip), %xmm1 movsd %xmm1, -240(%rbp) movsd .LC2(%rip), %xmm0 movsd %xmm0, -232(%rbp) movsd .LC3(%rip), %xmm2 movsd %xmm2, -224(%rbp) movsd %xmm0, -216(%rbp) movsd %xmm1, -208(%rbp) movsd %xmm0, -200(%rbp) movsd .LC4(%rip), %xmm3 movsd %xmm3, -192(%rbp) movsd .LC5(%rip), %xmm4 movsd %xmm4, -184(%rbp) movsd %xmm3, -176(%rbp) movsd %xmm0, -168(%rbp) movsd %xmm2, -160(%rbp) movsd %xmm4, -152(%rbp) movq $0x000000000, -144(%rbp) movsd %xmm4, -136(%rbp) movsd %xmm2, -128(%rbp) movsd %xmm0, -120(%rbp) movsd %xmm3, -112(%rbp) movsd %xmm4, -104(%rbp) movsd %xmm3, -96(%rbp) movsd %xmm0, -88(%rbp) movsd %xmm1, -80(%rbp) movsd %xmm0, -72(%rbp) movsd %xmm2, -64(%rbp) movsd %xmm0, -56(%rbp) movsd %xmm1, -48(%rbp) leaq -1069056(%rsp), %rax .L31: cmpq %rax, %rsp je .L32 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L31 .L32: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %rbx leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %rbx, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L75 movq %r12, %rdi call fclose@PLT leaq -240(%rbp), %rsi movl $517, %ecx movl $1, %edx movq %rbx, %rdi call _Z5isingPiPdii leaq -1069056(%rsp), %rax .L35: cmpq %rax, %rsp je .L36 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L35 .L75: movl $1, %edi call exit@PLT .L36: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %r12 leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC7(%rip), %rsi leaq .LC10(%rip), %rdi call fopen@PLT movq %rax, %r13 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %r12, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L76 movq %r13, %rdi call fclose@PLT movl $2068, %edx .L39: leaq -2068(%rdx), %rax .L42: movl (%rbx,%rax), %ecx cmpl %ecx, (%r12,%rax) jne .L77 addq $4, %rax cmpq %rdx, %rax jne .L42 addq $2068, %rdx cmpq $1071224, %rdx jne .L39 leaq 
.LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L41 .L76: movl $1, %edi call exit@PLT .L77: leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L41: leaq .LC13(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq -1069056(%rsp), %rax .L44: cmpq %rax, %rsp je .L45 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L44 .L45: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %rbx leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %rbx, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L78 movq %r12, %rdi call fclose@PLT leaq -1069056(%rsp), %rax .L48: cmpq %rax, %rsp je .L49 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L48 .L78: movl $1, %edi call exit@PLT .L49: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %r12 leaq .LC7(%rip), %rsi leaq .LC14(%rip), %rdi call fopen@PLT movq %rax, %r13 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %r12, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L79 movq %r13, %rdi call fclose@PLT leaq -240(%rbp), %rsi movl $517, %ecx movl $4, %edx movq %rbx, %rdi call _Z5isingPiPdii movl $2068, %edx .L52: leaq -2068(%rdx), %rax .L55: movl (%rbx,%rax), %edi cmpl %edi, (%r12,%rax) jne .L80 addq $4, %rax cmpq %rdx, %rax jne .L55 addq $2068, %rdx cmpq $1071224, %rdx jne .L52 leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L54 .L79: movl $1, %edi call exit@PLT .L80: leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L54: leaq .LC15(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq -1069056(%rsp), %rax .L57: cmpq %rax, %rsp je .L58 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L57 .L58: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %rbx leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %rbx, %rdi call __fread_chk@PLT cmpq 
$267289, %rax jne .L81 movq %r12, %rdi call fclose@PLT leaq -1069056(%rsp), %rax .L61: cmpq %rax, %rsp je .L62 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L61 .L81: movl $1, %edi call exit@PLT .L62: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %r12 leaq .LC7(%rip), %rsi leaq .LC16(%rip), %rdi call fopen@PLT movq %rax, %r13 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %r12, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L82 movq %r13, %rdi call fclose@PLT leaq -240(%rbp), %rsi movl $517, %ecx movl $11, %edx movq %rbx, %rdi call _Z5isingPiPdii movl $2068, %edx .L65: leaq -2068(%rdx), %rax .L68: movl (%rbx,%rax), %esi cmpl %esi, (%r12,%rax) jne .L83 addq $4, %rax cmpq %rax, %rdx jne .L68 addq $2068, %rdx cmpq $1071224, %rdx jne .L65 leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L67 .L82: movl $1, %edi call exit@PLT .L83: leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L67: leaq .LC17(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq -40(%rbp), %rax subq %fs:40, %rax jne .L84 movl $0, %eax leaq -24(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .L84: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC18: .string "_Z12ising_kernelPiS_Pdi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC18(%rip), %rdx movq %rdx, %rcx leaq _Z12ising_kernelPiS_Pdi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), 
%rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long -755914244 .long 1064329805 .align 8 .LC2: .long -755914244 .long 1066426957 .align 8 .LC3: .long 1992864825 .long 1067098046 .align 8 .LC4: .long 1443109011 .long 1068641550 .align 8 .LC5: .long 1168231105 .long 1069413302 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #define BLOCK_SIZE 8 #define GRID_SIZE 8 //struct timespec start, finish; //double elapsed; __global__ void ising_kernel(int *G,int *newG,double *w,int n){ int x,y; __shared__ double shared_w[25]; __shared__ int shared_G[(BLOCK_SIZE+4)*(BLOCK_SIZE+4)]; for(int i=0;i<25;i++){ shared_w[i]=w[i]; } unsigned int xBlock = blockDim.x * blockIdx.x; unsigned int yBlock = blockDim.y * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int tempX = xBlock + threadIdx.x; unsigned int tempY = yBlock + threadIdx.y; int iterations; if (n%(BLOCK_SIZE*GRID_SIZE)==0){ iterations=n/(BLOCK_SIZE*GRID_SIZE); }else{ iterations=n/(BLOCK_SIZE*GRID_SIZE)+1; } for(int i=0;i<iterations;i++){ xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i); for(int j=0;j<iterations;j++){ yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j); shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]=G[((xIndex+n)%n)*n+(yIndex+n)%n]; if(threadIdx.x==0){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex-k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex-k+n)%n)*n+(yIndex+l+n)%n]; } } } }else{ shared_G[(2-1)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-1+n)%n)*n+(yIndex+n)%n]; shared_G[(2-2)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-2+n)%n)*n+(yIndex+n)%n]; } }else if(threadIdx.x==BLOCK_SIZE-1){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex+k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex+k+n)%n)*n+(yIndex+l+n)%n]; } } } 
}else { shared_G[(2+1+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+1+n)%n)*n+(yIndex+n)%n]; shared_G[(2+2+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+2+n)%n)*n+(yIndex+n)%n]; } }else{ if(threadIdx.y==0){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-1)]=G[((xIndex+n)%n)*n+(yIndex-1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-2)]=G[((xIndex+n)%n)*n+(yIndex-2+n)%n]; }else if(threadIdx.y==BLOCK_SIZE-1){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+1+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+2+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+2+n)%n]; } } __syncthreads(); if(xIndex<n&&yIndex<n){ double weight=0; //printf("BLOCK IDX X: %d\n",blockIdx.x); //printf("BLOCK IDX Y: %d\n",blockIdx.y); for(int ibor=-2;ibor<3;ibor++){ for(int jbor=-2;jbor<3;jbor++){ weight+=shared_w[(ibor+2)*5+jbor+2]*shared_G[(threadIdx.x+2+ibor)*(BLOCK_SIZE+4) +(threadIdx.y+2+jbor) ]; } } if(weight<1e-4&&weight>-(1e-4)){ // newG[xIndex*n+yIndex]=G[xIndex*n+yIndex]; newG[xIndex*n+yIndex]=shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]; }else if(weight>0){ newG[xIndex*n+yIndex]=1; }else{ newG[xIndex*n+yIndex]=-1; } } } } } void ising( int *G, double *w, int k, int n){ int *newG,*swapG,*G2; double *w2; cudaMallocManaged(&newG,n*n*sizeof(int)); cudaMallocManaged(&G2,n*n*sizeof(int)); cudaMallocManaged(&w2,25*sizeof(double)); cudaMemcpy( w2, w, 25*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy( G2, G, n*n*sizeof(int),cudaMemcpyHostToDevice); double total_time=0; for(int iter=0;iter<k;iter++){ int grid_dimension; bool repeat=true; dim3 grid(GRID_SIZE, GRID_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE); // struct timeval startwtime, endwtime; // gettimeofday (&startwtime, NULL); ising_kernel<<<grid,block>>>(G2,newG,w2,n); cudaDeviceSynchronize(); // gettimeofday (&endwtime, NULL); // double time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec); // total_time+=time; for(int 
i=0;i<n;i++){ for(int j=0;j<n;j++){ if(repeat&&newG[i*n+j]!=G2[i*n+j]){ repeat=false; } int temp=newG[i*n+j]; newG[i*n+j]=G2[i*n+j]; G2[i*n+j]=temp; } } if(repeat){ break; } } cudaMemcpy(G, G2, n*n*sizeof(int),cudaMemcpyDeviceToHost); // printf("Seconds are %lf ",total_time); } int main() { printf("==========================START=============================\n"); double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004}; int n=517; int X[n*n]; size_t size; FILE *fp = fopen("conf-init.bin", "rb"); size = fread(X, sizeof(int), n * n, fp); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp); int k=1; ising(X,weight,k,n); int checkX[n*n]; printf("k=1:\n"); FILE *fp2 = fopen("conf-1.bin", "rb"); size = fread(checkX, sizeof(int), n * n, fp2); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp2); bool flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX[i*n+j]!=X[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=4:\n"); k=4 ; int X2[n*n]; FILE *fpA = fopen("conf-init.bin", "rb"); size = fread(X2, sizeof(int), n * n, fpA); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpA); int checkX2[n*n]; FILE *fp3 = fopen("conf-4.bin", "rb"); size = fread(checkX2, sizeof(int), n * n, fp3); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp3); ising(X2,weight,k,n); flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX2[i*n+j]!=X2[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=11:\n"); k=11 ; int X3[n*n]; FILE *fpB = fopen("conf-init.bin", "rb"); size = fread(X3, sizeof(int), n * n, fpB); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpB); int checkX3[n*n]; FILE *fp4 = fopen("conf-11.bin", "rb"); size = fread(checkX3, sizeof(int), n * n, fp4); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp4); 
flag=true; ising(X3,weight,k,n); for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX3[i*n+j]!=X3[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("\n=========================END=========================="); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #define BLOCK_SIZE 8 #define GRID_SIZE 8 //struct timespec start, finish; //double elapsed; __global__ void ising_kernel(int *G,int *newG,double *w,int n){ int x,y; __shared__ double shared_w[25]; __shared__ int shared_G[(BLOCK_SIZE+4)*(BLOCK_SIZE+4)]; for(int i=0;i<25;i++){ shared_w[i]=w[i]; } unsigned int xBlock = blockDim.x * blockIdx.x; unsigned int yBlock = blockDim.y * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int tempX = xBlock + threadIdx.x; unsigned int tempY = yBlock + threadIdx.y; int iterations; if (n%(BLOCK_SIZE*GRID_SIZE)==0){ iterations=n/(BLOCK_SIZE*GRID_SIZE); }else{ iterations=n/(BLOCK_SIZE*GRID_SIZE)+1; } for(int i=0;i<iterations;i++){ xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i); for(int j=0;j<iterations;j++){ yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j); shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]=G[((xIndex+n)%n)*n+(yIndex+n)%n]; if(threadIdx.x==0){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex-k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex-k+n)%n)*n+(yIndex+l+n)%n]; } } } }else{ shared_G[(2-1)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-1+n)%n)*n+(yIndex+n)%n]; shared_G[(2-2)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-2+n)%n)*n+(yIndex+n)%n]; } }else if(threadIdx.x==BLOCK_SIZE-1){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex+k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ 
shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex+k+n)%n)*n+(yIndex+l+n)%n]; } } } }else { shared_G[(2+1+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+1+n)%n)*n+(yIndex+n)%n]; shared_G[(2+2+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+2+n)%n)*n+(yIndex+n)%n]; } }else{ if(threadIdx.y==0){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-1)]=G[((xIndex+n)%n)*n+(yIndex-1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-2)]=G[((xIndex+n)%n)*n+(yIndex-2+n)%n]; }else if(threadIdx.y==BLOCK_SIZE-1){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+1+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+2+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+2+n)%n]; } } __syncthreads(); if(xIndex<n&&yIndex<n){ double weight=0; //printf("BLOCK IDX X: %d\n",blockIdx.x); //printf("BLOCK IDX Y: %d\n",blockIdx.y); for(int ibor=-2;ibor<3;ibor++){ for(int jbor=-2;jbor<3;jbor++){ weight+=shared_w[(ibor+2)*5+jbor+2]*shared_G[(threadIdx.x+2+ibor)*(BLOCK_SIZE+4) +(threadIdx.y+2+jbor) ]; } } if(weight<1e-4&&weight>-(1e-4)){ // newG[xIndex*n+yIndex]=G[xIndex*n+yIndex]; newG[xIndex*n+yIndex]=shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]; }else if(weight>0){ newG[xIndex*n+yIndex]=1; }else{ newG[xIndex*n+yIndex]=-1; } } } } } void ising( int *G, double *w, int k, int n){ int *newG,*swapG,*G2; double *w2; hipMallocManaged(&newG,n*n*sizeof(int)); hipMallocManaged(&G2,n*n*sizeof(int)); hipMallocManaged(&w2,25*sizeof(double)); hipMemcpy( w2, w, 25*sizeof(double),hipMemcpyHostToDevice); hipMemcpy( G2, G, n*n*sizeof(int),hipMemcpyHostToDevice); double total_time=0; for(int iter=0;iter<k;iter++){ int grid_dimension; bool repeat=true; dim3 grid(GRID_SIZE, GRID_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE); // struct timeval startwtime, endwtime; // gettimeofday (&startwtime, NULL); ising_kernel<<<grid,block>>>(G2,newG,w2,n); hipDeviceSynchronize(); // gettimeofday (&endwtime, NULL); // double time = (double)((endwtime.tv_usec - 
startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec); // total_time+=time; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(repeat&&newG[i*n+j]!=G2[i*n+j]){ repeat=false; } int temp=newG[i*n+j]; newG[i*n+j]=G2[i*n+j]; G2[i*n+j]=temp; } } if(repeat){ break; } } hipMemcpy(G, G2, n*n*sizeof(int),hipMemcpyDeviceToHost); // printf("Seconds are %lf ",total_time); } int main() { printf("==========================START=============================\n"); double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004}; int n=517; int X[n*n]; size_t size; FILE *fp = fopen("conf-init.bin", "rb"); size = fread(X, sizeof(int), n * n, fp); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp); int k=1; ising(X,weight,k,n); int checkX[n*n]; printf("k=1:\n"); FILE *fp2 = fopen("conf-1.bin", "rb"); size = fread(checkX, sizeof(int), n * n, fp2); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp2); bool flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX[i*n+j]!=X[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=4:\n"); k=4 ; int X2[n*n]; FILE *fpA = fopen("conf-init.bin", "rb"); size = fread(X2, sizeof(int), n * n, fpA); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpA); int checkX2[n*n]; FILE *fp3 = fopen("conf-4.bin", "rb"); size = fread(checkX2, sizeof(int), n * n, fp3); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp3); ising(X2,weight,k,n); flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX2[i*n+j]!=X2[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=11:\n"); k=11 ; int X3[n*n]; FILE *fpB = fopen("conf-init.bin", "rb"); size = fread(X3, sizeof(int), n * n, fpB); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpB); int checkX3[n*n]; FILE *fp4 = fopen("conf-11.bin", "rb"); size 
= fread(checkX3, sizeof(int), n * n, fp4); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp4); flag=true; ising(X3,weight,k,n); for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX3[i*n+j]!=X3[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("\n=========================END=========================="); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #define BLOCK_SIZE 8 #define GRID_SIZE 8 //struct timespec start, finish; //double elapsed; __global__ void ising_kernel(int *G,int *newG,double *w,int n){ int x,y; __shared__ double shared_w[25]; __shared__ int shared_G[(BLOCK_SIZE+4)*(BLOCK_SIZE+4)]; for(int i=0;i<25;i++){ shared_w[i]=w[i]; } unsigned int xBlock = blockDim.x * blockIdx.x; unsigned int yBlock = blockDim.y * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int tempX = xBlock + threadIdx.x; unsigned int tempY = yBlock + threadIdx.y; int iterations; if (n%(BLOCK_SIZE*GRID_SIZE)==0){ iterations=n/(BLOCK_SIZE*GRID_SIZE); }else{ iterations=n/(BLOCK_SIZE*GRID_SIZE)+1; } for(int i=0;i<iterations;i++){ xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i); for(int j=0;j<iterations;j++){ yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j); shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]=G[((xIndex+n)%n)*n+(yIndex+n)%n]; if(threadIdx.x==0){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex-k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2-k)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex-k+n)%n)*n+(yIndex+l+n)%n]; } } } }else{ shared_G[(2-1)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-1+n)%n)*n+(yIndex+n)%n]; shared_G[(2-2)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-2+n)%n)*n+(yIndex+n)%n]; } }else if(threadIdx.x==BLOCK_SIZE-1){ if (threadIdx.y==0){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex+k+n)%n)*n+(yIndex-l+n)%n]; } } } }else if(threadIdx.y==BLOCK_SIZE-1){ for(int k=0;k<3;k++){ for(int l=0;l<3;l++){ if(!(k==0&&l==0)){ 
shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex+k+n)%n)*n+(yIndex+l+n)%n]; } } } }else { shared_G[(2+1+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+1+n)%n)*n+(yIndex+n)%n]; shared_G[(2+2+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+2+n)%n)*n+(yIndex+n)%n]; } }else{ if(threadIdx.y==0){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-1)]=G[((xIndex+n)%n)*n+(yIndex-1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-2)]=G[((xIndex+n)%n)*n+(yIndex-2+n)%n]; }else if(threadIdx.y==BLOCK_SIZE-1){ shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+1+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+1+n)%n]; shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+2+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+2+n)%n]; } } __syncthreads(); if(xIndex<n&&yIndex<n){ double weight=0; //printf("BLOCK IDX X: %d\n",blockIdx.x); //printf("BLOCK IDX Y: %d\n",blockIdx.y); for(int ibor=-2;ibor<3;ibor++){ for(int jbor=-2;jbor<3;jbor++){ weight+=shared_w[(ibor+2)*5+jbor+2]*shared_G[(threadIdx.x+2+ibor)*(BLOCK_SIZE+4) +(threadIdx.y+2+jbor) ]; } } if(weight<1e-4&&weight>-(1e-4)){ // newG[xIndex*n+yIndex]=G[xIndex*n+yIndex]; newG[xIndex*n+yIndex]=shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]; }else if(weight>0){ newG[xIndex*n+yIndex]=1; }else{ newG[xIndex*n+yIndex]=-1; } } } } } void ising( int *G, double *w, int k, int n){ int *newG,*swapG,*G2; double *w2; hipMallocManaged(&newG,n*n*sizeof(int)); hipMallocManaged(&G2,n*n*sizeof(int)); hipMallocManaged(&w2,25*sizeof(double)); hipMemcpy( w2, w, 25*sizeof(double),hipMemcpyHostToDevice); hipMemcpy( G2, G, n*n*sizeof(int),hipMemcpyHostToDevice); double total_time=0; for(int iter=0;iter<k;iter++){ int grid_dimension; bool repeat=true; dim3 grid(GRID_SIZE, GRID_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE); // struct timeval startwtime, endwtime; // gettimeofday (&startwtime, NULL); ising_kernel<<<grid,block>>>(G2,newG,w2,n); hipDeviceSynchronize(); // gettimeofday (&endwtime, NULL); // double time = (double)((endwtime.tv_usec - 
startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec); // total_time+=time; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(repeat&&newG[i*n+j]!=G2[i*n+j]){ repeat=false; } int temp=newG[i*n+j]; newG[i*n+j]=G2[i*n+j]; G2[i*n+j]=temp; } } if(repeat){ break; } } hipMemcpy(G, G2, n*n*sizeof(int),hipMemcpyDeviceToHost); // printf("Seconds are %lf ",total_time); } int main() { printf("==========================START=============================\n"); double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004}; int n=517; int X[n*n]; size_t size; FILE *fp = fopen("conf-init.bin", "rb"); size = fread(X, sizeof(int), n * n, fp); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp); int k=1; ising(X,weight,k,n); int checkX[n*n]; printf("k=1:\n"); FILE *fp2 = fopen("conf-1.bin", "rb"); size = fread(checkX, sizeof(int), n * n, fp2); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp2); bool flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX[i*n+j]!=X[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=4:\n"); k=4 ; int X2[n*n]; FILE *fpA = fopen("conf-init.bin", "rb"); size = fread(X2, sizeof(int), n * n, fpA); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpA); int checkX2[n*n]; FILE *fp3 = fopen("conf-4.bin", "rb"); size = fread(checkX2, sizeof(int), n * n, fp3); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp3); ising(X2,weight,k,n); flag=true; for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX2[i*n+j]!=X2[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("k=11:\n"); k=11 ; int X3[n*n]; FILE *fpB = fopen("conf-init.bin", "rb"); size = fread(X3, sizeof(int), n * n, fpB); if(size!=n*n) exit(EXIT_FAILURE); fclose(fpB); int checkX3[n*n]; FILE *fp4 = fopen("conf-11.bin", "rb"); size 
= fread(checkX3, sizeof(int), n * n, fp4); if(size!=n*n) exit(EXIT_FAILURE); fclose(fp4); flag=true; ising(X3,weight,k,n); for(int i=0;i<n;i++){ for(int j=0;j<n;j++){ if(checkX3[i*n+j]!=X3[i*n+j]){ printf("\nWRONG IMPLEMENTATION\n"); flag=false; break; } } if(!flag){ break; } } if(flag){ printf("\nCORRECT IMPLEMENTATION\n"); } printf("\n=========================END=========================="); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12ising_kernelPiS_Pdi .globl _Z12ising_kernelPiS_Pdi .p2align 8 .type _Z12ising_kernelPiS_Pdi,@function _Z12ising_kernelPiS_Pdi: s_load_b64 s[2:3], s[0:1], 0x10 s_movk_i32 s4, 0xff38 .LBB0_1: s_waitcnt lgkmcnt(0) s_load_b64 s[6:7], s[2:3], 0x0 v_mov_b32_e32 v3, s4 s_add_i32 s4, s4, 8 s_add_u32 s2, s2, 8 s_addc_u32 s3, s3, 0 s_cmp_eq_u32 s4, 0 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7 ds_store_b64 v3, v[1:2] offset:776 s_cbranch_scc0 .LBB0_1 s_load_b32 s10, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_and_b32 s2, s10, 63 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lg_u32 s2, 0 s_mov_b32 s2, 0 s_cbranch_scc0 .LBB0_81 s_ashr_i32 s3, s10, 31 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_lshr_b32 s3, s3, 26 s_add_i32 s3, s10, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_ashr_i32 s3, s3, 6 s_add_i32 s11, s3, 1 s_and_not1_b32 vcc_lo, exec_lo, s2 s_cbranch_vccnz .LBB0_5 .LBB0_4: s_ashr_i32 s11, s10, 6 .LBB0_5: s_delay_alu instid0(SALU_CYCLE_1) s_cmp_lt_i32 s11, 1 s_cbranch_scc1 .LBB0_80 s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b128 s[4:7], s[0:1], 0x0 v_cvt_f32_u32_e32 v2, s10 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v7, v0, 10, 10 s_sub_i32 s16, 0, s10 s_add_i32 s12, s10, 1 v_rcp_iflag_f32_e32 v0, v2 v_mul_u32_u24_e32 v4, 12, v1 v_lshlrev_b32_e32 v22, 2, v7 v_add_nc_u32_e32 v6, 2, v7 s_add_i32 s13, s10, 2 s_mov_b32 s17, 0 v_add_lshl_u32 v15, v4, v7, 2 v_lshl_add_u32 v17, v4, 2, 0x64 v_mov_b32_e32 v4, 0 v_mad_u32_u24 v5, v1, 12, 24 v_mad_u32_u24 v16, v1, 48, v22 v_add_nc_u32_e32 v9, 0x98, v15 v_add_nc_u32_e32 v10, 0xc8, v15 s_waitcnt lgkmcnt(0) s_lshr_b32 s0, s2, 16 s_and_b32 s1, 0xffff, s2 s_and_b32 s0, 0xffff, s0 v_mad_u64_u32 v[2:3], null, s14, s1, v[1:2] s_mul_i32 s0, s15, s0 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) v_dual_mul_f32 v0, 0x4f7ffffe, v0 :: v_dual_add_nc_u32 v11, s0, v7 
v_add_lshl_u32 v8, v6, v5, 2 v_add_nc_u32_e32 v12, 56, v22 v_lshlrev_b32_e32 v13, 2, v6 v_cvt_u32_f32_e32 v0, v0 v_add_nc_u32_e32 v14, 0x6c, v15 v_add_nc_u32_e32 v15, 0x70, v15 v_lshlrev_b32_e32 v18, 2, v5 v_add3_u32 v19, s0, s10, v7 v_mul_lo_u32 v3, s16, v0 v_add_nc_u32_e32 v20, 0x68, v16 v_mad_u32_u24 v21, v1, 48, 0x68 v_add_nc_u32_e32 v22, 0x68, v22 s_add_i32 s14, s10, -1 s_add_i32 s15, s10, -2 s_mov_b32 s3, 0x3f1a36e2 s_mov_b32 s2, 0xeb1c432d v_mul_hi_u32 v3, v0, v3 s_mov_b32 s9, 0xbf1a36e2 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v23, v0, v3 s_branch .LBB0_8 .LBB0_7: s_add_i32 s17, s17, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s17, s11 s_cbranch_scc1 .LBB0_80 .LBB0_8: v_lshl_add_u32 v0, s17, 6, v2 s_mov_b32 s18, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_dual_mov_b32 v33, v19 :: v_dual_add_nc_u32 v24, s10, v0 v_add_nc_u32_e32 v5, s12, v0 v_mul_lo_u32 v30, v0, s10 v_mul_hi_u32 v3, v24, v23 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v25, v5, v23 v_mul_lo_u32 v3, v3, s10 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_mul_lo_u32 v28, v25, s10 v_add_nc_u32_e32 v25, s14, v0 v_sub_nc_u32_e32 v3, v24, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v26, s10, v3 v_cmp_le_u32_e32 vcc_lo, s10, v3 v_cndmask_b32_e32 v3, v3, v26, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v26, s10, v3 v_cmp_le_u32_e32 vcc_lo, s10, v3 v_dual_cndmask_b32 v3, v3, v26 :: v_dual_add_nc_u32 v6, s13, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_mul_hi_u32 v27, v6, v23 v_add_nc_u32_e32 v26, s15, v0 v_cmp_gt_u32_e32 vcc_lo, s10, v0 v_mul_lo_u32 v29, v27, s10 v_sub_nc_u32_e32 v27, v5, v28 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v31, s10, v27 v_sub_nc_u32_e32 v28, v6, v29 v_mul_lo_u32 v29, v3, s10 
s_delay_alu instid0(VALU_DEP_2) v_subrev_nc_u32_e32 v32, s10, v28 s_branch .LBB0_11 .LBB0_9: s_or_b32 exec_lo, exec_lo, s0 v_add_nc_u32_e32 v3, v34, v30 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[5:6], 2, v[3:4] v_add_co_u32 v5, s0, s6, v5 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v6, s0, s7, v6, s0 s_waitcnt lgkmcnt(0) global_store_b32 v[5:6], v0, off .LBB0_10: s_or_b32 exec_lo, exec_lo, s19 v_add_nc_u32_e32 v33, 64, v33 s_add_i32 s18, s18, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s18, s11 s_cbranch_scc1 .LBB0_7 .LBB0_11: v_lshl_add_u32 v34, s18, 6, v11 s_mov_b32 s1, 0 s_mov_b32 s19, 0 s_mov_b32 s8, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v0, s10, v34 v_mul_hi_u32 v3, v0, v23 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v3, v3, s10 v_sub_nc_u32_e32 v0, v0, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v3, s10, v0 v_cmp_le_u32_e64 s0, s10, v0 v_cndmask_b32_e64 v0, v0, v3, s0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v3, s10, v0 v_cmp_le_u32_e64 s0, s10, v0 v_cndmask_b32_e64 v0, v0, v3, s0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v3, v29, v0 v_lshlrev_b64 v[5:6], 2, v[3:4] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v5, s0, s4, v5 v_add_co_ci_u32_e64 v6, s0, s5, v6, s0 global_load_b32 v3, v[5:6], off s_waitcnt vmcnt(0) ds_store_b32 v8, v3 v_cmpx_lt_i32_e32 6, v1 s_xor_b32 s8, exec_lo, s8 s_cbranch_execz .LBB0_36 s_mov_b32 s20, -1 s_mov_b32 s19, exec_lo v_cmpx_eq_u32_e32 7, v1 s_cbranch_execz .LBB0_35 s_mov_b32 s20, 0 s_mov_b32 s22, 0 s_mov_b32 s21, exec_lo v_cmpx_lt_i32_e32 6, v7 s_xor_b32 s21, exec_lo, s21 s_cbranch_execz .LBB0_23 s_mov_b32 s23, -1 s_mov_b32 s22, exec_lo v_cmpx_eq_u32_e32 7, v7 s_cbranch_execz .LBB0_22 
v_mov_b32_e32 v5, v20 s_mov_b32 s23, 0 s_branch .LBB0_17 .LBB0_16: s_set_inst_prefetch_distance 0x2 v_add_nc_u32_e32 v5, 48, v5 s_add_i32 s23, s23, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s23, 3 s_cbranch_scc1 .LBB0_21 .LBB0_17: v_add_nc_u32_e32 v3, s23, v24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mov_b32_e32 v35, v5 s_mov_b32 s24, 0 v_mul_hi_u32 v6, v3, v23 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v6, v6, s10 v_sub_nc_u32_e32 v6, v3, v6 v_mov_b32_e32 v3, v33 s_delay_alu instid0(VALU_DEP_2) v_subrev_nc_u32_e32 v36, s10, v6 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_19 .p2align 6 .LBB0_18: v_add_nc_u32_e32 v3, 1, v3 v_add_nc_u32_e32 v35, 4, v35 s_add_i32 s24, s24, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s24, 3 s_cbranch_scc1 .LBB0_16 .LBB0_19: s_or_b32 s0, s24, s23 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s0, 0 s_cbranch_scc1 .LBB0_18 v_mul_hi_u32 v39, v3, v23 v_cmp_le_u32_e64 s0, s10, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_not_b32_e32 v40, v39 v_mad_u64_u32 v[37:38], null, s16, v39, v[3:4] v_mad_u64_u32 v[38:39], null, s10, v40, v[3:4] s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v39, v6, v36, s0 v_cmp_le_u32_e64 s0, s10, v37 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v37, v37, v38, s0 v_subrev_nc_u32_e32 v38, s10, v39 v_cmp_le_u32_e64 s0, s10, v39 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v40, s10, v37 v_cndmask_b32_e64 v41, v39, v38, s0 v_cmp_le_u32_e64 s0, s10, v37 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v37, v37, v40, s0 v_mad_u64_u32 v[38:39], null, v41, s10, v[37:38] v_mov_b32_e32 v39, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[37:38], 2, v[38:39] v_add_co_u32 v37, s0, s4, v37 
s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v38, s0, s5, v38, s0 global_load_b32 v37, v[37:38], off s_waitcnt vmcnt(0) ds_store_b32 v35, v37 s_branch .LBB0_18 .LBB0_21: s_xor_b32 s23, exec_lo, -1 .LBB0_22: s_or_b32 exec_lo, exec_lo, s22 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 s22, s23, exec_lo .LBB0_23: s_and_not1_saveexec_b32 s21, s21 v_cmp_ne_u32_e64 s0, 0, v7 s_and_not1_b32 s22, s22, exec_lo s_mov_b32 s20, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, s0, exec_lo s_or_b32 s22, s22, s0 s_or_b32 exec_lo, exec_lo, s21 s_and_saveexec_b32 s0, s22 s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s21, exec_lo, s0 s_cbranch_execz .LBB0_27 v_cmp_le_u32_e64 s0, s10, v27 s_and_not1_b32 s20, s20, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v3, v27, v31, s0 v_cmp_le_u32_e64 s0, s10, v28 v_subrev_nc_u32_e32 v6, s10, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v5, v28, v32, s0 v_cmp_le_u32_e64 s0, s10, v3 v_subrev_nc_u32_e32 v35, s10, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v3, v3, v6, s0 v_cmp_le_u32_e64 s0, s10, v5 v_cndmask_b32_e64 v37, v5, v35, s0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_mad_u64_u32 v[5:6], null, v3, s10, v[0:1] v_mov_b32_e32 v6, 0 v_mad_u64_u32 v[35:36], null, v37, s10, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mov_b32_e32 v36, v6 v_lshlrev_b64 v[5:6], 2, v[5:6] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[35:36], 2, v[35:36] v_add_co_u32 v5, s0, s4, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e64 v6, s0, s5, v6, s0 v_add_co_u32 v35, s0, s4, v35 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v36, s0, s5, v36, s0 s_clause 0x1 global_load_b32 v3, v[5:6], off global_load_b32 v5, 
v[35:36], off s_waitcnt vmcnt(1) ds_store_b32 v9, v3 s_waitcnt vmcnt(0) ds_store_b32 v10, v5 .LBB0_27: s_or_b32 exec_lo, exec_lo, s21 s_and_saveexec_b32 s21, s20 s_cbranch_execz .LBB0_34 v_mov_b32_e32 v5, v21 s_mov_b32 s20, 0 s_branch .LBB0_30 .LBB0_29: s_set_inst_prefetch_distance 0x2 v_add_nc_u32_e32 v5, 48, v5 s_add_i32 s20, s20, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s20, 3 s_cbranch_scc1 .LBB0_34 .LBB0_30: v_add_nc_u32_e32 v3, s20, v24 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mov_b32_e32 v35, v5 s_mov_b32 s22, 0 v_mul_hi_u32 v6, v3, v23 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v6, v6, s10 v_sub_nc_u32_e32 v6, v3, v6 v_mov_b32_e32 v3, v33 s_delay_alu instid0(VALU_DEP_2) v_subrev_nc_u32_e32 v36, s10, v6 s_set_inst_prefetch_distance 0x1 s_branch .LBB0_32 .p2align 6 .LBB0_31: v_add_nc_u32_e32 v3, -1, v3 v_add_nc_u32_e32 v35, -4, v35 s_add_i32 s22, s22, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s22, 3 s_cbranch_scc1 .LBB0_29 .LBB0_32: s_or_b32 s0, s22, s20 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s0, 0 s_cbranch_scc1 .LBB0_31 v_mul_hi_u32 v39, v3, v23 v_cmp_le_u32_e64 s0, s10, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_not_b32_e32 v40, v39 v_mad_u64_u32 v[37:38], null, s16, v39, v[3:4] v_mad_u64_u32 v[38:39], null, s10, v40, v[3:4] s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v39, v6, v36, s0 v_cmp_le_u32_e64 s0, s10, v37 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e64 v37, v37, v38, s0 v_subrev_nc_u32_e32 v38, s10, v39 v_cmp_le_u32_e64 s0, s10, v39 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v40, s10, v37 v_cndmask_b32_e64 v41, v39, v38, s0 v_cmp_le_u32_e64 s0, s10, v37 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v37, v37, v40, s0 v_mad_u64_u32 v[38:39], 
null, v41, s10, v[37:38] v_mov_b32_e32 v39, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[37:38], 2, v[38:39] v_add_co_u32 v37, s0, s4, v37 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v38, s0, s5, v38, s0 global_load_b32 v37, v[37:38], off s_waitcnt vmcnt(0) ds_store_b32 v35, v37 s_branch .LBB0_31 .LBB0_34: s_or_b32 exec_lo, exec_lo, s21 s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s20, exec_lo, -1 .LBB0_35: s_or_b32 exec_lo, exec_lo, s19 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 s19, s20, exec_lo .LBB0_36: s_and_not1_saveexec_b32 s8, s8 v_cmp_ne_u32_e64 s0, 0, v1 s_and_not1_b32 s19, s19, exec_lo s_mov_b32 s1, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, s0, exec_lo s_or_b32 s19, s19, s0 s_or_b32 exec_lo, exec_lo, s8 s_and_saveexec_b32 s8, s19 s_cbranch_execz .LBB0_48 s_mov_b32 s19, exec_lo v_cmpx_lt_i32_e32 6, v7 s_xor_b32 s19, exec_lo, s19 s_cbranch_execz .LBB0_43 s_mov_b32 s20, exec_lo v_cmpx_eq_u32_e32 7, v7 s_cbranch_execz .LBB0_42 v_add_nc_u32_e32 v3, s12, v34 v_add_nc_u32_e32 v6, s13, v34 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v5, v3, v23 v_mul_hi_u32 v35, v6, v23 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v5, v5, s10 v_mul_lo_u32 v35, v35, s10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v3, v3, v5 v_sub_nc_u32_e32 v6, v6, v35 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v5, s10, v3 v_cmp_le_u32_e64 s0, s10, v3 v_cndmask_b32_e64 v3, v3, v5, s0 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v5, s10, v6 v_cmp_le_u32_e64 s0, s10, v6 v_subrev_nc_u32_e32 v35, s10, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v36, v6, v5, s0 v_mov_b32_e32 v6, 0 v_cmp_le_u32_e64 s0, s10, v3 v_cndmask_b32_e64 v3, 
v3, v35, s0 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v35, s10, v36 v_cmp_le_u32_e64 s0, s10, v36 v_add_nc_u32_e32 v5, v3, v29 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v3, v36, v35, s0 v_lshlrev_b64 v[35:36], 2, v[5:6] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v5, v3, v29 v_add_co_u32 v35, s0, s4, v35 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_lshlrev_b64 v[5:6], 2, v[5:6] v_add_co_ci_u32_e64 v36, s0, s5, v36, s0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v5, s0, s4, v5 v_add_co_ci_u32_e64 v6, s0, s5, v6, s0 s_clause 0x1 global_load_b32 v3, v[35:36], off global_load_b32 v5, v[5:6], off s_waitcnt vmcnt(1) ds_store_b32 v14, v3 s_waitcnt vmcnt(0) ds_store_b32 v15, v5 .LBB0_42: s_or_b32 exec_lo, exec_lo, s20 .LBB0_43: s_and_not1_saveexec_b32 s19, s19 s_cbranch_execz .LBB0_47 s_mov_b32 s20, exec_lo v_cmpx_eq_u32_e32 0, v7 s_cbranch_execz .LBB0_46 v_add_nc_u32_e32 v3, s14, v34 v_add_nc_u32_e32 v6, s15, v34 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_hi_u32 v5, v3, v23 v_mul_hi_u32 v35, v6, v23 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v5, v5, s10 v_mul_lo_u32 v35, v35, s10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v3, v3, v5 v_sub_nc_u32_e32 v6, v6, v35 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v5, s10, v3 v_cmp_le_u32_e64 s0, s10, v3 v_cndmask_b32_e64 v3, v3, v5, s0 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v5, s10, v6 v_cmp_le_u32_e64 s0, s10, v6 v_subrev_nc_u32_e32 v35, s10, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v36, v6, v5, s0 v_mov_b32_e32 v6, 0 v_cmp_le_u32_e64 s0, s10, v3 
v_cndmask_b32_e64 v3, v3, v35, s0 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v35, s10, v36 v_cmp_le_u32_e64 s0, s10, v36 v_add_nc_u32_e32 v5, v3, v29 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v3, v36, v35, s0 v_lshlrev_b64 v[35:36], 2, v[5:6] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_nc_u32_e32 v5, v3, v29 v_add_co_u32 v35, s0, s4, v35 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_lshlrev_b64 v[5:6], 2, v[5:6] v_add_co_ci_u32_e64 v36, s0, s5, v36, s0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v5, s0, s4, v5 v_add_co_ci_u32_e64 v6, s0, s5, v6, s0 s_clause 0x1 global_load_b32 v3, v[35:36], off global_load_b32 v5, v[5:6], off s_waitcnt vmcnt(1) ds_store_b32 v17, v3 s_waitcnt vmcnt(0) ds_store_b32 v18, v5 .LBB0_46: s_or_b32 exec_lo, exec_lo, s20 .LBB0_47: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s19 s_and_not1_b32 s1, s1, exec_lo .LBB0_48: s_or_b32 exec_lo, exec_lo, s8 s_and_saveexec_b32 s0, s1 s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s1, exec_lo, s0 s_cbranch_execz .LBB0_71 s_mov_b32 s8, 0 s_mov_b32 s20, 0 s_mov_b32 s19, exec_lo v_cmpx_lt_i32_e32 6, v7 s_xor_b32 s19, exec_lo, s19 s_cbranch_execz .LBB0_59 s_mov_b32 s21, -1 s_mov_b32 s20, exec_lo v_cmpx_eq_u32_e32 7, v7 s_cbranch_execz .LBB0_58 v_mov_b32_e32 v5, v22 s_mov_b32 s21, 0 s_branch .LBB0_53 .LBB0_52: v_subrev_nc_u32_e32 v5, 48, v5 s_add_i32 s21, s21, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s21, 3 s_cbranch_scc1 .LBB0_57 .LBB0_53: v_subrev_nc_u32_e32 v6, s21, v24 s_delay_alu instid0(VALU_DEP_2) v_mov_b32_e32 v35, v5 v_mov_b32_e32 v3, v33 s_mov_b32 s22, 0 s_branch .LBB0_55 .LBB0_54: v_add_nc_u32_e32 v3, 1, v3 v_add_nc_u32_e32 v35, 4, v35 s_add_i32 s22, s22, 1 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s22, 3 s_cbranch_scc1 .LBB0_52 
.LBB0_55: s_or_b32 s0, s22, s21 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s0, 0 s_cbranch_scc1 .LBB0_54 v_mul_hi_u32 v36, v6, v23 v_mul_hi_u32 v38, v3, v23 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v39, v36, s10 v_not_b32_e32 v40, v38 v_mad_u64_u32 v[36:37], null, s16, v38, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mad_u64_u32 v[37:38], null, s10, v40, v[3:4] v_sub_nc_u32_e32 v39, v6, v39 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_le_u32_e64 s0, s10, v36 v_subrev_nc_u32_e32 v38, s10, v39 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v36, v36, v37, s0 v_cmp_le_u32_e64 s0, s10, v39 v_cndmask_b32_e64 v37, v39, v38, s0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v38, s10, v36 v_cmp_le_u32_e64 s0, s10, v36 v_subrev_nc_u32_e32 v39, s10, v37 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v36, v36, v38, s0 v_cmp_le_u32_e64 s0, s10, v37 v_cndmask_b32_e64 v39, v37, v39, s0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[37:38], null, v39, s10, v[36:37] v_mov_b32_e32 v38, v4 v_lshlrev_b64 v[36:37], 2, v[37:38] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v36, s0, s4, v36 v_add_co_ci_u32_e64 v37, s0, s5, v37, s0 global_load_b32 v36, v[36:37], off s_waitcnt vmcnt(0) ds_store_b32 v35, v36 s_branch .LBB0_54 .LBB0_57: s_xor_b32 s21, exec_lo, -1 .LBB0_58: s_or_b32 exec_lo, exec_lo, s20 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 s20, s21, exec_lo .LBB0_59: s_and_not1_saveexec_b32 s19, s19 v_cmp_ne_u32_e64 s0, 0, v7 s_and_not1_b32 s20, s20, exec_lo s_mov_b32 s8, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s0, s0, exec_lo s_or_b32 s20, s20, s0 s_or_b32 exec_lo, exec_lo, s19 s_and_saveexec_b32 s0, s20 
s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s19, exec_lo, s0 s_cbranch_execz .LBB0_63 v_mul_hi_u32 v3, v25, v23 v_mul_hi_u32 v5, v26, v23 s_and_not1_b32 s8, s8, exec_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v3, v3, s10 v_mul_lo_u32 v5, v5, s10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_sub_nc_u32_e32 v3, v25, v3 v_sub_nc_u32_e32 v5, v26, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_subrev_nc_u32_e32 v6, s10, v3 v_cmp_le_u32_e64 s0, s10, v3 v_subrev_nc_u32_e32 v35, s10, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v3, v3, v6, s0 v_cmp_le_u32_e64 s0, s10, v5 v_subrev_nc_u32_e32 v6, s10, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v5, v5, v35, s0 v_cmp_le_u32_e64 s0, s10, v3 v_subrev_nc_u32_e32 v35, s10, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v3, v3, v6, s0 v_cmp_le_u32_e64 s0, s10, v5 v_cndmask_b32_e64 v37, v5, v35, s0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_mad_u64_u32 v[5:6], null, v3, s10, v[0:1] v_mov_b32_e32 v6, 0 v_mad_u64_u32 v[35:36], null, v37, s10, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mov_b32_e32 v36, v6 v_lshlrev_b64 v[5:6], 2, v[5:6] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[35:36], 2, v[35:36] v_add_co_u32 v5, s0, s4, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_ci_u32_e64 v6, s0, s5, v6, s0 v_add_co_u32 v35, s0, s4, v35 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v36, s0, s5, v36, s0 s_clause 0x1 global_load_b32 v0, v[5:6], off global_load_b32 v3, v[35:36], off s_waitcnt vmcnt(1) ds_store_b32 v12, v0 s_waitcnt vmcnt(0) ds_store_b32 v13, v3 .LBB0_63: s_or_b32 exec_lo, exec_lo, s19 s_and_saveexec_b32 s19, s8 s_cbranch_execz 
.LBB0_70 s_mov_b32 s8, 0 s_movk_i32 s20, 0x68 s_branch .LBB0_66 .LBB0_65: s_add_i32 s8, s8, 1 s_sub_i32 s20, s20, 48 s_cmp_eq_u32 s8, 3 s_cbranch_scc1 .LBB0_70 .LBB0_66: v_subrev_nc_u32_e32 v3, s8, v24 v_mov_b32_e32 v0, v33 s_mov_b32 s21, s20 s_mov_b32 s22, 0 s_branch .LBB0_68 .LBB0_67: v_add_nc_u32_e32 v0, -1, v0 s_add_i32 s22, s22, 1 s_add_i32 s21, s21, -4 s_cmp_eq_u32 s22, 3 s_cbranch_scc1 .LBB0_65 .LBB0_68: s_or_b32 s0, s22, s8 s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s0, 0 s_cbranch_scc1 .LBB0_67 v_mul_hi_u32 v5, v3, v23 v_mul_hi_u32 v35, v0, v23 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_lo_u32 v36, v5, s10 v_not_b32_e32 v37, v35 v_mad_u64_u32 v[5:6], null, s16, v35, v[0:1] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_sub_nc_u32_e32 v6, v3, v36 v_mad_u64_u32 v[35:36], null, s10, v37, v[0:1] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cmp_le_u32_e64 s0, s10, v5 v_subrev_nc_u32_e32 v36, s10, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v5, v5, v35, s0 v_cmp_le_u32_e64 s0, s10, v6 v_subrev_nc_u32_e32 v35, s10, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e64 v6, v6, v36, s0 v_cmp_le_u32_e64 s0, s10, v5 v_subrev_nc_u32_e32 v36, s10, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cndmask_b32_e64 v5, v5, v35, s0 v_cmp_le_u32_e64 s0, s10, v6 v_cndmask_b32_e64 v6, v6, v36, s0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[35:36], null, v6, s10, v[5:6] v_mov_b32_e32 v36, v4 v_lshlrev_b64 v[5:6], 2, v[35:36] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_u32 v5, s0, s4, v5 v_add_co_ci_u32_e64 v6, s0, s5, v6, s0 global_load_b32 v5, v[5:6], off v_mov_b32_e32 v6, s21 s_waitcnt vmcnt(0) ds_store_b32 v6, v5 s_branch .LBB0_67 .LBB0_70: s_or_b32 exec_lo, exec_lo, s19 .LBB0_71: 
s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s1 v_cmp_gt_u32_e64 s0, s10, v34 s_waitcnt lgkmcnt(0) s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_and_b32 s0, vcc_lo, s0 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s19, s0 s_cbranch_execz .LBB0_10 v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v0, v16 v_mov_b32_e32 v6, 0 s_mov_b32 s0, -2 s_movk_i32 s1, 0x240 .p2align 6 .LBB0_73: s_mov_b32 s8, 0 s_mov_b32 s20, s1 .LBB0_74: v_add_nc_u32_e32 v3, s8, v0 s_add_i32 s8, s8, 4 ds_load_b32 v3, v3 s_waitcnt lgkmcnt(0) v_cvt_f64_i32_e32 v[35:36], v3 v_mov_b32_e32 v3, s20 s_add_i32 s20, s20, 8 s_cmp_eq_u32 s8, 20 ds_load_b64 v[37:38], v3 s_waitcnt lgkmcnt(0) v_fma_f64 v[5:6], v[37:38], v[35:36], v[5:6] s_cbranch_scc0 .LBB0_74 v_add_nc_u32_e32 v0, 48, v0 s_add_i32 s0, s0, 1 s_add_i32 s1, s1, 40 s_cmp_eq_u32 s0, 3 s_cbranch_scc0 .LBB0_73 s_mov_b32 s8, s2 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cmp_ngt_f64_e64 s0, s[2:3], v[5:6] v_cmp_nlt_f64_e64 s1, s[8:9], v[5:6] s_or_b32 s0, s0, s1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_saveexec_b32 s1, s0 s_xor_b32 s1, exec_lo, s1 v_cmp_lt_f64_e64 s0, 0, v[5:6] s_delay_alu instid0(VALU_DEP_1) v_cndmask_b32_e64 v0, -1, 1, s0 s_and_not1_saveexec_b32 s0, s1 s_cbranch_execz .LBB0_9 ds_load_b32 v0, v8 s_branch .LBB0_9 .LBB0_80: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .LBB0_81: s_branch .LBB0_4 .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12ising_kernelPiS_Pdi .amdhsa_group_segment_fixed_size 776 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 
.amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 42 .amdhsa_next_free_sgpr 25 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12ising_kernelPiS_Pdi, .Lfunc_end0-_Z12ising_kernelPiS_Pdi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: 
hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 776 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12ising_kernelPiS_Pdi .private_segment_fixed_size: 0 .sgpr_count: 27 .sgpr_spill_count: 0 .symbol: _Z12ising_kernelPiS_Pdi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 42 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>

// Tile edge of one thread block (threads per block = BLOCK_SIZE x BLOCK_SIZE).
#define BLOCK_SIZE 8
// Blocks per grid edge (launch is GRID_SIZE x GRID_SIZE blocks).
#define GRID_SIZE 8

//struct timespec start, finish;
//double elapsed;

// One weighted-majority sweep of an n x n Ising lattice with periodic
// boundaries and a 5x5 influence window.
//   G    : current spin lattice (+1/-1), row-major n x n, read only
//   newG : next lattice state, written for every site
//   w    : 25 window weights, row-major 5x5 (center weight is 0)
//   n    : lattice edge length
// The fixed 64x64-thread launch strides over the whole lattice in
// (BLOCK_SIZE*GRID_SIZE)-sized tiles; each tile stages a 12x12 patch
// (8x8 interior + 2-cell halo on every side) in shared memory.
__global__ void ising_kernel(int *G,int *newG,double *w,int n){
    int x,y; // NOTE(review): unused locals, kept as-is
    // Staged copy of the 25 window weights.
    __shared__ double shared_w[25];
    // 12x12 tile: 8x8 interior plus a 2-cell halo ring, row-major.
    __shared__ int shared_G[(BLOCK_SIZE+4)*(BLOCK_SIZE+4)];
    // Every thread redundantly copies all 25 weights; benign since all
    // threads write identical values before any read (after the barrier).
    for(int i=0;i<25;i++){
        shared_w[i]=w[i];
    }
    unsigned int xBlock = blockDim.x * blockIdx.x;
    unsigned int yBlock = blockDim.y * blockIdx.y;
    unsigned int xIndex = xBlock + threadIdx.x;
    unsigned int yIndex = yBlock + threadIdx.y;
    // tempX/tempY preserve the base coordinate; xIndex/yIndex are
    // recomputed from them on every tile-stride iteration below.
    unsigned int tempX = xBlock + threadIdx.x;
    unsigned int tempY = yBlock + threadIdx.y;
    // Number of tile strides needed to cover n sites per axis
    // (ceiling of n / (BLOCK_SIZE*GRID_SIZE), written out explicitly).
    int iterations;
    if (n%(BLOCK_SIZE*GRID_SIZE)==0){
        iterations=n/(BLOCK_SIZE*GRID_SIZE);
    }else{
        iterations=n/(BLOCK_SIZE*GRID_SIZE)+1;
    }
    for(int i=0;i<iterations;i++){
        xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i);
        for(int j=0;j<iterations;j++){
            yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j);
            // Interior: each thread loads its own site, wrapped modulo n.
            // "+n" before "%n" guards against negative offsets elsewhere;
            // here indices are already non-negative.
            shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]=G[((xIndex+n)%n)*n+(yIndex+n)%n];
            // Halo fill: edge threads also load the 2-deep border ring.
            // Corner threads load a 3x3 corner patch (minus their own
            // site, already loaded above); other edge threads load the
            // two cells straight outward from their row/column.
            if(threadIdx.x==0){
                if (threadIdx.y==0){
                    // Top-left corner of the tile.
                    for(int k=0;k<3;k++){
                        for(int l=0;l<3;l++){
                            if(!(k==0&&l==0)){
                                shared_G[(2-k)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex-k+n)%n)*n+(yIndex-l+n)%n];
                            }
                        }
                    }
                }else if(threadIdx.y==BLOCK_SIZE-1){
                    // Top-right corner.
                    for(int k=0;k<3;k++){
                        for(int l=0;l<3;l++){
                            if(!(k==0&&l==0)){
                                shared_G[(2-k)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex-k+n)%n)*n+(yIndex+l+n)%n];
                            }
                        }
                    }
                }else{
                    // Top edge (non-corner): two cells above this column.
                    shared_G[(2-1)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-1+n)%n)*n+(yIndex+n)%n];
                    shared_G[(2-2)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-2+n)%n)*n+(yIndex+n)%n];
                }
            }else if(threadIdx.x==BLOCK_SIZE-1){
                if (threadIdx.y==0){
                    // Bottom-left corner.
                    for(int k=0;k<3;k++){
                        for(int l=0;l<3;l++){
                            if(!(k==0&&l==0)){
                                shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex+k+n)%n)*n+(yIndex-l+n)%n];
                            }
                        }
                    }
                }else if(threadIdx.y==BLOCK_SIZE-1){
                    // Bottom-right corner.
                    for(int k=0;k<3;k++){
                        for(int l=0;l<3;l++){
                            if(!(k==0&&l==0)){
                                shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex+k+n)%n)*n+(yIndex+l+n)%n];
                            }
                        }
                    }
                }else {
                    // Bottom edge (non-corner): two cells below this column.
                    shared_G[(2+1+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+1+n)%n)*n+(yIndex+n)%n];
                    shared_G[(2+2+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+2+n)%n)*n+(yIndex+n)%n];
                }
            }else{
                if(threadIdx.y==0){
                    // Left edge (non-corner): two cells left of this row.
                    shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-1)]=G[((xIndex+n)%n)*n+(yIndex-1+n)%n];
                    shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-2)]=G[((xIndex+n)%n)*n+(yIndex-2+n)%n];
                }else if(threadIdx.y==BLOCK_SIZE-1){
                    // Right edge (non-corner): two cells right of this row.
                    shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+1+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+1+n)%n];
                    shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+2+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+2+n)%n];
                }
            }
            // Barrier: the whole 12x12 patch (interior + halo) must be
            // staged before any thread reads its 5x5 neighbourhood.
            // NOTE(review): inside a loop whose trip count is uniform
            // across the block, so no divergent-barrier hazard.
            __syncthreads();
            // NOTE(review): xIndex/yIndex are unsigned, n is int —
            // comparison promotes n to unsigned; fine for n > 0.
            if(xIndex<n&&yIndex<n){
                double weight=0;
                //printf("BLOCK IDX X: %d\n",blockIdx.x);
                //printf("BLOCK IDX Y: %d\n",blockIdx.y);
                // Weighted sum of the 5x5 neighbourhood of this site.
                for(int ibor=-2;ibor<3;ibor++){
                    for(int jbor=-2;jbor<3;jbor++){
                        weight+=shared_w[(ibor+2)*5+jbor+2]*shared_G[(threadIdx.x+2+ibor)*(BLOCK_SIZE+4)+(threadIdx.y+2+jbor)];
                    }
                }
                // |weight| < 1e-4 is treated as a tie: the spin is kept;
                // otherwise the spin follows the sign of the sum.
                if(weight<1e-4&&weight>-(1e-4)){
                    // newG[xIndex*n+yIndex]=G[xIndex*n+yIndex];
                    newG[xIndex*n+yIndex]=shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2];
                }else if(weight>0){
                    newG[xIndex*n+yIndex]=1;
                }else{
                    newG[xIndex*n+yIndex]=-1;
                }
            }
        }
    }
}

// Runs up to k sweeps of the Ising update on the n x n lattice G (host
// array, updated in place), using the 5x5 weight window w (25 doubles).
// Stops early if a sweep leaves the lattice unchanged.
void ising( int *G, double *w, int k, int n){
    int *newG,*swapG,*G2; // NOTE(review): swapG is declared but never used
    double *w2;
    // Managed (unified) allocations so the host swap loop below can
    // touch the same buffers the kernel writes.
    hipMallocManaged(&newG,n*n*sizeof(int));
    hipMallocManaged(&G2,n*n*sizeof(int));
    hipMallocManaged(&w2,25*sizeof(double));
    hipMemcpy( w2, w, 25*sizeof(double),hipMemcpyHostToDevice);
    hipMemcpy( G2, G, n*n*sizeof(int),hipMemcpyHostToDevice);
    double total_time=0;
    for(int iter=0;iter<k;iter++){
        int grid_dimension; // NOTE(review): unused local, kept as-is
        bool repeat=true;
        dim3 grid(GRID_SIZE, GRID_SIZE);
        dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        // struct timeval startwtime, endwtime;
        // gettimeofday (&startwtime, NULL);
        // One full sweep: reads G2, writes newG.
        ising_kernel<<<grid,block>>>(G2,newG,w2,n);
        hipDeviceSynchronize();
        // gettimeofday (&endwtime, NULL);
        // double time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
        // total_time+=time;
        // Element-wise swap of newG and G2 on the host, while checking
        // for convergence. After this loop G2 holds the new state and
        // newG holds the previous one, so the next sweep reads G2 again.
        for(int i=0;i<n;i++){
            for(int j=0;j<n;j++){
                if(repeat&&newG[i*n+j]!=G2[i*n+j]){
                    repeat=false;
                }
                int temp=newG[i*n+j];
                newG[i*n+j]=G2[i*n+j];
                G2[i*n+j]=temp;
            }
        }
        // Converged: the sweep changed nothing, so stop early.
        if(repeat){
            break;
        }
    }
    // Copy the final state back to the caller's array.
    // NOTE(review): newG/G2/w2 are never hipFree'd — leaked per call.
    hipMemcpy(G, G2, n*n*sizeof(int),hipMemcpyDeviceToHost);
    // printf("Seconds are %lf ",total_time);
}

// Driver: runs the solver for k = 1, 4 and 11 sweeps against reference
// lattices read from conf-1.bin / conf-4.bin / conf-11.bin, starting
// from conf-init.bin each time, and reports CORRECT/WRONG per case.
// NOTE(review): n*n ints (~1 MB) per VLA on the stack, six of them —
// assumes a large stack limit; verify on the target system.
int main() {
    printf("==========================START=============================\n");
    // 5x5 influence weights, row-major; center (self) weight is 0.
    double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004};
    int n=517;
    int X[n*n];
    size_t size;
    // --- Case k=1 ---
    FILE *fp = fopen("conf-init.bin", "rb");
    size = fread(X, sizeof(int), n * n, fp);
    if(size!=n*n) exit(EXIT_FAILURE);
    fclose(fp);
    int k=1;
    ising(X,weight,k,n);
    int checkX[n*n];
    printf("k=1:\n");
    FILE *fp2 = fopen("conf-1.bin", "rb");
    size = fread(checkX, sizeof(int), n * n, fp2);
    if(size!=n*n) exit(EXIT_FAILURE);
    fclose(fp2);
    bool flag=true;
    // Compare computed lattice against the k=1 reference; stop at the
    // first mismatching row.
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            if(checkX[i*n+j]!=X[i*n+j]){
                printf("\nWRONG IMPLEMENTATION\n");
                flag=false;
                break;
            }
        }
        if(!flag){
            break;
        }
    }
    if(flag){
        printf("\nCORRECT IMPLEMENTATION\n");
    }
    // --- Case k=4 ---
    printf("k=4:\n");
    k=4 ;
    int X2[n*n];
    FILE *fpA = fopen("conf-init.bin", "rb");
    size = fread(X2, sizeof(int), n * n, fpA);
    if(size!=n*n) exit(EXIT_FAILURE);
    fclose(fpA);
    int checkX2[n*n];
    FILE *fp3 = fopen("conf-4.bin", "rb");
    size = fread(checkX2, sizeof(int), n * n, fp3);
    if(size!=n*n) exit(EXIT_FAILURE);
    fclose(fp3);
    ising(X2,weight,k,n);
    flag=true;
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            if(checkX2[i*n+j]!=X2[i*n+j]){
                printf("\nWRONG IMPLEMENTATION\n");
                flag=false;
                break;
            }
        }
        if(!flag){
            break;
        }
    }
    if(flag){
        printf("\nCORRECT IMPLEMENTATION\n");
    }
    // --- Case k=11 ---
    printf("k=11:\n");
    k=11 ;
    int X3[n*n];
    FILE *fpB = fopen("conf-init.bin", "rb");
    size = fread(X3, sizeof(int), n * n, fpB);
    if(size!=n*n) exit(EXIT_FAILURE);
    fclose(fpB);
    int checkX3[n*n];
    FILE *fp4 = fopen("conf-11.bin", "rb");
    size = fread(checkX3, sizeof(int), n * n, fp4);
    if(size!=n*n) exit(EXIT_FAILURE);
    fclose(fp4);
    flag=true;
    ising(X3,weight,k,n);
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            if(checkX3[i*n+j]!=X3[i*n+j]){
                printf("\nWRONG IMPLEMENTATION\n");
                flag=false;
                break;
            }
        }
        if(!flag){
            break;
        }
    }
    if(flag){
        printf("\nCORRECT IMPLEMENTATION\n");
    }
    printf("\n=========================END==========================");
    return 0;
}
.text .file "ising-v3.hip" .globl _Z27__device_stub__ising_kernelPiS_Pdi # -- Begin function _Z27__device_stub__ising_kernelPiS_Pdi .p2align 4, 0x90 .type _Z27__device_stub__ising_kernelPiS_Pdi,@function _Z27__device_stub__ising_kernelPiS_Pdi: # @_Z27__device_stub__ising_kernelPiS_Pdi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12ising_kernelPiS_Pdi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__ising_kernelPiS_Pdi, .Lfunc_end0-_Z27__device_stub__ising_kernelPiS_Pdi .cfi_endproc # -- End function .globl _Z5isingPiPdii # -- Begin function _Z5isingPiPdii .p2align 4, 0x90 .type _Z5isingPiPdii,@function _Z5isingPiPdii: # @_Z5isingPiPdii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movl %edx, %r12d movq %rsi, %r14 movq %rdi, %r15 movl %ecx, %ebx imull %ebx, %ebx shlq $2, %rbx leaq 8(%rsp), %rdi movq %rbx, %rsi movl $1, %edx callq hipMallocManaged movq %rsp, %rdi movq %rbx, %rsi movl $1, %edx callq hipMallocManaged leaq 32(%rsp), %rdi movl $200, %esi movl $1, %edx callq hipMallocManaged movq 
32(%rsp), %rdi movl $200, %edx movq %r14, %rsi movl $1, %ecx callq hipMemcpy movq (%rsp), %rdi movq %r15, 40(%rsp) # 8-byte Spill movq %r15, %rsi movq %rbx, 48(%rsp) # 8-byte Spill movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movl %r12d, 20(%rsp) # 4-byte Spill testl %r12d, %r12d jle .LBB1_8 # %bb.1: # %.lr.ph66 movl %ebp, %r14d xorl %r13d, %r13d xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_2: # =>This Loop Header: Depth=1 # Child Loop BB1_10 Depth 2 # Child Loop BB1_11 Depth 3 movabsq $34359738376, %rdi # imm = 0x800000008 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: # in Loop: Header=BB1_2 Depth=1 movq (%rsp), %rax movq 8(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movq %rdx, 104(%rsp) movl %ebp, 28(%rsp) leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 28(%rsp), %rax movq %rax, 152(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d movl $_Z12ising_kernelPiS_Pdi, %edi leaq 128(%rsp), %r9 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 pushq 72(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: # in Loop: Header=BB1_2 Depth=1 movl %ebx, 24(%rsp) # 4-byte Spill callq hipDeviceSynchronize testl %ebp, %ebp jle .LBB1_5 # %bb.9: # %.preheader.lr.ph # in Loop: Header=BB1_2 Depth=1 movq 8(%rsp), %rcx movb $1, %al xorl %edx, %edx movq (%rsp), %rsi xorl %edi, %edi jmp .LBB1_10 .p2align 4, 0x90 .LBB1_14: # %._crit_edge # in Loop: Header=BB1_10 Depth=2 incq %rdi addl %ebp, %edx cmpq %r14, %rdi je .LBB1_6 .LBB1_10: # %.preheader # Parent Loop BB1_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_11 Depth 3 movl %edx, %r10d leaq (,%r10,4), %r8 leaq (%rsi,%r10,4), %r9 leaq 
(%rcx,%r10,4), %r10 movq (%rsp), %r11 addq %r8, %r11 addq 8(%rsp), %r8 xorl %r15d, %r15d jmp .LBB1_11 .p2align 4, 0x90 .LBB1_13: # in Loop: Header=BB1_11 Depth=3 movl (%r10,%r15,4), %r12d movl (%r9,%r15,4), %ebx movl %ebx, (%r10,%r15,4) movl %r12d, (%r9,%r15,4) incq %r15 cmpq %r15, %r14 je .LBB1_14 .LBB1_11: # Parent Loop BB1_2 Depth=1 # Parent Loop BB1_10 Depth=2 # => This Inner Loop Header: Depth=3 testb $1, %al je .LBB1_13 # %bb.12: # in Loop: Header=BB1_11 Depth=3 movl (%r8,%r15,4), %r12d cmpl (%r11,%r15,4), %r12d movzbl %al, %eax cmovnel %r13d, %eax jmp .LBB1_13 .p2align 4, 0x90 .LBB1_5: # in Loop: Header=BB1_2 Depth=1 movb $1, %al .LBB1_6: # %._crit_edge62 # in Loop: Header=BB1_2 Depth=1 testb $1, %al movl 24(%rsp), %ebx # 4-byte Reload jne .LBB1_8 # %bb.7: # %._crit_edge62 # in Loop: Header=BB1_2 Depth=1 incl %ebx cmpl 20(%rsp), %ebx # 4-byte Folded Reload jne .LBB1_2 .LBB1_8: # %._crit_edge67 movq (%rsp), %rsi movq 40(%rsp), %rdi # 8-byte Reload movq 48(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z5isingPiPdii, .Lfunc_end1-_Z5isingPiPdii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset %rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $1069376, %rsp # imm = 0x105140 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 movl $.Lstr, %edi callq puts@PLT leaq -240(%rbp), %rdi movl $.L__const.main.weight, %esi movl $200, %edx callq memcpy@PLT movq %rsp, %r12 movl $.L.str.1, %edi movl $.L.str.2, %esi callq fopen movq %rax, %rbx leaq -1069408(%rbp), %rdi movl $4, %esi 
movl $267289, %edx # imm = 0x41419 movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.1: movq %rbx, %rdi callq fclose leaq -1069408(%rbp), %rbx leaq -240(%rbp), %rsi movq %rbx, %rdi movl $1, %edx movl $517, %ecx # imm = 0x205 callq _Z5isingPiPdii movq %rsp, %r14 addq $-1069168, %r14 # imm = 0xFFEFAF90 movq %r14, %rsp movl $.Lstr.1, %edi callq puts@PLT movl $.L.str.4, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r15 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %r14, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.2: movq %r15, %rdi callq fclose movb $1, %al xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_3: # %.preheader134 # =>This Loop Header: Depth=1 # Child Loop BB2_5 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_5: # Parent Loop BB2_3 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%rcx,4), %edx cmpl (%rbx,%rcx,4), %edx jne .LBB2_6 # %bb.4: # in Loop: Header=BB2_5 Depth=2 incq %rcx cmpq $517, %rcx # imm = 0x205 jne .LBB2_5 # %bb.7: # %.loopexit135 # in Loop: Header=BB2_3 Depth=1 testb $1, %al jne .LBB2_8 jmp .LBB2_9 .p2align 4, 0x90 .LBB2_6: # in Loop: Header=BB2_3 Depth=1 movl $.Lstr.8, %edi callq puts@PLT xorl %eax, %eax testb $1, %al je .LBB2_9 .LBB2_8: # %.loopexit135 # in Loop: Header=BB2_3 Depth=1 incq %r15 addq $2068, %rbx # imm = 0x814 addq $2068, %r14 # imm = 0x814 cmpq $517, %r15 # imm = 0x205 jne .LBB2_3 .LBB2_9: testb $1, %al je .LBB2_11 # %bb.10: movl $.Lstr.9, %edi callq puts@PLT .LBB2_11: movl $.Lstr.4, %edi callq puts@PLT movq %rsp, %rbx addq $-1069168, %rbx # imm = 0xFFEFAF90 movq %rbx, %rsp movl $.L.str.1, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r14 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %rbx, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.12: movq %r14, %rdi callq fclose movq %rsp, %r14 addq $-1069168, %r14 # imm = 0xFFEFAF90 movq %r14, %rsp movl $.L.str.8, %edi movl $.L.str.2, %esi callq fopen 
movq %rax, %r15 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %r14, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.13: movq %r15, %rdi callq fclose leaq -240(%rbp), %rsi movq %rbx, %rdi movl $4, %edx movl $517, %ecx # imm = 0x205 callq _Z5isingPiPdii movb $1, %al xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_14: # %.preheader132 # =>This Loop Header: Depth=1 # Child Loop BB2_16 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_16: # Parent Loop BB2_14 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%rcx,4), %edx cmpl (%rbx,%rcx,4), %edx jne .LBB2_17 # %bb.15: # in Loop: Header=BB2_16 Depth=2 incq %rcx cmpq $517, %rcx # imm = 0x205 jne .LBB2_16 # %bb.18: # %.loopexit133 # in Loop: Header=BB2_14 Depth=1 testb $1, %al jne .LBB2_19 jmp .LBB2_20 .p2align 4, 0x90 .LBB2_17: # in Loop: Header=BB2_14 Depth=1 movl $.Lstr.8, %edi callq puts@PLT xorl %eax, %eax testb $1, %al je .LBB2_20 .LBB2_19: # %.loopexit133 # in Loop: Header=BB2_14 Depth=1 incq %r15 addq $2068, %rbx # imm = 0x814 addq $2068, %r14 # imm = 0x814 cmpq $517, %r15 # imm = 0x205 jne .LBB2_14 .LBB2_20: testb $1, %al je .LBB2_22 # %bb.21: movl $.Lstr.9, %edi callq puts@PLT .LBB2_22: movl $.Lstr.7, %edi callq puts@PLT movq %rsp, %rbx addq $-1069168, %rbx # imm = 0xFFEFAF90 movq %rbx, %rsp movl $.L.str.1, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r14 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %rbx, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.23: movq %r14, %rdi callq fclose movq %rsp, %r14 addq $-1069168, %r14 # imm = 0xFFEFAF90 movq %r14, %rsp movl $.L.str.10, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r15 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %r14, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.24: movq %r15, %rdi callq fclose leaq -240(%rbp), %rsi movq %rbx, %rdi movl $11, %edx movl $517, %ecx # imm = 0x205 callq _Z5isingPiPdii movb $1, %al 
xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_25: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_27 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_27: # Parent Loop BB2_25 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%rcx,4), %edx cmpl (%rbx,%rcx,4), %edx jne .LBB2_28 # %bb.26: # in Loop: Header=BB2_27 Depth=2 incq %rcx cmpq $517, %rcx # imm = 0x205 jne .LBB2_27 # %bb.29: # %.loopexit # in Loop: Header=BB2_25 Depth=1 testb $1, %al jne .LBB2_30 jmp .LBB2_31 .p2align 4, 0x90 .LBB2_28: # in Loop: Header=BB2_25 Depth=1 movl $.Lstr.8, %edi callq puts@PLT xorl %eax, %eax testb $1, %al je .LBB2_31 .LBB2_30: # %.loopexit # in Loop: Header=BB2_25 Depth=1 incq %r15 addq $2068, %rbx # imm = 0x814 addq $2068, %r14 # imm = 0x814 cmpq $517, %r15 # imm = 0x205 jne .LBB2_25 .LBB2_31: testb $1, %al je .LBB2_33 # %bb.32: movl $.Lstr.9, %edi callq puts@PLT .LBB2_33: movl $.L.str.11, %edi xorl %eax, %eax callq printf movq %r12, %rsp xorl %eax, %eax leaq -32(%rbp), %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp .cfi_def_cfa %rsp, 8 retq .LBB2_34: .cfi_def_cfa %rbp, 16 movl $1, %edi callq exit .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12ising_kernelPiS_Pdi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # 
-- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z12ising_kernelPiS_Pdi,@object # @_Z12ising_kernelPiS_Pdi .section .rodata,"a",@progbits .globl _Z12ising_kernelPiS_Pdi .p2align 3, 0x0 _Z12ising_kernelPiS_Pdi: .quad _Z27__device_stub__ising_kernelPiS_Pdi .size _Z12ising_kernelPiS_Pdi, 8 .type .L__const.main.weight,@object # @__const.main.weight .p2align 4, 0x0 .L__const.main.weight: .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x0000000000000000 # double 0 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 
0x3f70624dd2f1a9fc # double 0.0040000000000000001 .size .L__const.main.weight, 200 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "conf-init.bin" .size .L.str.1, 14 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "rb" .size .L.str.2, 3 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "conf-1.bin" .size .L.str.4, 11 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "conf-4.bin" .size .L.str.8, 11 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "conf-11.bin" .size .L.str.10, 12 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "\n=========================END==========================" .size .L.str.11, 56 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12ising_kernelPiS_Pdi" .size .L__unnamed_1, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "==========================START=============================" .size .Lstr, 61 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "k=1:" .size .Lstr.1, 5 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "k=4:" .size .Lstr.4, 5 .type .Lstr.7,@object # @str.7 .Lstr.7: .asciz "k=11:" .size .Lstr.7, 6 .type .Lstr.8,@object # @str.8 .Lstr.8: .asciz "\nWRONG IMPLEMENTATION" .size .Lstr.8, 22 .type .Lstr.9,@object # @str.9 .Lstr.9: .asciz "\nCORRECT IMPLEMENTATION" .size .Lstr.9, 24 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git 
(https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__ising_kernelPiS_Pdi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12ising_kernelPiS_Pdi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00085fcc_00000000-6_ising-v3.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi .type _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi, @function _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi: .LFB2083: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12ising_kernelPiS_Pdi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi, .-_Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi .globl _Z12ising_kernelPiS_Pdi .type _Z12ising_kernelPiS_Pdi, @function _Z12ising_kernelPiS_Pdi: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z12ising_kernelPiS_Pdi, .-_Z12ising_kernelPiS_Pdi .globl _Z5isingPiPdii .type _Z5isingPiPdii, @function _Z5isingPiPdii: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $88, %rsp .cfi_def_cfa_offset 144 movq %rdi, %r15 movq %rdi, 8(%rsp) movq %rsi, %rbp movl %edx, %r13d movl %ecx, %ebx movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl %ecx, %r14d imull %ecx, %r14d movslq %r14d, %r14 salq $2, %r14 leaq 24(%rsp), %rdi movl $1, %edx movq %r14, %rsi call cudaMallocManaged@PLT leaq 32(%rsp), %rdi movl $1, %edx movq %r14, %rsi call cudaMallocManaged@PLT leaq 40(%rsp), %rdi movl $1, %edx movl $200, %esi call cudaMallocManaged@PLT movl $1, %ecx movl $200, %edx movq %rbp, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r14, %rdx movq %r15, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT testl %r13d, %r13d jle .L12 movslq %ebx, %rbp salq $2, %rbp movl $0, %r12d movl $1, %r15d movq %r14, (%rsp) jmp .L19 .L28: movl %ebx, %ecx movq 40(%rsp), %rdx movq 24(%rsp), %rsi movq 32(%rsp), %rdi call _Z37__device_stub__Z12ising_kernelPiS_PdiPiS_Pdi jmp .L13 .L16: movq %rax, %rsi addq 24(%rsp), %rsi movl (%rsi), %r11d movq 32(%rsp), %r14 movl (%r14,%rax), %r14d movl %r14d, (%rsi) movq 32(%rsp), %rsi movl %r11d, (%rsi,%rax) addq $4, %rax addl $1, %edx cmpl %r9d, %edx je .L26 .L17: testb %r8b, %r8b je .L16 movslq %edx, %rsi movq 24(%rsp), %r11 movq 32(%rsp), %r8 movl (%r8,%rsi,4), %r8d cmpl %r8d, (%r11,%rsi,4) sete %r8b jmp .L16 .L26: addl $1, %ecx addl %ebx, %edi addl %ebx, %r9d addq %rbp, %r10 cmpl %ecx, %ebx je .L18 .L15: movl %edi, %edx movq 
%r10, %rax jmp .L17 .L18: testb %r8b, %r8b jne .L24 addl $1, %r12d cmpl %r12d, %r13d je .L27 .L19: movl $8, 48(%rsp) movl $8, 52(%rsp) movl $1, 56(%rsp) movl $8, 60(%rsp) movl $8, 64(%rsp) movl $1, 68(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L28 .L13: call cudaDeviceSynchronize@PLT testl %ebx, %ebx jle .L23 movl %ebx, %r9d movl $0, %r10d movl $0, %edi movl $0, %ecx movl %r15d, %r8d jmp .L15 .L27: movq (%rsp), %r14 jmp .L12 .L23: movq (%rsp), %r14 .L12: movl $2, %ecx movq %r14, %rdx movq 32(%rsp), %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L29 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore_state movq (%rsp), %r14 jmp .L12 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z5isingPiPdii, .-_Z5isingPiPdii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "==========================START=============================\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC7: .string "rb" .LC8: .string "conf-init.bin" .LC9: .string "k=1:\n" .LC10: .string "conf-1.bin" .LC11: .string "\nWRONG IMPLEMENTATION\n" .LC12: .string "\nCORRECT IMPLEMENTATION\n" .LC13: .string "k=4:\n" .LC14: .string "conf-4.bin" .LC15: .string "k=11:\n" .LC16: .string "conf-11.bin" .section .rodata.str1.8 .align 8 .LC17: .string "\n=========================END==========================" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r13 pushq %r12 pushq %rbx subq $216, %rsp .cfi_offset 13, -24 .cfi_offset 12, -32 .cfi_offset 3, -40 movq %fs:40, %rax movq %rax, 
-40(%rbp) xorl %eax, %eax leaq .LC0(%rip), %rsi movl $2, %edi call __printf_chk@PLT movsd .LC1(%rip), %xmm1 movsd %xmm1, -240(%rbp) movsd .LC2(%rip), %xmm0 movsd %xmm0, -232(%rbp) movsd .LC3(%rip), %xmm2 movsd %xmm2, -224(%rbp) movsd %xmm0, -216(%rbp) movsd %xmm1, -208(%rbp) movsd %xmm0, -200(%rbp) movsd .LC4(%rip), %xmm3 movsd %xmm3, -192(%rbp) movsd .LC5(%rip), %xmm4 movsd %xmm4, -184(%rbp) movsd %xmm3, -176(%rbp) movsd %xmm0, -168(%rbp) movsd %xmm2, -160(%rbp) movsd %xmm4, -152(%rbp) movq $0x000000000, -144(%rbp) movsd %xmm4, -136(%rbp) movsd %xmm2, -128(%rbp) movsd %xmm0, -120(%rbp) movsd %xmm3, -112(%rbp) movsd %xmm4, -104(%rbp) movsd %xmm3, -96(%rbp) movsd %xmm0, -88(%rbp) movsd %xmm1, -80(%rbp) movsd %xmm0, -72(%rbp) movsd %xmm2, -64(%rbp) movsd %xmm0, -56(%rbp) movsd %xmm1, -48(%rbp) leaq -1069056(%rsp), %rax .L31: cmpq %rax, %rsp je .L32 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L31 .L32: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %rbx leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %rbx, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L75 movq %r12, %rdi call fclose@PLT leaq -240(%rbp), %rsi movl $517, %ecx movl $1, %edx movq %rbx, %rdi call _Z5isingPiPdii leaq -1069056(%rsp), %rax .L35: cmpq %rax, %rsp je .L36 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L35 .L75: movl $1, %edi call exit@PLT .L36: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %r12 leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC7(%rip), %rsi leaq .LC10(%rip), %rdi call fopen@PLT movq %rax, %r13 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %r12, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L76 movq %r13, %rdi call fclose@PLT movl $2068, %edx .L39: leaq -2068(%rdx), %rax .L42: movl (%rbx,%rax), %ecx cmpl %ecx, (%r12,%rax) jne .L77 addq $4, %rax cmpq %rdx, %rax jne .L42 addq $2068, %rdx cmpq $1071224, %rdx jne .L39 leaq 
.LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L41 .L76: movl $1, %edi call exit@PLT .L77: leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L41: leaq .LC13(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq -1069056(%rsp), %rax .L44: cmpq %rax, %rsp je .L45 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L44 .L45: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %rbx leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %rbx, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L78 movq %r12, %rdi call fclose@PLT leaq -1069056(%rsp), %rax .L48: cmpq %rax, %rsp je .L49 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L48 .L78: movl $1, %edi call exit@PLT .L49: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %r12 leaq .LC7(%rip), %rsi leaq .LC14(%rip), %rdi call fopen@PLT movq %rax, %r13 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %r12, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L79 movq %r13, %rdi call fclose@PLT leaq -240(%rbp), %rsi movl $517, %ecx movl $4, %edx movq %rbx, %rdi call _Z5isingPiPdii movl $2068, %edx .L52: leaq -2068(%rdx), %rax .L55: movl (%rbx,%rax), %edi cmpl %edi, (%r12,%rax) jne .L80 addq $4, %rax cmpq %rdx, %rax jne .L55 addq $2068, %rdx cmpq $1071224, %rdx jne .L52 leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L54 .L79: movl $1, %edi call exit@PLT .L80: leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L54: leaq .LC15(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq -1069056(%rsp), %rax .L57: cmpq %rax, %rsp je .L58 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L57 .L58: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %rbx leaq .LC7(%rip), %rsi leaq .LC8(%rip), %rdi call fopen@PLT movq %rax, %r12 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %rbx, %rdi call __fread_chk@PLT cmpq 
$267289, %rax jne .L81 movq %r12, %rdi call fclose@PLT leaq -1069056(%rsp), %rax .L61: cmpq %rax, %rsp je .L62 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L61 .L81: movl $1, %edi call exit@PLT .L62: subq $112, %rsp orq $0, 104(%rsp) movq %rsp, %r12 leaq .LC7(%rip), %rsi leaq .LC16(%rip), %rdi call fopen@PLT movq %rax, %r13 movq %rax, %r8 movl $267289, %ecx movl $4, %edx movl $1069156, %esi movq %r12, %rdi call __fread_chk@PLT cmpq $267289, %rax jne .L82 movq %r13, %rdi call fclose@PLT leaq -240(%rbp), %rsi movl $517, %ecx movl $11, %edx movq %rbx, %rdi call _Z5isingPiPdii movl $2068, %edx .L65: leaq -2068(%rdx), %rax .L68: movl (%rbx,%rax), %esi cmpl %esi, (%r12,%rax) jne .L83 addq $4, %rax cmpq %rax, %rdx jne .L68 addq $2068, %rdx cmpq $1071224, %rdx jne .L65 leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L67 .L82: movl $1, %edi call exit@PLT .L83: leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L67: leaq .LC17(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq -40(%rbp), %rax subq %fs:40, %rax jne .L84 movl $0, %eax leaq -24(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .L84: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC18: .string "_Z12ising_kernelPiS_Pdi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC18(%rip), %rdx movq %rdx, %rcx leaq _Z12ising_kernelPiS_Pdi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), 
%rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long -755914244 .long 1064329805 .align 8 .LC2: .long -755914244 .long 1066426957 .align 8 .LC3: .long 1992864825 .long 1067098046 .align 8 .LC4: .long 1443109011 .long 1068641550 .align 8 .LC5: .long 1168231105 .long 1069413302 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "ising-v3.hip" .globl _Z27__device_stub__ising_kernelPiS_Pdi # -- Begin function _Z27__device_stub__ising_kernelPiS_Pdi .p2align 4, 0x90 .type _Z27__device_stub__ising_kernelPiS_Pdi,@function _Z27__device_stub__ising_kernelPiS_Pdi: # @_Z27__device_stub__ising_kernelPiS_Pdi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12ising_kernelPiS_Pdi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__ising_kernelPiS_Pdi, .Lfunc_end0-_Z27__device_stub__ising_kernelPiS_Pdi .cfi_endproc # -- End function .globl _Z5isingPiPdii # -- Begin function _Z5isingPiPdii .p2align 4, 0x90 .type _Z5isingPiPdii,@function _Z5isingPiPdii: # @_Z5isingPiPdii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movl %edx, %r12d movq %rsi, %r14 movq %rdi, %r15 movl %ecx, %ebx imull %ebx, %ebx shlq $2, %rbx leaq 8(%rsp), %rdi movq %rbx, %rsi movl $1, %edx callq hipMallocManaged movq %rsp, %rdi movq %rbx, %rsi movl $1, %edx callq hipMallocManaged leaq 32(%rsp), %rdi movl $200, %esi movl $1, %edx callq hipMallocManaged movq 
32(%rsp), %rdi movl $200, %edx movq %r14, %rsi movl $1, %ecx callq hipMemcpy movq (%rsp), %rdi movq %r15, 40(%rsp) # 8-byte Spill movq %r15, %rsi movq %rbx, 48(%rsp) # 8-byte Spill movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movl %r12d, 20(%rsp) # 4-byte Spill testl %r12d, %r12d jle .LBB1_8 # %bb.1: # %.lr.ph66 movl %ebp, %r14d xorl %r13d, %r13d xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_2: # =>This Loop Header: Depth=1 # Child Loop BB1_10 Depth 2 # Child Loop BB1_11 Depth 3 movabsq $34359738376, %rdi # imm = 0x800000008 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_4 # %bb.3: # in Loop: Header=BB1_2 Depth=1 movq (%rsp), %rax movq 8(%rsp), %rcx movq 32(%rsp), %rdx movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movq %rdx, 104(%rsp) movl %ebp, 28(%rsp) leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 28(%rsp), %rax movq %rax, 152(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d movl $_Z12ising_kernelPiS_Pdi, %edi leaq 128(%rsp), %r9 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 pushq 72(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_4: # in Loop: Header=BB1_2 Depth=1 movl %ebx, 24(%rsp) # 4-byte Spill callq hipDeviceSynchronize testl %ebp, %ebp jle .LBB1_5 # %bb.9: # %.preheader.lr.ph # in Loop: Header=BB1_2 Depth=1 movq 8(%rsp), %rcx movb $1, %al xorl %edx, %edx movq (%rsp), %rsi xorl %edi, %edi jmp .LBB1_10 .p2align 4, 0x90 .LBB1_14: # %._crit_edge # in Loop: Header=BB1_10 Depth=2 incq %rdi addl %ebp, %edx cmpq %r14, %rdi je .LBB1_6 .LBB1_10: # %.preheader # Parent Loop BB1_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_11 Depth 3 movl %edx, %r10d leaq (,%r10,4), %r8 leaq (%rsi,%r10,4), %r9 leaq 
(%rcx,%r10,4), %r10 movq (%rsp), %r11 addq %r8, %r11 addq 8(%rsp), %r8 xorl %r15d, %r15d jmp .LBB1_11 .p2align 4, 0x90 .LBB1_13: # in Loop: Header=BB1_11 Depth=3 movl (%r10,%r15,4), %r12d movl (%r9,%r15,4), %ebx movl %ebx, (%r10,%r15,4) movl %r12d, (%r9,%r15,4) incq %r15 cmpq %r15, %r14 je .LBB1_14 .LBB1_11: # Parent Loop BB1_2 Depth=1 # Parent Loop BB1_10 Depth=2 # => This Inner Loop Header: Depth=3 testb $1, %al je .LBB1_13 # %bb.12: # in Loop: Header=BB1_11 Depth=3 movl (%r8,%r15,4), %r12d cmpl (%r11,%r15,4), %r12d movzbl %al, %eax cmovnel %r13d, %eax jmp .LBB1_13 .p2align 4, 0x90 .LBB1_5: # in Loop: Header=BB1_2 Depth=1 movb $1, %al .LBB1_6: # %._crit_edge62 # in Loop: Header=BB1_2 Depth=1 testb $1, %al movl 24(%rsp), %ebx # 4-byte Reload jne .LBB1_8 # %bb.7: # %._crit_edge62 # in Loop: Header=BB1_2 Depth=1 incl %ebx cmpl 20(%rsp), %ebx # 4-byte Folded Reload jne .LBB1_2 .LBB1_8: # %._crit_edge67 movq (%rsp), %rsi movq 40(%rsp), %rdi # 8-byte Reload movq 48(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z5isingPiPdii, .Lfunc_end1-_Z5isingPiPdii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset %rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $1069376, %rsp # imm = 0x105140 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 movl $.Lstr, %edi callq puts@PLT leaq -240(%rbp), %rdi movl $.L__const.main.weight, %esi movl $200, %edx callq memcpy@PLT movq %rsp, %r12 movl $.L.str.1, %edi movl $.L.str.2, %esi callq fopen movq %rax, %rbx leaq -1069408(%rbp), %rdi movl $4, %esi 
movl $267289, %edx # imm = 0x41419 movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.1: movq %rbx, %rdi callq fclose leaq -1069408(%rbp), %rbx leaq -240(%rbp), %rsi movq %rbx, %rdi movl $1, %edx movl $517, %ecx # imm = 0x205 callq _Z5isingPiPdii movq %rsp, %r14 addq $-1069168, %r14 # imm = 0xFFEFAF90 movq %r14, %rsp movl $.Lstr.1, %edi callq puts@PLT movl $.L.str.4, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r15 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %r14, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.2: movq %r15, %rdi callq fclose movb $1, %al xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_3: # %.preheader134 # =>This Loop Header: Depth=1 # Child Loop BB2_5 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_5: # Parent Loop BB2_3 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%rcx,4), %edx cmpl (%rbx,%rcx,4), %edx jne .LBB2_6 # %bb.4: # in Loop: Header=BB2_5 Depth=2 incq %rcx cmpq $517, %rcx # imm = 0x205 jne .LBB2_5 # %bb.7: # %.loopexit135 # in Loop: Header=BB2_3 Depth=1 testb $1, %al jne .LBB2_8 jmp .LBB2_9 .p2align 4, 0x90 .LBB2_6: # in Loop: Header=BB2_3 Depth=1 movl $.Lstr.8, %edi callq puts@PLT xorl %eax, %eax testb $1, %al je .LBB2_9 .LBB2_8: # %.loopexit135 # in Loop: Header=BB2_3 Depth=1 incq %r15 addq $2068, %rbx # imm = 0x814 addq $2068, %r14 # imm = 0x814 cmpq $517, %r15 # imm = 0x205 jne .LBB2_3 .LBB2_9: testb $1, %al je .LBB2_11 # %bb.10: movl $.Lstr.9, %edi callq puts@PLT .LBB2_11: movl $.Lstr.4, %edi callq puts@PLT movq %rsp, %rbx addq $-1069168, %rbx # imm = 0xFFEFAF90 movq %rbx, %rsp movl $.L.str.1, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r14 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %rbx, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.12: movq %r14, %rdi callq fclose movq %rsp, %r14 addq $-1069168, %r14 # imm = 0xFFEFAF90 movq %r14, %rsp movl $.L.str.8, %edi movl $.L.str.2, %esi callq fopen 
movq %rax, %r15 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %r14, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.13: movq %r15, %rdi callq fclose leaq -240(%rbp), %rsi movq %rbx, %rdi movl $4, %edx movl $517, %ecx # imm = 0x205 callq _Z5isingPiPdii movb $1, %al xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_14: # %.preheader132 # =>This Loop Header: Depth=1 # Child Loop BB2_16 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_16: # Parent Loop BB2_14 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%rcx,4), %edx cmpl (%rbx,%rcx,4), %edx jne .LBB2_17 # %bb.15: # in Loop: Header=BB2_16 Depth=2 incq %rcx cmpq $517, %rcx # imm = 0x205 jne .LBB2_16 # %bb.18: # %.loopexit133 # in Loop: Header=BB2_14 Depth=1 testb $1, %al jne .LBB2_19 jmp .LBB2_20 .p2align 4, 0x90 .LBB2_17: # in Loop: Header=BB2_14 Depth=1 movl $.Lstr.8, %edi callq puts@PLT xorl %eax, %eax testb $1, %al je .LBB2_20 .LBB2_19: # %.loopexit133 # in Loop: Header=BB2_14 Depth=1 incq %r15 addq $2068, %rbx # imm = 0x814 addq $2068, %r14 # imm = 0x814 cmpq $517, %r15 # imm = 0x205 jne .LBB2_14 .LBB2_20: testb $1, %al je .LBB2_22 # %bb.21: movl $.Lstr.9, %edi callq puts@PLT .LBB2_22: movl $.Lstr.7, %edi callq puts@PLT movq %rsp, %rbx addq $-1069168, %rbx # imm = 0xFFEFAF90 movq %rbx, %rsp movl $.L.str.1, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r14 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %rbx, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.23: movq %r14, %rdi callq fclose movq %rsp, %r14 addq $-1069168, %r14 # imm = 0xFFEFAF90 movq %r14, %rsp movl $.L.str.10, %edi movl $.L.str.2, %esi callq fopen movq %rax, %r15 movl $4, %esi movl $267289, %edx # imm = 0x41419 movq %r14, %rdi movq %rax, %rcx callq fread cmpq $267289, %rax # imm = 0x41419 jne .LBB2_34 # %bb.24: movq %r15, %rdi callq fclose leaq -240(%rbp), %rsi movq %rbx, %rdi movl $11, %edx movl $517, %ecx # imm = 0x205 callq _Z5isingPiPdii movb $1, %al 
xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_25: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB2_27 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB2_27: # Parent Loop BB2_25 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%rcx,4), %edx cmpl (%rbx,%rcx,4), %edx jne .LBB2_28 # %bb.26: # in Loop: Header=BB2_27 Depth=2 incq %rcx cmpq $517, %rcx # imm = 0x205 jne .LBB2_27 # %bb.29: # %.loopexit # in Loop: Header=BB2_25 Depth=1 testb $1, %al jne .LBB2_30 jmp .LBB2_31 .p2align 4, 0x90 .LBB2_28: # in Loop: Header=BB2_25 Depth=1 movl $.Lstr.8, %edi callq puts@PLT xorl %eax, %eax testb $1, %al je .LBB2_31 .LBB2_30: # %.loopexit # in Loop: Header=BB2_25 Depth=1 incq %r15 addq $2068, %rbx # imm = 0x814 addq $2068, %r14 # imm = 0x814 cmpq $517, %r15 # imm = 0x205 jne .LBB2_25 .LBB2_31: testb $1, %al je .LBB2_33 # %bb.32: movl $.Lstr.9, %edi callq puts@PLT .LBB2_33: movl $.L.str.11, %edi xorl %eax, %eax callq printf movq %r12, %rsp xorl %eax, %eax leaq -32(%rbp), %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp .cfi_def_cfa %rsp, 8 retq .LBB2_34: .cfi_def_cfa %rbp, 16 movl $1, %edi callq exit .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12ising_kernelPiS_Pdi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # 
-- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z12ising_kernelPiS_Pdi,@object # @_Z12ising_kernelPiS_Pdi .section .rodata,"a",@progbits .globl _Z12ising_kernelPiS_Pdi .p2align 3, 0x0 _Z12ising_kernelPiS_Pdi: .quad _Z27__device_stub__ising_kernelPiS_Pdi .size _Z12ising_kernelPiS_Pdi, 8 .type .L__const.main.weight,@object # @__const.main.weight .p2align 4, 0x0 .L__const.main.weight: .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x0000000000000000 # double 0 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3fbdf3b645a1cac1 # double 0.11700000000000001 .quad 0x3fb22d0e56041893 # double 0.070999999999999994 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f70624dd2f1a9fc # double 0.0040000000000000001 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 0x3f9a9fbe76c8b439 # double 0.025999999999999999 .quad 0x3f90624dd2f1a9fc # double 0.016 .quad 
0x3f70624dd2f1a9fc # double 0.0040000000000000001 .size .L__const.main.weight, 200 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "conf-init.bin" .size .L.str.1, 14 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "rb" .size .L.str.2, 3 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "conf-1.bin" .size .L.str.4, 11 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "conf-4.bin" .size .L.str.8, 11 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "conf-11.bin" .size .L.str.10, 12 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "\n=========================END==========================" .size .L.str.11, 56 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12ising_kernelPiS_Pdi" .size .L__unnamed_1, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "==========================START=============================" .size .Lstr, 61 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "k=1:" .size .Lstr.1, 5 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "k=4:" .size .Lstr.4, 5 .type .Lstr.7,@object # @str.7 .Lstr.7: .asciz "k=11:" .size .Lstr.7, 6 .type .Lstr.8,@object # @str.8 .Lstr.8: .asciz "\nWRONG IMPLEMENTATION" .size .Lstr.8, 22 .type .Lstr.9,@object # @str.9 .Lstr.9: .asciz "\nCORRECT IMPLEMENTATION" .size .Lstr.9, 24 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git 
(https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__ising_kernelPiS_Pdi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12ising_kernelPiS_Pdi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda.h>

/* Total relaxation sweeps performed (two kernel launches per loop trip). */
#define ITERATION 500
/* Threads per CUDA block. */
#define BLKSIZE 512

/* 64-bit unsigned index type used for grid sizes and offsets. */
typedef unsigned long long bint;

/*
 * Allocate an (n+1) x (n+1) float grid, zero-initialized, then paint
 * every boundary cell with 80 and overwrite cells 10..30 of the top
 * row with 150 (a "hot" segment on the top edge).
 * Returns the heap pointer; caller owns it (freed in main).
 * NOTE(review): calloc result is not checked for NULL — TODO confirm
 * callers only use sizes where allocation cannot fail in practice.
 */
float * allocate(bint n){
    bint size = (n+1)*(n+1);
    float *m = (float *)calloc(size, sizeof(float));
    bint i;
    for (i=0; i<n+1; i++){
        m[i] = 80;                 /* top row */
        m[size-1-i] = 80;          /* bottom row (filled right-to-left) */
        m[(n+1)*i] = 80;           /* left column */
        m[(n+1)*(i+1)-1] = 80;     /* right column */
        /* Re-store top-row cell: 150 inside [10,30], else keep the 80. */
        m[i] = (i >=10 && i<=30) ? 150 : m[i];
    }
    return m;
}

/*
 * Mean of all dim*dim elements of m (helper kept for debugging; its
 * callers in main are commented out).
 */
float avg(float *m, bint dim){
    bint size = dim*dim;
    float sum = 0;
    bint i;
    for (i=0; i<size; i++){
        sum += m[i];
        //if (i % dim==0)
        //    printf("\n");
        //printf("%f ", m[i]);
    }
    //printf("\n");
    return sum/size;
}

/*
 * One Jacobi-style relaxation step: each thread writes one interior
 * cell of d as the average of its four neighbors in s.
 * dim is one side length of matrix.  The index mapping
 *   i = tid + dim + 1 + 2*(tid/(dim-2))
 * skips the first row plus the left border cell, and adds 2 for every
 * (dim-2)-sized group of tids so the two border columns of each row
 * are never written.  The guard i < dim*(dim-1)-1 stops before the
 * last row.  NOTE(review): assumes dim > 2, otherwise tid/(dim-2)
 * divides by zero — TODO confirm the host never launches with n+1 <= 2.
 */
__global__ void simulateKernel(float *s, float *d, bint dim){
    //dim is one side length of matrix
    bint tid = threadIdx.x + blockIdx.x * blockDim.x;
    bint i = tid + dim + 1 + 2*(tid/(dim-2));
    if (i < dim*(dim-1)-1)
        d[i] = (s[i-1] + s[i+1] + s[i-dim] + s[i+dim]) / 4;
}

/*
 * Driver: argv[1] = matrix size n.  Builds the (n+1)^2 host grid,
 * mirrors it into two device buffers a/b, ping-pongs ITERATION
 * relaxation sweeps between them, then copies the final state back.
 * NOTE(review): atoi does not validate argv[1]; non-numeric input
 * yields n == 0.  CUDA API return codes are not checked.
 */
int main(int argc, char *argv[]){
    if (argc < 2){
        printf("Please indicate matrix size.\n");
        exit(0);
    }
    bint n = atoi(argv[1]);
    float *m = allocate(n);
    //float mean = avg(m, n+1);
    //printf("%f===>",mean);
    // allocation and copy to DEVICE
    float * a, *b;
    bint mem = (n+1)*(n+1)*sizeof(float);
    cudaMalloc((void **)&a, mem);
    cudaMalloc((void **)&b, mem);
    cudaMemcpy(a, m, mem, cudaMemcpyHostToDevice);
    cudaMemcpy(b, m, mem, cudaMemcpyHostToDevice);
    // call kernel function
    /* One thread per grid cell, rounded up to a whole block. */
    bint gridSize = ((n+1)*(n+1) % BLKSIZE == 0)? (n+1)*(n+1)/BLKSIZE : (n+1)*(n+1)/BLKSIZE+1;
    int i;
    for (i=0; i<ITERATION/2; i++){
        /* Ping-pong: read a / write b, then read b / write a. */
        simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1);
        simulateKernel<<<gridSize, BLKSIZE>>>(b, a, n+1);
    }
    if (ITERATION%2 !=0){
        /* Odd total: one extra sweep leaves the result in b. */
        simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1);
        cudaMemcpy(m, b, mem, cudaMemcpyDeviceToHost);
    }
    else{
        /* Even total (500): final state is in a. */
        cudaMemcpy(m, a, mem, cudaMemcpyDeviceToHost);
    }
    //mean = avg(m, n+1);
    //printf("%f\n", mean);
    free(m);
    cudaFree(a);
    cudaFree(b);
    return 0;
}
code for sm_80 Function : _Z14simulateKernelPfS_y .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */ /* 0x000fe40000000a00 */ /*0030*/ UIADD3 UR4, UP0, UR4, -0x2, URZ ; /* 0xfffffffe04047890 */ /* 0x000fe2000ff1e03f */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e260000002100 */ /*0050*/ UIADD3.X UR5, UR5, -0x1, URZ, UP0, !UPT ; /* 0xffffffff05057890 */ /* 0x000fcc00087fe43f */ /*0060*/ ISETP.NE.U32.AND P0, PT, RZ, UR5, PT ; /* 0x00000005ff007c0c */ /* 0x000fe2000bf05070 */ /*0070*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */ /* 0x001fca00078e0200 */ /*0080*/ IADD3 R0, P1, R3, c[0x0][0x170], RZ ; /* 0x00005c0003007a10 */ /* 0x000fca0007f3e0ff */ /*0090*/ IMAD.X R2, RZ, RZ, c[0x0][0x174], P1 ; /* 0x00005d00ff027624 */ /* 0x000fe400008e06ff */ /*00a0*/ @!P0 BRA 0x100 ; /* 0x0000005000008947 */ /* 0x000fea0003800000 */ /*00b0*/ MOV R4, 0xd0 ; /* 0x000000d000047802 */ /* 0x000fe40000000f00 */ /*00c0*/ CALL.REL.NOINC 0x4b0 ; /* 0x000003e000007944 */ /* 0x000fea0003c00000 */ /*00d0*/ IMAD.MOV.U32 R4, RZ, RZ, R3 ; /* 0x000000ffff047224 */ /* 0x000fe400078e0003 */ /*00e0*/ IMAD.MOV.U32 R5, RZ, RZ, R6 ; /* 0x000000ffff057224 */ /* 0x000fe200078e0006 */ /*00f0*/ BRA 0x230 ; /* 0x0000013000007947 */ /* 0x000fea0003800000 */ /*0100*/ I2F.U32.RP R6, UR4 ; /* 0x0000000400067d06 */ /* 0x000e220008209000 */ /*0110*/ ISETP.NE.U32.AND P2, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fce000bf45070 */ /*0120*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */ /* 0x001e240000001000 */ /*0130*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */ /* 0x001fcc0007ffe0ff */ /*0140*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x000064000021f000 */ 
/*0150*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x001fe400078e00ff */ /*0160*/ IMAD.MOV R7, RZ, RZ, -R5 ; /* 0x000000ffff077224 */ /* 0x002fc800078e0a05 */ /*0170*/ IMAD R7, R7, UR4, RZ ; /* 0x0000000407077c24 */ /* 0x000fc8000f8e02ff */ /*0180*/ IMAD.HI.U32 R8, R5, R7, R4 ; /* 0x0000000705087227 */ /* 0x000fc800078e0004 */ /*0190*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */ /* 0x000fe400078e00ff */ /*01a0*/ IMAD.HI.U32 R4, R8, R3, RZ ; /* 0x0000000308047227 */ /* 0x000fc800078e00ff */ /*01b0*/ IMAD.MOV R8, RZ, RZ, -R4 ; /* 0x000000ffff087224 */ /* 0x000fc800078e0a04 */ /*01c0*/ IMAD R3, R8, UR4, R3 ; /* 0x0000000408037c24 */ /* 0x000fca000f8e0203 */ /*01d0*/ ISETP.GE.U32.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fda000bf06070 */ /*01e0*/ @P0 IADD3 R3, R3, -UR4, RZ ; /* 0x8000000403030c10 */ /* 0x000fe4000fffe0ff */ /*01f0*/ @P0 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104040810 */ /* 0x000fe40007ffe0ff */ /*0200*/ ISETP.GE.U32.AND P1, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fda000bf26070 */ /*0210*/ @P1 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104041810 */ /* 0x000fe40007ffe0ff */ /*0220*/ @!P2 LOP3.LUT R4, RZ, UR4, RZ, 0x33, !PT ; /* 0x00000004ff04ac12 */ /* 0x000fc8000f8e33ff */ /*0230*/ ULDC.64 UR8, c[0x0][0x170] ; /* 0x00005c0000087ab9 */ /* 0x000fe20000000a00 */ /*0240*/ IMAD.SHL.U32 R3, R4.reuse, 0x2, RZ ; /* 0x0000000204037824 */ /* 0x040fe200078e00ff */ /*0250*/ UIADD3 UR6, UP0, UR8, -0x1, URZ ; /* 0xffffffff08067890 */ /* 0x000fe2000ff1e03f */ /*0260*/ SHF.L.U64.HI R5, R4, 0x1, R5 ; /* 0x0000000104057819 */ /* 0x000fe20000010205 */ /*0270*/ ULDC.64 UR4, c[0x2][0x0] ; /* 0x0080000000047ab9 */ /* 0x000fe40000000a00 */ /*0280*/ UIADD3.X UR7, UR9, -0x1, URZ, UP0, !UPT ; /* 0xffffffff09077890 */ /* 0x000fe200087fe43f */ /*0290*/ IADD3 R10, P0, P1, R0, 0x1, R3 ; /* 0x00000001000a7810 */ /* 0x000fe2000791e003 */ /*02a0*/ UIMAD.WIDE.U32 UR4, UR6, UR8, UR4 ; /* 0x00000008060472a5 */ /* 0x000fc4000f8e0004 */ /*02b0*/ 
UIMAD UR7, UR7, UR8, URZ ; /* 0x00000008070772a4 */ /* 0x000fe2000f8e023f */ /*02c0*/ IADD3.X R11, R2, RZ, R5, P0, P1 ; /* 0x000000ff020b7210 */ /* 0x000fc600007e2405 */ /*02d0*/ UIMAD UR7, UR6, UR9, UR7 ; /* 0x00000009060772a4 */ /* 0x000fe2000f8e0207 */ /*02e0*/ ISETP.GE.U32.AND P0, PT, R10, UR4, PT ; /* 0x000000040a007c0c */ /* 0x000fc6000bf06070 */ /*02f0*/ UIADD3 UR5, UR5, UR7, URZ ; /* 0x0000000705057290 */ /* 0x000fcc000fffe03f */ /*0300*/ ISETP.GE.U32.AND.EX P0, PT, R11, UR5, PT, P0 ; /* 0x000000050b007c0c */ /* 0x000fda000bf06100 */ /*0310*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0320*/ IADD3 R3, P0, R3, R0, RZ ; /* 0x0000000003037210 */ /* 0x000fe20007f1e0ff */ /*0330*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0340*/ IADD3 R0, P1, R10.reuse, -c[0x0][0x170], RZ ; /* 0x80005c000a007a10 */ /* 0x040fe40007f3e0ff */ /*0350*/ IADD3 R7, P2, R10, c[0x0][0x170], RZ ; /* 0x00005c000a077a10 */ /* 0x000fe20007f5e0ff */ /*0360*/ IMAD.X R6, R5, 0x1, R2, P0 ; /* 0x0000000105067824 */ /* 0x000fe200000e0602 */ /*0370*/ LEA R2, P0, R3, c[0x0][0x160], 0x2 ; /* 0x0000580003027a11 */ /* 0x000fe400078010ff */ /*0380*/ IADD3.X R5, R11, ~c[0x0][0x174], RZ, P1, !PT ; /* 0x80005d000b057a10 */ /* 0x000fe40000ffe4ff */ /*0390*/ LEA R4, P1, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000047a11 */ /* 0x000fc400078210ff */ /*03a0*/ LEA.HI.X R3, R3, c[0x0][0x164], R6, 0x2, P0 ; /* 0x0000590003037a11 */ /* 0x000fe400000f1406 */ /*03b0*/ LEA.HI.X R5, R0, c[0x0][0x164], R5, 0x2, P1 ; /* 0x0000590000057a11 */ /* 0x000fe400008f1405 */ /*03c0*/ IADD3.X R8, R11, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d000b087a10 */ /* 0x000fe200017fe4ff */ /*03d0*/ LDG.E R0, [R2.64+0x8] ; /* 0x0000080402007981 */ /* 0x000ea2000c1e1900 */ /*03e0*/ LEA R6, P0, R7, c[0x0][0x160], 0x2 ; /* 0x0000580007067a11 */ /* 0x000fc600078010ff */ /*03f0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */ /* 0x000ea2000c1e1900 */ /*0400*/ LEA.HI.X R7, R7, c[0x0][0x164], R8, 
0x2, P0 ; /* 0x0000590007077a11 */ /* 0x000fc600000f1408 */ /*0410*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ee8000c1e1900 */ /*0420*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000f22000c1e1900 */ /*0430*/ LEA R8, P0, R10, c[0x0][0x168], 0x2 ; /* 0x00005a000a087a11 */ /* 0x000fe200078010ff */ /*0440*/ FADD R0, R0, R9 ; /* 0x0000000900007221 */ /* 0x004fc80000000000 */ /*0450*/ FADD R0, R0, R5 ; /* 0x0000000500007221 */ /* 0x008fe20000000000 */ /*0460*/ LEA.HI.X R9, R10, c[0x0][0x16c], R11, 0x2, P0 ; /* 0x00005b000a097a11 */ /* 0x000fc600000f140b */ /*0470*/ FADD R0, R0, R7 ; /* 0x0000000700007221 */ /* 0x010fc80000000000 */ /*0480*/ FMUL R11, R0, 0.25 ; /* 0x3e800000000b7820 */ /* 0x000fca0000400000 */ /*0490*/ STG.E [R8.64], R11 ; /* 0x0000000b08007986 */ /* 0x000fe2000c101904 */ /*04a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*04b0*/ I2F.U64.RP R5, UR4 ; /* 0x0000000400057d12 */ /* 0x000e300008309000 */ /*04c0*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */ /* 0x001e240000001000 */ /*04d0*/ IADD3 R6, R5, 0x1ffffffe, RZ ; /* 0x1ffffffe05067810 */ /* 0x001fcc0007ffe0ff */ /*04e0*/ F2I.U64.TRUNC R6, R6 ; /* 0x0000000600067311 */ /* 0x000e24000020d800 */ /*04f0*/ IMAD.WIDE.U32 R8, R6, UR4, RZ ; /* 0x0000000406087c25 */ /* 0x001fc8000f8e00ff */ /*0500*/ IMAD R9, R6, UR5, R9 ; /* 0x0000000506097c24 */ /* 0x000fe2000f8e0209 */ /*0510*/ IADD3 R11, P0, RZ, -R8, RZ ; /* 0x80000008ff0b7210 */ /* 0x000fc60007f1e0ff */ /*0520*/ IMAD R9, R7, UR4, R9 ; /* 0x0000000407097c24 */ /* 0x000fe4000f8e0209 */ /*0530*/ IMAD.HI.U32 R8, R6, R11, RZ ; /* 0x0000000b06087227 */ /* 0x000fc800078e00ff */ /*0540*/ IMAD.X R13, RZ, RZ, ~R9, P0 ; /* 0x000000ffff0d7224 */ /* 0x000fe400000e0e09 */ /*0550*/ IMAD.MOV.U32 R9, RZ, RZ, R6 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0006 */ /*0560*/ IMAD R15, R7, R13.reuse, RZ ; /* 0x0000000d070f7224 */ /* 0x080fe400078e02ff */ /*0570*/ IMAD.WIDE.U32 R8, P0, R6, R13, R8 ; /* 0x0000000d06087225 */ /* 0x000fc80007800008 
*/ /*0580*/ IMAD.HI.U32 R5, R7, R13, RZ ; /* 0x0000000d07057227 */ /* 0x000fc800078e00ff */ /*0590*/ IMAD.HI.U32 R8, P1, R7, R11, R8 ; /* 0x0000000b07087227 */ /* 0x000fc80007820008 */ /*05a0*/ IMAD.X R5, R5, 0x1, R7, P0 ; /* 0x0000000105057824 */ /* 0x000fe200000e0607 */ /*05b0*/ IADD3 R9, P2, R15, R8, RZ ; /* 0x000000080f097210 */ /* 0x000fc80007f5e0ff */ /*05c0*/ IADD3.X R5, RZ, RZ, R5, P2, P1 ; /* 0x000000ffff057210 */ /* 0x000fe200017e2405 */ /*05d0*/ IMAD.WIDE.U32 R6, R9, UR4, RZ ; /* 0x0000000409067c25 */ /* 0x000fc8000f8e00ff */ /*05e0*/ IMAD R8, R9, UR5, R7 ; /* 0x0000000509087c24 */ /* 0x000fe2000f8e0207 */ /*05f0*/ IADD3 R7, P0, RZ, -R6, RZ ; /* 0x80000006ff077210 */ /* 0x000fc60007f1e0ff */ /*0600*/ IMAD R6, R5, UR4, R8 ; /* 0x0000000405067c24 */ /* 0x000fe4000f8e0208 */ /*0610*/ IMAD.HI.U32 R8, R9, R7, RZ ; /* 0x0000000709087227 */ /* 0x000fc800078e00ff */ /*0620*/ IMAD.X R6, RZ, RZ, ~R6, P0 ; /* 0x000000ffff067224 */ /* 0x000fc800000e0e06 */ /*0630*/ IMAD.WIDE.U32 R8, P0, R9, R6, R8 ; /* 0x0000000609087225 */ /* 0x000fc80007800008 */ /*0640*/ IMAD R11, R5.reuse, R6, RZ ; /* 0x00000006050b7224 */ /* 0x040fe400078e02ff */ /*0650*/ IMAD.HI.U32 R8, P1, R5, R7, R8 ; /* 0x0000000705087227 */ /* 0x000fc80007820008 */ /*0660*/ IMAD.HI.U32 R10, R5, R6, RZ ; /* 0x00000006050a7227 */ /* 0x000fe200078e00ff */ /*0670*/ IADD3 R8, P2, R11, R8, RZ ; /* 0x000000080b087210 */ /* 0x000fc60007f5e0ff */ /*0680*/ IMAD.X R5, R10, 0x1, R5, P0 ; /* 0x000000010a057824 */ /* 0x000fe400000e0605 */ /*0690*/ IMAD.HI.U32 R6, R8, R3, RZ ; /* 0x0000000308067227 */ /* 0x000fc600078e00ff */ /*06a0*/ IADD3.X R5, RZ, RZ, R5, P2, P1 ; /* 0x000000ffff057210 */ /* 0x000fe200017e2405 */ /*06b0*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */ /* 0x000fc800078e00ff */ /*06c0*/ IMAD.WIDE.U32 R6, RZ, R8, R6 ; /* 0x00000008ff067225 */ /* 0x000fcc00078e0006 */ /*06d0*/ IMAD.HI.U32 R5, P0, R5, R3, R6 ; /* 0x0000000305057227 */ /* 0x000fca0007800006 */ /*06e0*/ IADD3 R9, P1, RZ, R5, RZ ; /* 
0x00000005ff097210 */ /* 0x000fe20007f3e0ff */ /*06f0*/ IMAD.X R5, RZ, RZ, RZ, P0 ; /* 0x000000ffff057224 */ /* 0x000fc800000e06ff */ /*0700*/ IMAD.WIDE.U32 R6, R9, UR4, RZ ; /* 0x0000000409067c25 */ /* 0x000fc8000f8e00ff */ /*0710*/ IMAD.X R5, RZ, RZ, R5, P1 ; /* 0x000000ffff057224 */ /* 0x000fe200008e0605 */ /*0720*/ IADD3 R10, P1, -R6, R3, RZ ; /* 0x00000003060a7210 */ /* 0x000fe20007f3e1ff */ /*0730*/ IMAD R8, R9, UR5, R7 ; /* 0x0000000509087c24 */ /* 0x000fc6000f8e0207 */ /*0740*/ ISETP.GE.U32.AND P0, PT, R10.reuse, UR4, PT ; /* 0x000000040a007c0c */ /* 0x040fe2000bf06070 */ /*0750*/ IMAD R8, R5, UR4, R8 ; /* 0x0000000405087c24 */ /* 0x000fe2000f8e0208 */ /*0760*/ IADD3 R3, P2, R10, -UR4, RZ ; /* 0x800000040a037c10 */ /* 0x000fc6000ff5e0ff */ /*0770*/ IMAD.X R12, RZ, RZ, ~R8, P1 ; /* 0x000000ffff0c7224 */ /* 0x000fe200008e0e08 */ /*0780*/ IADD3 R8, P1, R9, 0x1, RZ ; /* 0x0000000109087810 */ /* 0x000fc80007f3e0ff */ /*0790*/ ISETP.GE.U32.AND.EX P0, PT, R12.reuse, UR5, PT, P0 ; /* 0x000000050c007c0c */ /* 0x040fe2000bf06100 */ /*07a0*/ IMAD.X R6, RZ, RZ, R5, P1 ; /* 0x000000ffff067224 */ /* 0x000fe200008e0605 */ /*07b0*/ IADD3.X R7, R12, ~UR5, RZ, P2, !PT ; /* 0x800000050c077c10 */ /* 0x000fe400097fe4ff */ /*07c0*/ SEL R3, R3, R10, P0 ; /* 0x0000000a03037207 */ /* 0x000fe40000000000 */ /*07d0*/ SEL R8, R8, R9, P0 ; /* 0x0000000908087207 */ /* 0x000fe40000000000 */ /*07e0*/ SEL R7, R7, R12, P0 ; /* 0x0000000c07077207 */ /* 0x000fe40000000000 */ /*07f0*/ SEL R5, R6, R5, P0 ; /* 0x0000000506057207 */ /* 0x000fc40000000000 */ /*0800*/ ISETP.GE.U32.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fe4000bf06070 */ /*0810*/ IADD3 R3, P2, R8, 0x1, RZ ; /* 0x0000000108037810 */ /* 0x000fe40007f5e0ff */ /*0820*/ ISETP.GE.U32.AND.EX P0, PT, R7, UR5, PT, P0 ; /* 0x0000000507007c0c */ /* 0x000fe4000bf06100 */ /*0830*/ ISETP.NE.U32.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fe2000bf25070 */ /*0840*/ IMAD.X R6, RZ, RZ, R5, P2 ; /* 0x000000ffff067224 
*/ /* 0x000fe200010e0605 */ /*0850*/ SEL R3, R3, R8, P0 ; /* 0x0000000803037207 */ /* 0x000fe40000000000 */ /*0860*/ ISETP.NE.AND.EX P1, PT, RZ, UR5, PT, P1 ; /* 0x00000005ff007c0c */ /* 0x000fc4000bf25310 */ /*0870*/ SEL R6, R6, R5, P0 ; /* 0x0000000506067207 */ /* 0x000fe20000000000 */ /*0880*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */ /* 0x000fe200078e00ff */ /*0890*/ SEL R3, R3, 0xffffffff, P1 ; /* 0xffffffff03037807 */ /* 0x000fe40000800000 */ /*08a0*/ SEL R6, R6, 0xffffffff, P1 ; /* 0xffffffff06067807 */ /* 0x000fe20000800000 */ /*08b0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff74004007950 */ /* 0x000fec0003c3ffff */ /*08c0*/ BRA 0x8c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <cuda.h> #define ITERATION 500 #define BLKSIZE 512 typedef unsigned long long bint; float * allocate(bint n){ bint size = (n+1)*(n+1); float *m = (float *)calloc(size, sizeof(float)); bint i; for (i=0; i<n+1; i++){ m[i] = 80; m[size-1-i] = 80; m[(n+1)*i] = 80; m[(n+1)*(i+1)-1] = 80; m[i] = (i >=10 && i<=30) ? 150 : m[i]; } return m; } float avg(float *m, bint dim){ bint size = dim*dim; float sum = 0; bint i; for (i=0; i<size; i++){ sum += m[i]; //if (i % dim==0) // printf("\n"); //printf("%f ", m[i]); } //printf("\n"); return sum/size; } __global__ void simulateKernel(float *s, float *d, bint dim){ //dim is one side length of matrix bint tid = threadIdx.x + blockIdx.x * blockDim.x; bint i = tid + dim + 1 + 2*(tid/(dim-2)); if (i < dim*(dim-1)-1) d[i] = (s[i-1] + s[i+1] + s[i-dim] + s[i+dim]) / 4; } int main(int argc, char *argv[]){ if (argc < 2){ printf("Please indicate matrix size.\n"); exit(0); } bint n = atoi(argv[1]); float *m = allocate(n); //float mean = avg(m, n+1); //printf("%f===>",mean); // allocation and copy to DEVICE float * a, *b; bint mem = (n+1)*(n+1)*sizeof(float); cudaMalloc((void **)&a, mem); cudaMalloc((void **)&b, mem); cudaMemcpy(a, m, mem, cudaMemcpyHostToDevice); cudaMemcpy(b, m, mem, cudaMemcpyHostToDevice); // call kernel function bint gridSize = ((n+1)*(n+1) % BLKSIZE == 0)? (n+1)*(n+1)/BLKSIZE : (n+1)*(n+1)/BLKSIZE+1; int i; for (i=0; i<ITERATION/2; i++){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); simulateKernel<<<gridSize, BLKSIZE>>>(b, a, n+1); } if (ITERATION%2 !=0){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); cudaMemcpy(m, b, mem, cudaMemcpyDeviceToHost); } else{ cudaMemcpy(m, a, mem, cudaMemcpyDeviceToHost); } //mean = avg(m, n+1); //printf("%f\n", mean); free(m); cudaFree(a); cudaFree(b); return 0; }
.file "tmpxft_00094fb9_00000000-6_cuda.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8allocatey .type _Z8allocatey, @function _Z8allocatey: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %rbp leaq 1(%rdi), %rbx movq %rbx, %r12 imulq %rbx, %r12 movl $4, %esi movq %r12, %rdi call calloc@PLT testq %rbx, %rbx je .L3 leaq -4(%rax,%r12,4), %rsi leaq 0(,%rbx,4), %r8 movq %rax, %rcx leaq -9(%rbp), %rdi movq $-10, %rdx movss .LC0(%rip), %xmm0 movaps %xmm0, %xmm2 jmp .L6 .L5: movss %xmm1, 40(%rax,%rdx,4) subq $4, %rsi addq %r8, %rcx addq $1, %rdx cmpq %rdi, %rdx je .L3 .L6: movss %xmm0, (%rsi) movss %xmm0, (%rcx) movss %xmm0, -4(%rcx,%rbx,4) movaps %xmm2, %xmm1 cmpq $20, %rdx ja .L5 movss .LC1(%rip), %xmm1 jmp .L5 .L3: popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z8allocatey, .-_Z8allocatey .globl _Z3avgPfy .type _Z3avgPfy, @function _Z3avgPfy: .LFB2058: .cfi_startproc endbr64 imulq %rsi, %rsi testq %rsi, %rsi je .L15 movq %rdi, %rax leaq (%rdi,%rsi,4), %rdx pxor %xmm0, %xmm0 .L12: addss (%rax), %xmm0 addq $4, %rax cmpq %rdx, %rax jne .L12 .L11: testq %rsi, %rsi js .L13 pxor %xmm1, %xmm1 cvtsi2ssq %rsi, %xmm1 .L14: divss %xmm1, %xmm0 ret .L15: pxor %xmm0, %xmm0 jmp .L11 .L13: movq %rsi, %rax shrq %rax andl $1, %esi orq %rsi, %rax pxor %xmm1, %xmm1 cvtsi2ssq %rax, %xmm1 addss %xmm1, %xmm1 jmp .L14 .cfi_endproc .LFE2058: .size _Z3avgPfy, .-_Z3avgPfy .globl 
_Z37__device_stub__Z14simulateKernelPfS_yPfS_y .type _Z37__device_stub__Z14simulateKernelPfS_yPfS_y, @function _Z37__device_stub__Z14simulateKernelPfS_yPfS_y: .LFB2084: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L21 .L17: movq 120(%rsp), %rax subq %fs:40, %rax jne .L22 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L21: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14simulateKernelPfS_y(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L17 .L22: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z37__device_stub__Z14simulateKernelPfS_yPfS_y, .-_Z37__device_stub__Z14simulateKernelPfS_yPfS_y .globl _Z14simulateKernelPfS_y .type _Z14simulateKernelPfS_y, @function _Z14simulateKernelPfS_y: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z14simulateKernelPfS_yPfS_y addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z14simulateKernelPfS_y, .-_Z14simulateKernelPfS_y .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "Please indicate matrix size.\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp 
.cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax cmpl $1, %edi jle .L35 movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movslq %eax, %r12 movq %r12, %rdi call _Z8allocatey movq %rax, %r13 addq $1, %r12 movq %r12, %rbp imulq %r12, %rbp leaq 0(,%rbp,4), %r14 movq %rsp, %rdi movq %r14, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movq %r14, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r14, %rdx movq %r13, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r14, %rdx movq %r13, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT testl $511, %ebp jne .L27 shrq $9, %rbp .L28: movl $250, %ebx jmp .L31 .L35: leaq .LC3(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $0, %edi call exit@PLT .L27: shrq $9, %rbp addq $1, %rbp jmp .L28 .L37: movq %r12, %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z37__device_stub__Z14simulateKernelPfS_yPfS_y jmp .L29 .L30: subl $1, %ebx je .L36 .L31: movl $512, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %ebp, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L37 .L29: movl $512, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %ebp, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L30 movq %r12, %rdx movq (%rsp), %rsi movq 8(%rsp), %rdi call _Z37__device_stub__Z14simulateKernelPfS_yPfS_y jmp .L30 .L36: movl $2, %ecx movq %r14, %rdx movq (%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movq %r13, %rdi call free@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L38 movl $0, %eax addq $48, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L38: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z14simulateKernelPfS_y" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z14simulateKernelPfS_y(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1117782016 .align 4 .LC1: .long 1125515264 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <cuda.h> #define ITERATION 500 #define BLKSIZE 512 typedef unsigned long long bint; float * allocate(bint n){ bint size = (n+1)*(n+1); float *m = (float *)calloc(size, sizeof(float)); bint i; for (i=0; i<n+1; i++){ m[i] = 80; m[size-1-i] = 80; m[(n+1)*i] = 80; m[(n+1)*(i+1)-1] = 80; m[i] = (i >=10 && i<=30) ? 150 : m[i]; } return m; } float avg(float *m, bint dim){ bint size = dim*dim; float sum = 0; bint i; for (i=0; i<size; i++){ sum += m[i]; //if (i % dim==0) // printf("\n"); //printf("%f ", m[i]); } //printf("\n"); return sum/size; } __global__ void simulateKernel(float *s, float *d, bint dim){ //dim is one side length of matrix bint tid = threadIdx.x + blockIdx.x * blockDim.x; bint i = tid + dim + 1 + 2*(tid/(dim-2)); if (i < dim*(dim-1)-1) d[i] = (s[i-1] + s[i+1] + s[i-dim] + s[i+dim]) / 4; } int main(int argc, char *argv[]){ if (argc < 2){ printf("Please indicate matrix size.\n"); exit(0); } bint n = atoi(argv[1]); float *m = allocate(n); //float mean = avg(m, n+1); //printf("%f===>",mean); // allocation and copy to DEVICE float * a, *b; bint mem = (n+1)*(n+1)*sizeof(float); cudaMalloc((void **)&a, mem); cudaMalloc((void **)&b, mem); cudaMemcpy(a, m, mem, cudaMemcpyHostToDevice); cudaMemcpy(b, m, mem, cudaMemcpyHostToDevice); // call kernel function bint gridSize = ((n+1)*(n+1) % BLKSIZE == 0)? (n+1)*(n+1)/BLKSIZE : (n+1)*(n+1)/BLKSIZE+1; int i; for (i=0; i<ITERATION/2; i++){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); simulateKernel<<<gridSize, BLKSIZE>>>(b, a, n+1); } if (ITERATION%2 !=0){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); cudaMemcpy(m, b, mem, cudaMemcpyDeviceToHost); } else{ cudaMemcpy(m, a, mem, cudaMemcpyDeviceToHost); } //mean = avg(m, n+1); //printf("%f\n", mean); free(m); cudaFree(a); cudaFree(b); return 0; }
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <hip/hip_runtime.h> #define ITERATION 500 #define BLKSIZE 512 typedef unsigned long long bint; float * allocate(bint n){ bint size = (n+1)*(n+1); float *m = (float *)calloc(size, sizeof(float)); bint i; for (i=0; i<n+1; i++){ m[i] = 80; m[size-1-i] = 80; m[(n+1)*i] = 80; m[(n+1)*(i+1)-1] = 80; m[i] = (i >=10 && i<=30) ? 150 : m[i]; } return m; } float avg(float *m, bint dim){ bint size = dim*dim; float sum = 0; bint i; for (i=0; i<size; i++){ sum += m[i]; //if (i % dim==0) // printf("\n"); //printf("%f ", m[i]); } //printf("\n"); return sum/size; } __global__ void simulateKernel(float *s, float *d, bint dim){ //dim is one side length of matrix bint tid = threadIdx.x + blockIdx.x * blockDim.x; bint i = tid + dim + 1 + 2*(tid/(dim-2)); if (i < dim*(dim-1)-1) d[i] = (s[i-1] + s[i+1] + s[i-dim] + s[i+dim]) / 4; } int main(int argc, char *argv[]){ if (argc < 2){ printf("Please indicate matrix size.\n"); exit(0); } bint n = atoi(argv[1]); float *m = allocate(n); //float mean = avg(m, n+1); //printf("%f===>",mean); // allocation and copy to DEVICE float * a, *b; bint mem = (n+1)*(n+1)*sizeof(float); hipMalloc((void **)&a, mem); hipMalloc((void **)&b, mem); hipMemcpy(a, m, mem, hipMemcpyHostToDevice); hipMemcpy(b, m, mem, hipMemcpyHostToDevice); // call kernel function bint gridSize = ((n+1)*(n+1) % BLKSIZE == 0)? (n+1)*(n+1)/BLKSIZE : (n+1)*(n+1)/BLKSIZE+1; int i; for (i=0; i<ITERATION/2; i++){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); simulateKernel<<<gridSize, BLKSIZE>>>(b, a, n+1); } if (ITERATION%2 !=0){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); hipMemcpy(m, b, mem, hipMemcpyDeviceToHost); } else{ hipMemcpy(m, a, mem, hipMemcpyDeviceToHost); } //mean = avg(m, n+1); //printf("%f\n", mean); free(m); hipFree(a); hipFree(b); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <hip/hip_runtime.h> #define ITERATION 500 #define BLKSIZE 512 typedef unsigned long long bint; float * allocate(bint n){ bint size = (n+1)*(n+1); float *m = (float *)calloc(size, sizeof(float)); bint i; for (i=0; i<n+1; i++){ m[i] = 80; m[size-1-i] = 80; m[(n+1)*i] = 80; m[(n+1)*(i+1)-1] = 80; m[i] = (i >=10 && i<=30) ? 150 : m[i]; } return m; } float avg(float *m, bint dim){ bint size = dim*dim; float sum = 0; bint i; for (i=0; i<size; i++){ sum += m[i]; //if (i % dim==0) // printf("\n"); //printf("%f ", m[i]); } //printf("\n"); return sum/size; } __global__ void simulateKernel(float *s, float *d, bint dim){ //dim is one side length of matrix bint tid = threadIdx.x + blockIdx.x * blockDim.x; bint i = tid + dim + 1 + 2*(tid/(dim-2)); if (i < dim*(dim-1)-1) d[i] = (s[i-1] + s[i+1] + s[i-dim] + s[i+dim]) / 4; } int main(int argc, char *argv[]){ if (argc < 2){ printf("Please indicate matrix size.\n"); exit(0); } bint n = atoi(argv[1]); float *m = allocate(n); //float mean = avg(m, n+1); //printf("%f===>",mean); // allocation and copy to DEVICE float * a, *b; bint mem = (n+1)*(n+1)*sizeof(float); hipMalloc((void **)&a, mem); hipMalloc((void **)&b, mem); hipMemcpy(a, m, mem, hipMemcpyHostToDevice); hipMemcpy(b, m, mem, hipMemcpyHostToDevice); // call kernel function bint gridSize = ((n+1)*(n+1) % BLKSIZE == 0)? (n+1)*(n+1)/BLKSIZE : (n+1)*(n+1)/BLKSIZE+1; int i; for (i=0; i<ITERATION/2; i++){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); simulateKernel<<<gridSize, BLKSIZE>>>(b, a, n+1); } if (ITERATION%2 !=0){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); hipMemcpy(m, b, mem, hipMemcpyDeviceToHost); } else{ hipMemcpy(m, a, mem, hipMemcpyDeviceToHost); } //mean = avg(m, n+1); //printf("%f\n", mean); free(m); hipFree(a); hipFree(b); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14simulateKernelPfS_y .globl _Z14simulateKernelPfS_y .p2align 8 .type _Z14simulateKernelPfS_y,@function _Z14simulateKernelPfS_y: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b64 s[4:5], s[0:1], 0x10 v_mov_b32_e32 v3, 0 v_mov_b32_e32 v4, 0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_mov_b32_e32 v2, 0 s_add_u32 s2, s4, -2 s_addc_u32 s3, s5, -1 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_cmp_le_u64_e32 vcc_lo, s[2:3], v[1:2] s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB0_2 v_cvt_f32_u32_e32 v0, s2 s_sub_i32 s6, 0, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v0, v0 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 v_cvt_u32_f32_e32 v0, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v3, s6, v0 v_mul_hi_u32 v3, v0, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v0, v0, v3 v_mul_hi_u32 v0, v1, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mul_lo_u32 v3, v0, s2 v_add_nc_u32_e32 v4, 1, v0 v_sub_nc_u32_e32 v3, v1, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v5, s2, v3 v_cmp_le_u32_e32 vcc_lo, s2, v3 v_dual_cndmask_b32 v3, v3, v5 :: v_dual_cndmask_b32 v0, v0, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_le_u32_e32 vcc_lo, s2, v3 v_add_nc_u32_e32 v4, 1, v0 s_delay_alu instid0(VALU_DEP_1) v_dual_cndmask_b32 v3, v0, v4 :: v_dual_mov_b32 v4, v2 .LBB0_2: s_or_b32 exec_lo, exec_lo, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_lshlrev_b64 v[3:4], 1, v[3:4] v_add_co_u32 v0, vcc_lo, v1, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v2, vcc_lo s_add_u32 s2, s4, -1 v_add_co_u32 v2, vcc_lo, v0, v3 s_delay_alu instid0(VALU_DEP_2) 
v_add_co_ci_u32_e32 v6, vcc_lo, v1, v4, vcc_lo s_mul_i32 s3, s2, s5 s_mul_hi_u32 s6, s2, s4 s_addc_u32 s7, s5, -1 v_add_co_u32 v5, vcc_lo, v2, 1 s_add_i32 s3, s6, s3 s_mul_i32 s7, s7, s4 v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo s_mul_i32 s2, s2, s4 s_add_i32 s3, s3, s7 s_add_u32 s2, s2, -1 s_addc_u32 s3, s3, -1 s_delay_alu instid0(SALU_CYCLE_1) v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[5:6] s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_4 s_load_b128 s[0:3], s[0:1], 0x0 v_sub_co_u32 v7, vcc_lo, v5, s4 v_lshlrev_b64 v[0:1], 2, v[0:1] v_subrev_co_ci_u32_e32 v8, vcc_lo, s5, v6, vcc_lo v_lshlrev_b64 v[2:3], 2, v[3:4] v_lshlrev_b64 v[4:5], 2, v[5:6] s_lshl_b64 s[4:5], s[4:5], 2 s_delay_alu instid0(VALU_DEP_3) v_lshlrev_b64 v[6:7], 2, v[7:8] s_waitcnt lgkmcnt(0) v_add_co_u32 v8, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v9, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v2, vcc_lo, v8, v2 v_add_co_ci_u32_e32 v3, vcc_lo, v9, v3, vcc_lo v_add_co_u32 v6, vcc_lo, s0, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s1, v7, vcc_lo s_clause 0x1 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[0:1], off offset:4 v_add_co_u32 v0, vcc_lo, v0, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_clause 0x1 global_load_b32 v6, v[6:7], off global_load_b32 v0, v[0:1], off s_waitcnt vmcnt(2) v_add_f32_e32 v1, v2, v3 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add_f32_e32 v1, v1, v6 s_waitcnt vmcnt(0) v_add_f32_e32 v0, v1, v0 s_delay_alu instid0(VALU_DEP_1) v_mul_f32_e32 v2, 0x3e800000, v0 v_add_co_u32 v0, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v5, vcc_lo global_store_b32 v[0:1], v2, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14simulateKernelPfS_y .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 
0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14simulateKernelPfS_y, .Lfunc_end0-_Z14simulateKernelPfS_y .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - 
.offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14simulateKernelPfS_y .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14simulateKernelPfS_y.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <hip/hip_runtime.h> #define ITERATION 500 #define BLKSIZE 512 typedef unsigned long long bint; float * allocate(bint n){ bint size = (n+1)*(n+1); float *m = (float *)calloc(size, sizeof(float)); bint i; for (i=0; i<n+1; i++){ m[i] = 80; m[size-1-i] = 80; m[(n+1)*i] = 80; m[(n+1)*(i+1)-1] = 80; m[i] = (i >=10 && i<=30) ? 150 : m[i]; } return m; } float avg(float *m, bint dim){ bint size = dim*dim; float sum = 0; bint i; for (i=0; i<size; i++){ sum += m[i]; //if (i % dim==0) // printf("\n"); //printf("%f ", m[i]); } //printf("\n"); return sum/size; } __global__ void simulateKernel(float *s, float *d, bint dim){ //dim is one side length of matrix bint tid = threadIdx.x + blockIdx.x * blockDim.x; bint i = tid + dim + 1 + 2*(tid/(dim-2)); if (i < dim*(dim-1)-1) d[i] = (s[i-1] + s[i+1] + s[i-dim] + s[i+dim]) / 4; } int main(int argc, char *argv[]){ if (argc < 2){ printf("Please indicate matrix size.\n"); exit(0); } bint n = atoi(argv[1]); float *m = allocate(n); //float mean = avg(m, n+1); //printf("%f===>",mean); // allocation and copy to DEVICE float * a, *b; bint mem = (n+1)*(n+1)*sizeof(float); hipMalloc((void **)&a, mem); hipMalloc((void **)&b, mem); hipMemcpy(a, m, mem, hipMemcpyHostToDevice); hipMemcpy(b, m, mem, hipMemcpyHostToDevice); // call kernel function bint gridSize = ((n+1)*(n+1) % BLKSIZE == 0)? (n+1)*(n+1)/BLKSIZE : (n+1)*(n+1)/BLKSIZE+1; int i; for (i=0; i<ITERATION/2; i++){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); simulateKernel<<<gridSize, BLKSIZE>>>(b, a, n+1); } if (ITERATION%2 !=0){ simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1); hipMemcpy(m, b, mem, hipMemcpyDeviceToHost); } else{ hipMemcpy(m, a, mem, hipMemcpyDeviceToHost); } //mean = avg(m, n+1); //printf("%f\n", mean); free(m); hipFree(a); hipFree(b); return 0; }
.text .file "cuda.hip" .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z8allocatey .LCPI0_0: .long 0x43160000 # float 150 .text .globl _Z8allocatey .p2align 4, 0x90 .type _Z8allocatey,@function _Z8allocatey: # @_Z8allocatey .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rdi, %rbx leaq 1(%rdi), %r14 imulq %r14, %r14 movl $4, %esi movq %r14, %rdi callq calloc movq %rbx, %rcx incq %rcx jne .LBB0_1 .LBB0_5: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB0_1: # %.lr.ph .cfi_def_cfa_offset 32 movq %rbx, %rcx negq %rcx leaq 4(,%rbx,4), %rdx shlq $2, %rbx decq %r14 movq $-10, %rsi movss .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero movq %rax, %rdi jmp .LBB0_2 .p2align 4, 0x90 .LBB0_4: # in Loop: Header=BB0_2 Depth=1 movss %xmm1, 40(%rax,%rsi,4) leaq (%rcx,%rsi), %r8 incq %r8 incq %rsi addq %rdx, %rdi decq %r14 cmpq $-9, %r8 je .LBB0_5 .LBB0_2: # =>This Inner Loop Header: Depth=1 movl $1117782016, 40(%rax,%rsi,4) # imm = 0x42A00000 movl $1117782016, (%rax,%r14,4) # imm = 0x42A00000 movl $1117782016, (%rdi) # imm = 0x42A00000 movl $1117782016, (%rdi,%rbx) # imm = 0x42A00000 movaps %xmm0, %xmm1 cmpq $21, %rsi jb .LBB0_4 # %bb.3: # in Loop: Header=BB0_2 Depth=1 movss 40(%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero jmp .LBB0_4 .Lfunc_end0: .size _Z8allocatey, .Lfunc_end0-_Z8allocatey .cfi_endproc # -- End function .globl _Z3avgPfy # -- Begin function _Z3avgPfy .p2align 4, 0x90 .type _Z3avgPfy,@function _Z3avgPfy: # @_Z3avgPfy .cfi_startproc # %bb.0: imulq %rsi, %rsi xorps %xmm0, %xmm0 testq %rsi, %rsi je .LBB1_3 # %bb.1: # %.lr.ph.preheader xorl %eax, %eax .p2align 4, 0x90 .LBB1_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 addss (%rdi,%rax,4), %xmm0 incq %rax cmpq %rax, %rsi jne .LBB1_2 .LBB1_3: # %._crit_edge testq %rsi, %rsi js 
.LBB1_4 # %bb.5: # %._crit_edge cvtsi2ss %rsi, %xmm1 divss %xmm1, %xmm0 retq .LBB1_4: movq %rsi, %rax shrq %rax andl $1, %esi orq %rax, %rsi cvtsi2ss %rsi, %xmm1 addss %xmm1, %xmm1 divss %xmm1, %xmm0 retq .Lfunc_end1: .size _Z3avgPfy, .Lfunc_end1-_Z3avgPfy .cfi_endproc # -- End function .globl _Z29__device_stub__simulateKernelPfS_y # -- Begin function _Z29__device_stub__simulateKernelPfS_y .p2align 4, 0x90 .type _Z29__device_stub__simulateKernelPfS_y,@function _Z29__device_stub__simulateKernelPfS_y: # @_Z29__device_stub__simulateKernelPfS_y .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14simulateKernelPfS_y, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z29__device_stub__simulateKernelPfS_y, .Lfunc_end2-_Z29__device_stub__simulateKernelPfS_y .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI3_0: .long 0x43160000 # float 150 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $136, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $1, %edi jle .LBB3_13 # %bb.1: movq 8(%rsi), %rdi xorl %esi, %esi movl $10, 
%edx callq __isoc23_strtol movslq %eax, %r15 leaq 1(%r15), %r12 imulq %r12, %r12 movl $4, %esi movq %r12, %rdi callq calloc movq %rax, %rbx movq %r15, %r14 incq %r14 jne .LBB3_2 .LBB3_6: # %_Z8allocatey.exit movabsq $4294967808, %r15 # imm = 0x100000200 leaq (,%r12,4), %r13 leaq 8(%rsp), %rdi movq %r13, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %r13, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %rbx, %rsi movq %r13, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %rbx, %rsi movq %r13, 128(%rsp) # 8-byte Spill movq %r13, %rdx movl $1, %ecx callq hipMemcpy movl %r12d, %eax andl $509, %eax # imm = 0x1FD shrq $9, %r12 cmpq $1, %rax sbbl $-1, %r12d addq %r15, %r12 addq $-512, %r12 # imm = 0xFE00 movl $250, %r13d leaq 96(%rsp), %rbp jmp .LBB3_7 .p2align 4, 0x90 .LBB3_11: # in Loop: Header=BB3_7 Depth=1 decl %r13d je .LBB3_12 .LBB3_7: # =>This Inner Loop Header: Depth=1 movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_9 # %bb.8: # in Loop: Header=BB3_7 Depth=1 movq 8(%rsp), %rax movq 16(%rsp), %rcx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %r14, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z14simulateKernelPfS_y, %edi movq %rbp, %r9 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_9: # in Loop: Header=BB3_7 Depth=1 movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_11 # %bb.10: # in Loop: Header=BB3_7 Depth=1 movq 16(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 88(%rsp) movq 
%rcx, 80(%rsp) movq %r14, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z14simulateKernelPfS_y, %edi movq %rbp, %r9 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB3_11 .LBB3_12: movq 8(%rsp), %rsi movq %rbx, %rdi movq 128(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy movq %rbx, %rdi callq free movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree xorl %eax, %eax addq $136, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_2: # %.lr.ph.i .cfi_def_cfa_offset 192 movq %r15, %rax negq %rax leaq 4(,%r15,4), %rcx shlq $2, %r15 movq %r14, %rdx imulq %r14, %rdx leaq (%rbx,%rdx,4), %rdx addq $-4, %rdx movq $-10, %rsi movss .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero movq %rbx, %rdi jmp .LBB3_3 .p2align 4, 0x90 .LBB3_5: # in Loop: Header=BB3_3 Depth=1 movss %xmm1, 40(%rbx,%rsi,4) leaq (%rax,%rsi), %r8 incq %r8 incq %rsi addq %rcx, %rdi addq $-4, %rdx cmpq $-9, %r8 je .LBB3_6 .LBB3_3: # =>This Inner Loop Header: Depth=1 movl $1117782016, 40(%rbx,%rsi,4) # imm = 0x42A00000 movl $1117782016, (%rdx) # imm = 0x42A00000 movl $1117782016, (%rdi) # imm = 0x42A00000 movl $1117782016, (%rdi,%r15) # imm = 0x42A00000 movaps %xmm0, %xmm1 cmpq $21, %rsi jb .LBB3_5 # %bb.4: # in Loop: Header=BB3_3 Depth=1 movss 40(%rbx,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero jmp .LBB3_5 .LBB3_13: movl $.Lstr, %edi callq puts@PLT xorl %edi, %edi callq exit .Lfunc_end3: .size main, .Lfunc_end3-main 
.cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14simulateKernelPfS_y, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z14simulateKernelPfS_y,@object # @_Z14simulateKernelPfS_y .section .rodata,"a",@progbits .globl _Z14simulateKernelPfS_y .p2align 3, 0x0 _Z14simulateKernelPfS_y: .quad _Z29__device_stub__simulateKernelPfS_y .size _Z14simulateKernelPfS_y, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14simulateKernelPfS_y" .size .L__unnamed_1, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Please indicate matrix size." .size .Lstr, 29 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__simulateKernelPfS_y .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14simulateKernelPfS_y .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z14simulateKernelPfS_y .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e220000002500 */ /*0020*/ ULDC.64 UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */ /* 0x000fe40000000a00 */ /*0030*/ UIADD3 UR4, UP0, UR4, -0x2, URZ ; /* 0xfffffffe04047890 */ /* 0x000fe2000ff1e03f */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e260000002100 */ /*0050*/ UIADD3.X UR5, UR5, -0x1, URZ, UP0, !UPT ; /* 0xffffffff05057890 */ /* 0x000fcc00087fe43f */ /*0060*/ ISETP.NE.U32.AND P0, PT, RZ, UR5, PT ; /* 0x00000005ff007c0c */ /* 0x000fe2000bf05070 */ /*0070*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */ /* 0x001fca00078e0200 */ /*0080*/ IADD3 R0, P1, R3, c[0x0][0x170], RZ ; /* 0x00005c0003007a10 */ /* 0x000fca0007f3e0ff */ /*0090*/ IMAD.X R2, RZ, RZ, c[0x0][0x174], P1 ; /* 0x00005d00ff027624 */ /* 0x000fe400008e06ff */ /*00a0*/ @!P0 BRA 0x100 ; /* 0x0000005000008947 */ /* 0x000fea0003800000 */ /*00b0*/ MOV R4, 0xd0 ; /* 0x000000d000047802 */ /* 0x000fe40000000f00 */ /*00c0*/ CALL.REL.NOINC 0x4b0 ; /* 0x000003e000007944 */ /* 0x000fea0003c00000 */ /*00d0*/ IMAD.MOV.U32 R4, RZ, RZ, R3 ; /* 0x000000ffff047224 */ /* 0x000fe400078e0003 */ /*00e0*/ IMAD.MOV.U32 R5, RZ, RZ, R6 ; /* 0x000000ffff057224 */ /* 0x000fe200078e0006 */ /*00f0*/ BRA 0x230 ; /* 0x0000013000007947 */ /* 0x000fea0003800000 */ /*0100*/ I2F.U32.RP R6, UR4 ; /* 0x0000000400067d06 */ /* 0x000e220008209000 */ /*0110*/ ISETP.NE.U32.AND P2, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fce000bf45070 */ /*0120*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */ /* 0x001e240000001000 */ /*0130*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */ /* 0x001fcc0007ffe0ff */ /*0140*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */ /* 0x000064000021f000 */ 
/*0150*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */ /* 0x001fe400078e00ff */ /*0160*/ IMAD.MOV R7, RZ, RZ, -R5 ; /* 0x000000ffff077224 */ /* 0x002fc800078e0a05 */ /*0170*/ IMAD R7, R7, UR4, RZ ; /* 0x0000000407077c24 */ /* 0x000fc8000f8e02ff */ /*0180*/ IMAD.HI.U32 R8, R5, R7, R4 ; /* 0x0000000705087227 */ /* 0x000fc800078e0004 */ /*0190*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */ /* 0x000fe400078e00ff */ /*01a0*/ IMAD.HI.U32 R4, R8, R3, RZ ; /* 0x0000000308047227 */ /* 0x000fc800078e00ff */ /*01b0*/ IMAD.MOV R8, RZ, RZ, -R4 ; /* 0x000000ffff087224 */ /* 0x000fc800078e0a04 */ /*01c0*/ IMAD R3, R8, UR4, R3 ; /* 0x0000000408037c24 */ /* 0x000fca000f8e0203 */ /*01d0*/ ISETP.GE.U32.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fda000bf06070 */ /*01e0*/ @P0 IADD3 R3, R3, -UR4, RZ ; /* 0x8000000403030c10 */ /* 0x000fe4000fffe0ff */ /*01f0*/ @P0 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104040810 */ /* 0x000fe40007ffe0ff */ /*0200*/ ISETP.GE.U32.AND P1, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fda000bf26070 */ /*0210*/ @P1 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104041810 */ /* 0x000fe40007ffe0ff */ /*0220*/ @!P2 LOP3.LUT R4, RZ, UR4, RZ, 0x33, !PT ; /* 0x00000004ff04ac12 */ /* 0x000fc8000f8e33ff */ /*0230*/ ULDC.64 UR8, c[0x0][0x170] ; /* 0x00005c0000087ab9 */ /* 0x000fe20000000a00 */ /*0240*/ IMAD.SHL.U32 R3, R4.reuse, 0x2, RZ ; /* 0x0000000204037824 */ /* 0x040fe200078e00ff */ /*0250*/ UIADD3 UR6, UP0, UR8, -0x1, URZ ; /* 0xffffffff08067890 */ /* 0x000fe2000ff1e03f */ /*0260*/ SHF.L.U64.HI R5, R4, 0x1, R5 ; /* 0x0000000104057819 */ /* 0x000fe20000010205 */ /*0270*/ ULDC.64 UR4, c[0x2][0x0] ; /* 0x0080000000047ab9 */ /* 0x000fe40000000a00 */ /*0280*/ UIADD3.X UR7, UR9, -0x1, URZ, UP0, !UPT ; /* 0xffffffff09077890 */ /* 0x000fe200087fe43f */ /*0290*/ IADD3 R10, P0, P1, R0, 0x1, R3 ; /* 0x00000001000a7810 */ /* 0x000fe2000791e003 */ /*02a0*/ UIMAD.WIDE.U32 UR4, UR6, UR8, UR4 ; /* 0x00000008060472a5 */ /* 0x000fc4000f8e0004 */ /*02b0*/ 
UIMAD UR7, UR7, UR8, URZ ; /* 0x00000008070772a4 */ /* 0x000fe2000f8e023f */ /*02c0*/ IADD3.X R11, R2, RZ, R5, P0, P1 ; /* 0x000000ff020b7210 */ /* 0x000fc600007e2405 */ /*02d0*/ UIMAD UR7, UR6, UR9, UR7 ; /* 0x00000009060772a4 */ /* 0x000fe2000f8e0207 */ /*02e0*/ ISETP.GE.U32.AND P0, PT, R10, UR4, PT ; /* 0x000000040a007c0c */ /* 0x000fc6000bf06070 */ /*02f0*/ UIADD3 UR5, UR5, UR7, URZ ; /* 0x0000000705057290 */ /* 0x000fcc000fffe03f */ /*0300*/ ISETP.GE.U32.AND.EX P0, PT, R11, UR5, PT, P0 ; /* 0x000000050b007c0c */ /* 0x000fda000bf06100 */ /*0310*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0320*/ IADD3 R3, P0, R3, R0, RZ ; /* 0x0000000003037210 */ /* 0x000fe20007f1e0ff */ /*0330*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0340*/ IADD3 R0, P1, R10.reuse, -c[0x0][0x170], RZ ; /* 0x80005c000a007a10 */ /* 0x040fe40007f3e0ff */ /*0350*/ IADD3 R7, P2, R10, c[0x0][0x170], RZ ; /* 0x00005c000a077a10 */ /* 0x000fe20007f5e0ff */ /*0360*/ IMAD.X R6, R5, 0x1, R2, P0 ; /* 0x0000000105067824 */ /* 0x000fe200000e0602 */ /*0370*/ LEA R2, P0, R3, c[0x0][0x160], 0x2 ; /* 0x0000580003027a11 */ /* 0x000fe400078010ff */ /*0380*/ IADD3.X R5, R11, ~c[0x0][0x174], RZ, P1, !PT ; /* 0x80005d000b057a10 */ /* 0x000fe40000ffe4ff */ /*0390*/ LEA R4, P1, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000047a11 */ /* 0x000fc400078210ff */ /*03a0*/ LEA.HI.X R3, R3, c[0x0][0x164], R6, 0x2, P0 ; /* 0x0000590003037a11 */ /* 0x000fe400000f1406 */ /*03b0*/ LEA.HI.X R5, R0, c[0x0][0x164], R5, 0x2, P1 ; /* 0x0000590000057a11 */ /* 0x000fe400008f1405 */ /*03c0*/ IADD3.X R8, R11, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d000b087a10 */ /* 0x000fe200017fe4ff */ /*03d0*/ LDG.E R0, [R2.64+0x8] ; /* 0x0000080402007981 */ /* 0x000ea2000c1e1900 */ /*03e0*/ LEA R6, P0, R7, c[0x0][0x160], 0x2 ; /* 0x0000580007067a11 */ /* 0x000fc600078010ff */ /*03f0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */ /* 0x000ea2000c1e1900 */ /*0400*/ LEA.HI.X R7, R7, c[0x0][0x164], R8, 
0x2, P0 ; /* 0x0000590007077a11 */ /* 0x000fc600000f1408 */ /*0410*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ee8000c1e1900 */ /*0420*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000f22000c1e1900 */ /*0430*/ LEA R8, P0, R10, c[0x0][0x168], 0x2 ; /* 0x00005a000a087a11 */ /* 0x000fe200078010ff */ /*0440*/ FADD R0, R0, R9 ; /* 0x0000000900007221 */ /* 0x004fc80000000000 */ /*0450*/ FADD R0, R0, R5 ; /* 0x0000000500007221 */ /* 0x008fe20000000000 */ /*0460*/ LEA.HI.X R9, R10, c[0x0][0x16c], R11, 0x2, P0 ; /* 0x00005b000a097a11 */ /* 0x000fc600000f140b */ /*0470*/ FADD R0, R0, R7 ; /* 0x0000000700007221 */ /* 0x010fc80000000000 */ /*0480*/ FMUL R11, R0, 0.25 ; /* 0x3e800000000b7820 */ /* 0x000fca0000400000 */ /*0490*/ STG.E [R8.64], R11 ; /* 0x0000000b08007986 */ /* 0x000fe2000c101904 */ /*04a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*04b0*/ I2F.U64.RP R5, UR4 ; /* 0x0000000400057d12 */ /* 0x000e300008309000 */ /*04c0*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */ /* 0x001e240000001000 */ /*04d0*/ IADD3 R6, R5, 0x1ffffffe, RZ ; /* 0x1ffffffe05067810 */ /* 0x001fcc0007ffe0ff */ /*04e0*/ F2I.U64.TRUNC R6, R6 ; /* 0x0000000600067311 */ /* 0x000e24000020d800 */ /*04f0*/ IMAD.WIDE.U32 R8, R6, UR4, RZ ; /* 0x0000000406087c25 */ /* 0x001fc8000f8e00ff */ /*0500*/ IMAD R9, R6, UR5, R9 ; /* 0x0000000506097c24 */ /* 0x000fe2000f8e0209 */ /*0510*/ IADD3 R11, P0, RZ, -R8, RZ ; /* 0x80000008ff0b7210 */ /* 0x000fc60007f1e0ff */ /*0520*/ IMAD R9, R7, UR4, R9 ; /* 0x0000000407097c24 */ /* 0x000fe4000f8e0209 */ /*0530*/ IMAD.HI.U32 R8, R6, R11, RZ ; /* 0x0000000b06087227 */ /* 0x000fc800078e00ff */ /*0540*/ IMAD.X R13, RZ, RZ, ~R9, P0 ; /* 0x000000ffff0d7224 */ /* 0x000fe400000e0e09 */ /*0550*/ IMAD.MOV.U32 R9, RZ, RZ, R6 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0006 */ /*0560*/ IMAD R15, R7, R13.reuse, RZ ; /* 0x0000000d070f7224 */ /* 0x080fe400078e02ff */ /*0570*/ IMAD.WIDE.U32 R8, P0, R6, R13, R8 ; /* 0x0000000d06087225 */ /* 0x000fc80007800008 
*/ /*0580*/ IMAD.HI.U32 R5, R7, R13, RZ ; /* 0x0000000d07057227 */ /* 0x000fc800078e00ff */ /*0590*/ IMAD.HI.U32 R8, P1, R7, R11, R8 ; /* 0x0000000b07087227 */ /* 0x000fc80007820008 */ /*05a0*/ IMAD.X R5, R5, 0x1, R7, P0 ; /* 0x0000000105057824 */ /* 0x000fe200000e0607 */ /*05b0*/ IADD3 R9, P2, R15, R8, RZ ; /* 0x000000080f097210 */ /* 0x000fc80007f5e0ff */ /*05c0*/ IADD3.X R5, RZ, RZ, R5, P2, P1 ; /* 0x000000ffff057210 */ /* 0x000fe200017e2405 */ /*05d0*/ IMAD.WIDE.U32 R6, R9, UR4, RZ ; /* 0x0000000409067c25 */ /* 0x000fc8000f8e00ff */ /*05e0*/ IMAD R8, R9, UR5, R7 ; /* 0x0000000509087c24 */ /* 0x000fe2000f8e0207 */ /*05f0*/ IADD3 R7, P0, RZ, -R6, RZ ; /* 0x80000006ff077210 */ /* 0x000fc60007f1e0ff */ /*0600*/ IMAD R6, R5, UR4, R8 ; /* 0x0000000405067c24 */ /* 0x000fe4000f8e0208 */ /*0610*/ IMAD.HI.U32 R8, R9, R7, RZ ; /* 0x0000000709087227 */ /* 0x000fc800078e00ff */ /*0620*/ IMAD.X R6, RZ, RZ, ~R6, P0 ; /* 0x000000ffff067224 */ /* 0x000fc800000e0e06 */ /*0630*/ IMAD.WIDE.U32 R8, P0, R9, R6, R8 ; /* 0x0000000609087225 */ /* 0x000fc80007800008 */ /*0640*/ IMAD R11, R5.reuse, R6, RZ ; /* 0x00000006050b7224 */ /* 0x040fe400078e02ff */ /*0650*/ IMAD.HI.U32 R8, P1, R5, R7, R8 ; /* 0x0000000705087227 */ /* 0x000fc80007820008 */ /*0660*/ IMAD.HI.U32 R10, R5, R6, RZ ; /* 0x00000006050a7227 */ /* 0x000fe200078e00ff */ /*0670*/ IADD3 R8, P2, R11, R8, RZ ; /* 0x000000080b087210 */ /* 0x000fc60007f5e0ff */ /*0680*/ IMAD.X R5, R10, 0x1, R5, P0 ; /* 0x000000010a057824 */ /* 0x000fe400000e0605 */ /*0690*/ IMAD.HI.U32 R6, R8, R3, RZ ; /* 0x0000000308067227 */ /* 0x000fc600078e00ff */ /*06a0*/ IADD3.X R5, RZ, RZ, R5, P2, P1 ; /* 0x000000ffff057210 */ /* 0x000fe200017e2405 */ /*06b0*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */ /* 0x000fc800078e00ff */ /*06c0*/ IMAD.WIDE.U32 R6, RZ, R8, R6 ; /* 0x00000008ff067225 */ /* 0x000fcc00078e0006 */ /*06d0*/ IMAD.HI.U32 R5, P0, R5, R3, R6 ; /* 0x0000000305057227 */ /* 0x000fca0007800006 */ /*06e0*/ IADD3 R9, P1, RZ, R5, RZ ; /* 
0x00000005ff097210 */ /* 0x000fe20007f3e0ff */ /*06f0*/ IMAD.X R5, RZ, RZ, RZ, P0 ; /* 0x000000ffff057224 */ /* 0x000fc800000e06ff */ /*0700*/ IMAD.WIDE.U32 R6, R9, UR4, RZ ; /* 0x0000000409067c25 */ /* 0x000fc8000f8e00ff */ /*0710*/ IMAD.X R5, RZ, RZ, R5, P1 ; /* 0x000000ffff057224 */ /* 0x000fe200008e0605 */ /*0720*/ IADD3 R10, P1, -R6, R3, RZ ; /* 0x00000003060a7210 */ /* 0x000fe20007f3e1ff */ /*0730*/ IMAD R8, R9, UR5, R7 ; /* 0x0000000509087c24 */ /* 0x000fc6000f8e0207 */ /*0740*/ ISETP.GE.U32.AND P0, PT, R10.reuse, UR4, PT ; /* 0x000000040a007c0c */ /* 0x040fe2000bf06070 */ /*0750*/ IMAD R8, R5, UR4, R8 ; /* 0x0000000405087c24 */ /* 0x000fe2000f8e0208 */ /*0760*/ IADD3 R3, P2, R10, -UR4, RZ ; /* 0x800000040a037c10 */ /* 0x000fc6000ff5e0ff */ /*0770*/ IMAD.X R12, RZ, RZ, ~R8, P1 ; /* 0x000000ffff0c7224 */ /* 0x000fe200008e0e08 */ /*0780*/ IADD3 R8, P1, R9, 0x1, RZ ; /* 0x0000000109087810 */ /* 0x000fc80007f3e0ff */ /*0790*/ ISETP.GE.U32.AND.EX P0, PT, R12.reuse, UR5, PT, P0 ; /* 0x000000050c007c0c */ /* 0x040fe2000bf06100 */ /*07a0*/ IMAD.X R6, RZ, RZ, R5, P1 ; /* 0x000000ffff067224 */ /* 0x000fe200008e0605 */ /*07b0*/ IADD3.X R7, R12, ~UR5, RZ, P2, !PT ; /* 0x800000050c077c10 */ /* 0x000fe400097fe4ff */ /*07c0*/ SEL R3, R3, R10, P0 ; /* 0x0000000a03037207 */ /* 0x000fe40000000000 */ /*07d0*/ SEL R8, R8, R9, P0 ; /* 0x0000000908087207 */ /* 0x000fe40000000000 */ /*07e0*/ SEL R7, R7, R12, P0 ; /* 0x0000000c07077207 */ /* 0x000fe40000000000 */ /*07f0*/ SEL R5, R6, R5, P0 ; /* 0x0000000506057207 */ /* 0x000fc40000000000 */ /*0800*/ ISETP.GE.U32.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */ /* 0x000fe4000bf06070 */ /*0810*/ IADD3 R3, P2, R8, 0x1, RZ ; /* 0x0000000108037810 */ /* 0x000fe40007f5e0ff */ /*0820*/ ISETP.GE.U32.AND.EX P0, PT, R7, UR5, PT, P0 ; /* 0x0000000507007c0c */ /* 0x000fe4000bf06100 */ /*0830*/ ISETP.NE.U32.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fe2000bf25070 */ /*0840*/ IMAD.X R6, RZ, RZ, R5, P2 ; /* 0x000000ffff067224 
*/ /* 0x000fe200010e0605 */ /*0850*/ SEL R3, R3, R8, P0 ; /* 0x0000000803037207 */ /* 0x000fe40000000000 */ /*0860*/ ISETP.NE.AND.EX P1, PT, RZ, UR5, PT, P1 ; /* 0x00000005ff007c0c */ /* 0x000fc4000bf25310 */ /*0870*/ SEL R6, R6, R5, P0 ; /* 0x0000000506067207 */ /* 0x000fe20000000000 */ /*0880*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */ /* 0x000fe200078e00ff */ /*0890*/ SEL R3, R3, 0xffffffff, P1 ; /* 0xffffffff03037807 */ /* 0x000fe40000800000 */ /*08a0*/ SEL R6, R6, 0xffffffff, P1 ; /* 0xffffffff06067807 */ /* 0x000fe20000800000 */ /*08b0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff74004007950 */ /* 0x000fec0003c3ffff */ /*08c0*/ BRA 0x8c0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z14simulateKernelPfS_y .globl _Z14simulateKernelPfS_y .p2align 8 .type _Z14simulateKernelPfS_y,@function _Z14simulateKernelPfS_y: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b64 s[4:5], s[0:1], 0x10 v_mov_b32_e32 v3, 0 v_mov_b32_e32 v4, 0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_mov_b32_e32 v2, 0 s_add_u32 s2, s4, -2 s_addc_u32 s3, s5, -1 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_cmp_le_u64_e32 vcc_lo, s[2:3], v[1:2] s_and_saveexec_b32 s3, vcc_lo s_cbranch_execz .LBB0_2 v_cvt_f32_u32_e32 v0, s2 s_sub_i32 s6, 0, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v0, v0 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 v_cvt_u32_f32_e32 v0, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v3, s6, v0 v_mul_hi_u32 v3, v0, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v0, v0, v3 v_mul_hi_u32 v0, v1, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mul_lo_u32 v3, v0, s2 v_add_nc_u32_e32 v4, 1, v0 v_sub_nc_u32_e32 v3, v1, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v5, s2, v3 v_cmp_le_u32_e32 vcc_lo, s2, v3 v_dual_cndmask_b32 v3, v3, v5 :: v_dual_cndmask_b32 v0, v0, v4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_le_u32_e32 vcc_lo, s2, v3 v_add_nc_u32_e32 v4, 1, v0 s_delay_alu instid0(VALU_DEP_1) v_dual_cndmask_b32 v3, v0, v4 :: v_dual_mov_b32 v4, v2 .LBB0_2: s_or_b32 exec_lo, exec_lo, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_lshlrev_b64 v[3:4], 1, v[3:4] v_add_co_u32 v0, vcc_lo, v1, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v2, vcc_lo s_add_u32 s2, s4, -1 v_add_co_u32 v2, vcc_lo, v0, v3 s_delay_alu instid0(VALU_DEP_2) 
v_add_co_ci_u32_e32 v6, vcc_lo, v1, v4, vcc_lo s_mul_i32 s3, s2, s5 s_mul_hi_u32 s6, s2, s4 s_addc_u32 s7, s5, -1 v_add_co_u32 v5, vcc_lo, v2, 1 s_add_i32 s3, s6, s3 s_mul_i32 s7, s7, s4 v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo s_mul_i32 s2, s2, s4 s_add_i32 s3, s3, s7 s_add_u32 s2, s2, -1 s_addc_u32 s3, s3, -1 s_delay_alu instid0(SALU_CYCLE_1) v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[5:6] s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_4 s_load_b128 s[0:3], s[0:1], 0x0 v_sub_co_u32 v7, vcc_lo, v5, s4 v_lshlrev_b64 v[0:1], 2, v[0:1] v_subrev_co_ci_u32_e32 v8, vcc_lo, s5, v6, vcc_lo v_lshlrev_b64 v[2:3], 2, v[3:4] v_lshlrev_b64 v[4:5], 2, v[5:6] s_lshl_b64 s[4:5], s[4:5], 2 s_delay_alu instid0(VALU_DEP_3) v_lshlrev_b64 v[6:7], 2, v[7:8] s_waitcnt lgkmcnt(0) v_add_co_u32 v8, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v9, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v2, vcc_lo, v8, v2 v_add_co_ci_u32_e32 v3, vcc_lo, v9, v3, vcc_lo v_add_co_u32 v6, vcc_lo, s0, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s1, v7, vcc_lo s_clause 0x1 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[0:1], off offset:4 v_add_co_u32 v0, vcc_lo, v0, s4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_clause 0x1 global_load_b32 v6, v[6:7], off global_load_b32 v0, v[0:1], off s_waitcnt vmcnt(2) v_add_f32_e32 v1, v2, v3 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add_f32_e32 v1, v1, v6 s_waitcnt vmcnt(0) v_add_f32_e32 v0, v1, v0 s_delay_alu instid0(VALU_DEP_1) v_mul_f32_e32 v2, 0x3e800000, v0 v_add_co_u32 v0, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v5, vcc_lo global_store_b32 v[0:1], v2, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14simulateKernelPfS_y .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 
0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z14simulateKernelPfS_y, .Lfunc_end0-_Z14simulateKernelPfS_y .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - 
.offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14simulateKernelPfS_y .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14simulateKernelPfS_y.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00094fb9_00000000-6_cuda.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z8allocatey .type _Z8allocatey, @function _Z8allocatey: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rdi, %rbp leaq 1(%rdi), %rbx movq %rbx, %r12 imulq %rbx, %r12 movl $4, %esi movq %r12, %rdi call calloc@PLT testq %rbx, %rbx je .L3 leaq -4(%rax,%r12,4), %rsi leaq 0(,%rbx,4), %r8 movq %rax, %rcx leaq -9(%rbp), %rdi movq $-10, %rdx movss .LC0(%rip), %xmm0 movaps %xmm0, %xmm2 jmp .L6 .L5: movss %xmm1, 40(%rax,%rdx,4) subq $4, %rsi addq %r8, %rcx addq $1, %rdx cmpq %rdi, %rdx je .L3 .L6: movss %xmm0, (%rsi) movss %xmm0, (%rcx) movss %xmm0, -4(%rcx,%rbx,4) movaps %xmm2, %xmm1 cmpq $20, %rdx ja .L5 movss .LC1(%rip), %xmm1 jmp .L5 .L3: popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z8allocatey, .-_Z8allocatey .globl _Z3avgPfy .type _Z3avgPfy, @function _Z3avgPfy: .LFB2058: .cfi_startproc endbr64 imulq %rsi, %rsi testq %rsi, %rsi je .L15 movq %rdi, %rax leaq (%rdi,%rsi,4), %rdx pxor %xmm0, %xmm0 .L12: addss (%rax), %xmm0 addq $4, %rax cmpq %rdx, %rax jne .L12 .L11: testq %rsi, %rsi js .L13 pxor %xmm1, %xmm1 cvtsi2ssq %rsi, %xmm1 .L14: divss %xmm1, %xmm0 ret .L15: pxor %xmm0, %xmm0 jmp .L11 .L13: movq %rsi, %rax shrq %rax andl $1, %esi orq %rsi, %rax pxor %xmm1, %xmm1 cvtsi2ssq %rax, %xmm1 addss %xmm1, %xmm1 jmp .L14 .cfi_endproc .LFE2058: .size _Z3avgPfy, .-_Z3avgPfy .globl 
_Z37__device_stub__Z14simulateKernelPfS_yPfS_y .type _Z37__device_stub__Z14simulateKernelPfS_yPfS_y, @function _Z37__device_stub__Z14simulateKernelPfS_yPfS_y: .LFB2084: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L21 .L17: movq 120(%rsp), %rax subq %fs:40, %rax jne .L22 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L21: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14simulateKernelPfS_y(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L17 .L22: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z37__device_stub__Z14simulateKernelPfS_yPfS_y, .-_Z37__device_stub__Z14simulateKernelPfS_yPfS_y .globl _Z14simulateKernelPfS_y .type _Z14simulateKernelPfS_y, @function _Z14simulateKernelPfS_y: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z14simulateKernelPfS_yPfS_y addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z14simulateKernelPfS_y, .-_Z14simulateKernelPfS_y .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "Please indicate matrix size.\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp 
.cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax cmpl $1, %edi jle .L35 movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movslq %eax, %r12 movq %r12, %rdi call _Z8allocatey movq %rax, %r13 addq $1, %r12 movq %r12, %rbp imulq %r12, %rbp leaq 0(,%rbp,4), %r14 movq %rsp, %rdi movq %r14, %rsi call cudaMalloc@PLT leaq 8(%rsp), %rdi movq %r14, %rsi call cudaMalloc@PLT movl $1, %ecx movq %r14, %rdx movq %r13, %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %r14, %rdx movq %r13, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT testl $511, %ebp jne .L27 shrq $9, %rbp .L28: movl $250, %ebx jmp .L31 .L35: leaq .LC3(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $0, %edi call exit@PLT .L27: shrq $9, %rbp addq $1, %rbp jmp .L28 .L37: movq %r12, %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z37__device_stub__Z14simulateKernelPfS_yPfS_y jmp .L29 .L30: subl $1, %ebx je .L36 .L31: movl $512, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %ebp, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L37 .L29: movl $512, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %ebp, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L30 movq %r12, %rdx movq (%rsp), %rsi movq 8(%rsp), %rdi call _Z37__device_stub__Z14simulateKernelPfS_yPfS_y jmp .L30 .L36: movl $2, %ecx movq %r14, %rdx movq (%rsp), %rsi movq %r13, %rdi call cudaMemcpy@PLT movq %r13, %rdi call free@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L38 movl $0, %eax addq $48, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L38: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC4: .string "_Z14simulateKernelPfS_y" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z14simulateKernelPfS_y(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1117782016 .align 4 .LC1: .long 1125515264 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cuda.hip" .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z8allocatey .LCPI0_0: .long 0x43160000 # float 150 .text .globl _Z8allocatey .p2align 4, 0x90 .type _Z8allocatey,@function _Z8allocatey: # @_Z8allocatey .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rdi, %rbx leaq 1(%rdi), %r14 imulq %r14, %r14 movl $4, %esi movq %r14, %rdi callq calloc movq %rbx, %rcx incq %rcx jne .LBB0_1 .LBB0_5: # %._crit_edge addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB0_1: # %.lr.ph .cfi_def_cfa_offset 32 movq %rbx, %rcx negq %rcx leaq 4(,%rbx,4), %rdx shlq $2, %rbx decq %r14 movq $-10, %rsi movss .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero movq %rax, %rdi jmp .LBB0_2 .p2align 4, 0x90 .LBB0_4: # in Loop: Header=BB0_2 Depth=1 movss %xmm1, 40(%rax,%rsi,4) leaq (%rcx,%rsi), %r8 incq %r8 incq %rsi addq %rdx, %rdi decq %r14 cmpq $-9, %r8 je .LBB0_5 .LBB0_2: # =>This Inner Loop Header: Depth=1 movl $1117782016, 40(%rax,%rsi,4) # imm = 0x42A00000 movl $1117782016, (%rax,%r14,4) # imm = 0x42A00000 movl $1117782016, (%rdi) # imm = 0x42A00000 movl $1117782016, (%rdi,%rbx) # imm = 0x42A00000 movaps %xmm0, %xmm1 cmpq $21, %rsi jb .LBB0_4 # %bb.3: # in Loop: Header=BB0_2 Depth=1 movss 40(%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero jmp .LBB0_4 .Lfunc_end0: .size _Z8allocatey, .Lfunc_end0-_Z8allocatey .cfi_endproc # -- End function .globl _Z3avgPfy # -- Begin function _Z3avgPfy .p2align 4, 0x90 .type _Z3avgPfy,@function _Z3avgPfy: # @_Z3avgPfy .cfi_startproc # %bb.0: imulq %rsi, %rsi xorps %xmm0, %xmm0 testq %rsi, %rsi je .LBB1_3 # %bb.1: # %.lr.ph.preheader xorl %eax, %eax .p2align 4, 0x90 .LBB1_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 addss (%rdi,%rax,4), %xmm0 incq %rax cmpq %rax, %rsi jne .LBB1_2 .LBB1_3: # %._crit_edge testq %rsi, %rsi js 
.LBB1_4 # %bb.5: # %._crit_edge cvtsi2ss %rsi, %xmm1 divss %xmm1, %xmm0 retq .LBB1_4: movq %rsi, %rax shrq %rax andl $1, %esi orq %rax, %rsi cvtsi2ss %rsi, %xmm1 addss %xmm1, %xmm1 divss %xmm1, %xmm0 retq .Lfunc_end1: .size _Z3avgPfy, .Lfunc_end1-_Z3avgPfy .cfi_endproc # -- End function .globl _Z29__device_stub__simulateKernelPfS_y # -- Begin function _Z29__device_stub__simulateKernelPfS_y .p2align 4, 0x90 .type _Z29__device_stub__simulateKernelPfS_y,@function _Z29__device_stub__simulateKernelPfS_y: # @_Z29__device_stub__simulateKernelPfS_y .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14simulateKernelPfS_y, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z29__device_stub__simulateKernelPfS_y, .Lfunc_end2-_Z29__device_stub__simulateKernelPfS_y .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI3_0: .long 0x43160000 # float 150 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $136, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $1, %edi jle .LBB3_13 # %bb.1: movq 8(%rsi), %rdi xorl %esi, %esi movl $10, 
%edx callq __isoc23_strtol movslq %eax, %r15 leaq 1(%r15), %r12 imulq %r12, %r12 movl $4, %esi movq %r12, %rdi callq calloc movq %rax, %rbx movq %r15, %r14 incq %r14 jne .LBB3_2 .LBB3_6: # %_Z8allocatey.exit movabsq $4294967808, %r15 # imm = 0x100000200 leaq (,%r12,4), %r13 leaq 8(%rsp), %rdi movq %r13, %rsi callq hipMalloc leaq 16(%rsp), %rdi movq %r13, %rsi callq hipMalloc movq 8(%rsp), %rdi movq %rbx, %rsi movq %r13, %rdx movl $1, %ecx callq hipMemcpy movq 16(%rsp), %rdi movq %rbx, %rsi movq %r13, 128(%rsp) # 8-byte Spill movq %r13, %rdx movl $1, %ecx callq hipMemcpy movl %r12d, %eax andl $509, %eax # imm = 0x1FD shrq $9, %r12 cmpq $1, %rax sbbl $-1, %r12d addq %r15, %r12 addq $-512, %r12 # imm = 0xFE00 movl $250, %r13d leaq 96(%rsp), %rbp jmp .LBB3_7 .p2align 4, 0x90 .LBB3_11: # in Loop: Header=BB3_7 Depth=1 decl %r13d je .LBB3_12 .LBB3_7: # =>This Inner Loop Header: Depth=1 movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_9 # %bb.8: # in Loop: Header=BB3_7 Depth=1 movq 8(%rsp), %rax movq 16(%rsp), %rcx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %r14, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z14simulateKernelPfS_y, %edi movq %rbp, %r9 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_9: # in Loop: Header=BB3_7 Depth=1 movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_11 # %bb.10: # in Loop: Header=BB3_7 Depth=1 movq 16(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 88(%rsp) movq 
%rcx, 80(%rsp) movq %r14, 72(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movl $_Z14simulateKernelPfS_y, %edi movq %rbp, %r9 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB3_11 .LBB3_12: movq 8(%rsp), %rsi movq %rbx, %rdi movq 128(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy movq %rbx, %rdi callq free movq 8(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree xorl %eax, %eax addq $136, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_2: # %.lr.ph.i .cfi_def_cfa_offset 192 movq %r15, %rax negq %rax leaq 4(,%r15,4), %rcx shlq $2, %r15 movq %r14, %rdx imulq %r14, %rdx leaq (%rbx,%rdx,4), %rdx addq $-4, %rdx movq $-10, %rsi movss .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero movq %rbx, %rdi jmp .LBB3_3 .p2align 4, 0x90 .LBB3_5: # in Loop: Header=BB3_3 Depth=1 movss %xmm1, 40(%rbx,%rsi,4) leaq (%rax,%rsi), %r8 incq %r8 incq %rsi addq %rcx, %rdi addq $-4, %rdx cmpq $-9, %r8 je .LBB3_6 .LBB3_3: # =>This Inner Loop Header: Depth=1 movl $1117782016, 40(%rbx,%rsi,4) # imm = 0x42A00000 movl $1117782016, (%rdx) # imm = 0x42A00000 movl $1117782016, (%rdi) # imm = 0x42A00000 movl $1117782016, (%rdi,%r15) # imm = 0x42A00000 movaps %xmm0, %xmm1 cmpq $21, %rsi jb .LBB3_5 # %bb.4: # in Loop: Header=BB3_3 Depth=1 movss 40(%rbx,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero jmp .LBB3_5 .LBB3_13: movl $.Lstr, %edi callq puts@PLT xorl %edi, %edi callq exit .Lfunc_end3: .size main, .Lfunc_end3-main 
.cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14simulateKernelPfS_y, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z14simulateKernelPfS_y,@object # @_Z14simulateKernelPfS_y .section .rodata,"a",@progbits .globl _Z14simulateKernelPfS_y .p2align 3, 0x0 _Z14simulateKernelPfS_y: .quad _Z29__device_stub__simulateKernelPfS_y .size _Z14simulateKernelPfS_y, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z14simulateKernelPfS_y" .size .L__unnamed_1, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Please indicate matrix size." .size .Lstr, 29 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z29__device_stub__simulateKernelPfS_y .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z14simulateKernelPfS_y .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector_types.h>

// Fixed-width aliases used throughout the NV12 -> ARGB conversion path.
typedef unsigned char uint8;
typedef unsigned int uint32;
typedef int int32;

// Each color component occupies 10 bits inside the packed 10:10:10 pixel word.
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10

// Alpha bits OR'ed into every output pixel; written by the host before launch.
__constant__ uint32 constAlpha;

#define MUL(x,y) (x*y)

// Row-major 3x3 color-space matrix (hue adjustment folded in); written by the
// host before launch.
__constant__ float HueColorSpaceAry[9];

// Convert one unpacked 10-bit Y/Cb/Cr triple (yuv[0..2]) to float R/G/B using
// the constant matrix. Chroma is re-centered by subtracting 512, the midpoint
// of the 10-bit range. Results are unclamped; ARGB_10bit() clamps later.
__device__ void YUV2RGB(uint32 *yuv, float *red, float *green, float *blue)
{
    float luma, chromaCb, chromaCr;

    // Prepare for hue adjustment
    luma = (float)yuv[0];
    chromaCb = (float)((int32)yuv[1] - 512.0f);
    chromaCr = (float)((int32)yuv[2] - 512.0f);

    // Convert YUV To RGB with hue adjustment
    *red = MUL(luma, HueColorSpaceAry[0]) + MUL(chromaCb, HueColorSpaceAry[1]) + MUL(chromaCr, HueColorSpaceAry[2]);
    *green= MUL(luma, HueColorSpaceAry[3]) + MUL(chromaCb, HueColorSpaceAry[4]) + MUL(chromaCr, HueColorSpaceAry[5]);
    *blue = MUL(luma, HueColorSpaceAry[6]) + MUL(chromaCb, HueColorSpaceAry[7]) + MUL(chromaCr, HueColorSpaceAry[8]);
}

// Clamp float R/G/B into the 10-bit range [0, 1023], drop the two low bits of
// each component (10 bits -> 8 bits), and pack B | G<<8 | R<<16, OR'ed with
// the caller-supplied alpha bits.
__device__ uint32 ARGB_10bit(float red, float green, float blue, uint32 alpha)
{
    uint32 ARGBpixel = 0;

    // Clamp final 10 bit results
    red = min(max(red, 0.0f), 1023.f);
    green = min(max(green, 0.0f), 1023.f);
    blue = min(max(blue, 0.0f), 1023.f);

    // Convert to 8 bit unsigned integers per color component
    ARGBpixel = (((uint32)blue >> 2) |
                 (((uint32)green >> 2) << 8) |
                 (((uint32)red >> 2) << 16) |
                 (uint32)alpha);

    return ARGBpixel;
}

// CUDA kernel for outputing the final ARGB output from NV12;
// Each thread converts TWO horizontally adjacent pixels (they share one CbCr
// pair in NV12). Luma plane starts at srcImage; the interleaved CbCr plane
// follows at byte offset processingPitch * height.
extern "C" __global__ void NV12ToARGB(uint32 *srcImage, size_t nSourcePitch,
                                      uint32 *dstImage, size_t nDestPitch,
                                      uint32 width, uint32 height)
{
    int32 x, y;
    uint32 yuv101010Pel[2];                      // packed 10:10:10 YUV for the 2 pixels
    uint32 processingPitch = ((width) + 63) & ~63;
    uint32 dstImagePitch = nDestPitch >> 2;      // dest pitch in 32-bit pixels, not bytes
    uint8 *srcImageU8 = (uint8 *)srcImage;

    // NOTE: the 64-byte-aligned value computed above is immediately discarded
    // in favor of the caller-provided source pitch.
    processingPitch = nSourcePitch;

    // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= width)
        return;

    if (y >= height)
        return;

    // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
    // if we move to texture we could read 4 luminance values
    // Luma is shifted left by 2 to occupy the low 10 bits of the packed word.
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;

    uint32 chromaOffset = processingPitch * height;  // start of the CbCr plane
    int32 y_chroma = y >> 1;                         // chroma rows are at half vertical resolution

    if (y & 1)  // odd scanline ?
    {
        uint32 chromaCb;
        uint32 chromaCr;

        chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
        chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];

        if (y_chroma < ((height >> 1) - 1))  // interpolate chroma vertically
        {
            // Average with the next chroma row; +1 rounds to nearest.
            chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
            chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
        }

        // Place Cb in bits [12..21] and Cr in bits [22..31] of both pixels.
        yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }
    else
    {
        // Even scanline: use the co-located chroma row directly (no interpolation).
        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }

    // this steps performs the color conversion
    // Unpack the two packed words back into 10-bit Y/Cb/Cr components.
    uint32 yuv[6];
    float red[2], green[2], blue[2];

    yuv[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
    yuv[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
    yuv[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
    yuv[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
    yuv[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
    yuv[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

    // YUV to RGB Transformation conversion
    YUV2RGB(&yuv[0], &red[0], &green[0], &blue[0]);
    YUV2RGB(&yuv[3], &red[1], &green[1], &blue[1]);

    // Clamp the results to RGBA
    dstImage[y * dstImagePitch + x ] = ARGB_10bit(red[0], green[0], blue[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1 ] = ARGB_10bit(red[1], green[1], blue[1], constAlpha);
}
code for sm_80 Function : NV12ToARGB .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e220000002100 */ /*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe40000000800 */ /*0030*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fe2000800063f */ /*0040*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e680000002600 */ /*0050*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */ /* 0x000e680000002200 */ /*0060*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000ea20000002500 */ /*0070*/ IMAD.SHL.U32 R3, R3, 0x2, RZ ; /* 0x0000000203037824 */ /* 0x001fc400078e00ff */ /*0080*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */ /* 0x002fe400078e0205 */ /*0090*/ IMAD R3, R2, UR4, R3 ; /* 0x0000000402037c24 */ /* 0x004fc6000f8e0203 */ /*00a0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x184], PT ; /* 0x0000610000007a0c */ /* 0x000fc80003f06070 */ /*00b0*/ ISETP.GE.U32.OR P0, PT, R3, c[0x0][0x180], P0 ; /* 0x0000600003007a0c */ /* 0x000fda0000706470 */ /*00c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00d0*/ SHF.R.S32.HI R14, RZ, 0x1, R0 ; /* 0x00000001ff0e7819 */ /* 0x000fe20000011400 */ /*00e0*/ IMAD R6, R0, c[0x0][0x168], R3.reuse ; /* 0x00005a0000067a24 */ /* 0x100fe200078e0203 */ /*00f0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0100*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff0d7624 */ /* 0x000fe400078e00ff */ /*0110*/ IMAD R2, R14, c[0x0][0x168], R3 ; /* 0x00005a000e027a24 */ /* 0x000fe200078e0203 */ /*0120*/ IADD3 R8, R6.reuse, 0x1, RZ ; /* 0x0000000106087810 */ /* 0x040fe40007ffe0ff */ /*0130*/ IADD3 R6, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006067a10 */ /* 0x000fe20007f1e0ff */ /*0140*/ IMAD R13, R13, c[0x0][0x184], R2 ; /* 
0x000061000d0d7a24 */ /* 0x000fe200078e0202 */ /*0150*/ IADD3 R8, P1, R8, c[0x0][0x160], RZ ; /* 0x0000580008087a10 */ /* 0x000fc60007f3e0ff */ /*0160*/ IMAD.X R7, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff077624 */ /* 0x000fe200000e06ff */ /*0170*/ IADD3 R10, P0, R13, c[0x0][0x160], RZ ; /* 0x000058000d0a7a10 */ /* 0x000fe20007f1e0ff */ /*0180*/ IMAD.X R9, RZ, RZ, c[0x0][0x164], P1 ; /* 0x00005900ff097624 */ /* 0x000fc600008e06ff */ /*0190*/ LDG.E.U8 R6, [R6.64] ; /* 0x0000000606067981 */ /* 0x000ea2000c1e1100 */ /*01a0*/ IMAD.X R11, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff0b7624 */ /* 0x000fc600000e06ff */ /*01b0*/ LDG.E.U8 R8, [R8.64] ; /* 0x0000000608087981 */ /* 0x000ee8000c1e1100 */ /*01c0*/ LDG.E.U8 R11, [R10.64] ; /* 0x000000060a0b7981 */ /* 0x000162000c1e1100 */ /*01d0*/ LOP3.LUT R2, R0, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100027812 */ /* 0x000fe200078ec0ff */ /*01e0*/ BSSY B0, 0x430 ; /* 0x0000024000007945 */ /* 0x000fe20003800000 */ /*01f0*/ IADD3 R4, R13, 0x1, RZ ; /* 0x000000010d047810 */ /* 0x000fe40007ffe0ff */ /*0200*/ ISETP.NE.U32.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fe40003f05070 */ /*0210*/ IADD3 R4, P1, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fca0007f3e0ff */ /*0220*/ IMAD.X R5, RZ, RZ, c[0x0][0x164], P1 ; /* 0x00005900ff057624 */ /* 0x000fe400008e06ff */ /*0230*/ IMAD.SHL.U32 R2, R6, 0x4, RZ ; /* 0x0000000406027824 */ /* 0x004fe400078e00ff */ /*0240*/ IMAD.SHL.U32 R12, R8, 0x4, RZ ; /* 0x00000004080c7824 */ /* 0x008fe400078e00ff */ /*0250*/ @P0 BRA 0x3c0 ; /* 0x0000016000000947 */ /* 0x000fea0003800000 */ /*0260*/ ULDC UR4, c[0x0][0x184] ; /* 0x0000610000047ab9 */ /* 0x001fe20000000800 */ /*0270*/ LDG.E.U8 R10, [R4.64] ; /* 0x00000006040a7981 */ /* 0x000ea2000c1e1100 */ /*0280*/ ULEA.HI UR4, UR4, 0xffffffff, URZ, 0x1f ; /* 0xffffffff04047891 */ /* 0x000fcc000f8ff83f */ /*0290*/ ISETP.GE.U32.AND P0, PT, R14, UR4, PT ; /* 0x000000040e007c0c */ /* 0x000fda000bf06070 */ /*02a0*/ @!P0 IADD3 R6, R13, 
c[0x0][0x168], RZ ; /* 0x00005a000d068a10 */ /* 0x000fc80007ffe0ff */ /*02b0*/ @!P0 IADD3 R8, R6.reuse, 0x1, RZ ; /* 0x0000000106088810 */ /* 0x040fe40007ffe0ff */ /*02c0*/ @!P0 IADD3 R6, P1, R6, c[0x0][0x160], RZ ; /* 0x0000580006068a10 */ /* 0x000fe40007f3e0ff */ /*02d0*/ @!P0 IADD3 R8, P2, R8, c[0x0][0x160], RZ ; /* 0x0000580008088a10 */ /* 0x000fc60007f5e0ff */ /*02e0*/ @!P0 IMAD.X R7, RZ, RZ, c[0x0][0x164], P1 ; /* 0x00005900ff078624 */ /* 0x000fe400008e06ff */ /*02f0*/ @!P0 IMAD.X R9, RZ, RZ, c[0x0][0x164], P2 ; /* 0x00005900ff098624 */ /* 0x000fc600010e06ff */ /*0300*/ @!P0 LDG.E.U8 R6, [R6.64] ; /* 0x0000000606068981 */ /* 0x000ee8000c1e1100 */ /*0310*/ @!P0 LDG.E.U8 R9, [R8.64] ; /* 0x0000000608098981 */ /* 0x000ea2000c1e1100 */ /*0320*/ @!P0 IADD3 R13, R11, 0x1, R6 ; /* 0x000000010b0d8810 */ /* 0x028fe40007ffe006 */ /*0330*/ @!P0 IADD3 R14, R10, 0x1, R9 ; /* 0x000000010a0e8810 */ /* 0x004fe40007ffe009 */ /*0340*/ @!P0 SHF.R.U32.HI R11, RZ, 0x1, R13 ; /* 0x00000001ff0b8819 */ /* 0x000fe4000001160d */ /*0350*/ @!P0 SHF.R.U32.HI R10, RZ, 0x1, R14 ; /* 0x00000001ff0a8819 */ /* 0x000fc6000001160e */ /*0360*/ IMAD R2, R11.reuse, 0x1000, R2 ; /* 0x000010000b027824 */ /* 0x040fe400078e0202 */ /*0370*/ IMAD.SHL.U32 R13, R10, 0x400000, RZ ; /* 0x004000000a0d7824 */ /* 0x000fe400078e00ff */ /*0380*/ IMAD R12, R11, 0x1000, R12 ; /* 0x000010000b0c7824 */ /* 0x000fc600078e020c */ /*0390*/ LOP3.LUT R10, R2, R13.reuse, RZ, 0xfc, !PT ; /* 0x0000000d020a7212 */ /* 0x080fe400078efcff */ /*03a0*/ LOP3.LUT R12, R12, R13, RZ, 0xfc, !PT ; /* 0x0000000d0c0c7212 */ /* 0x000fe200078efcff */ /*03b0*/ BRA 0x420 ; /* 0x0000006000007947 */ /* 0x000fea0003800000 */ /*03c0*/ LDG.E.U8 R7, [R4.64] ; /* 0x0000000604077981 */ /* 0x001ea2000c1e1100 */ /*03d0*/ IMAD R2, R11.reuse, 0x1000, R2 ; /* 0x000010000b027824 */ /* 0x060fe400078e0202 */ /*03e0*/ IMAD R12, R11, 0x1000, R12 ; /* 0x000010000b0c7824 */ /* 0x000fe400078e020c */ /*03f0*/ IMAD.SHL.U32 R7, R7, 0x400000, RZ ; /* 
0x0040000007077824 */ /* 0x004fca00078e00ff */ /*0400*/ LOP3.LUT R10, R2, R7.reuse, RZ, 0xfc, !PT ; /* 0x00000007020a7212 */ /* 0x080fe400078efcff */ /*0410*/ LOP3.LUT R12, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0c7212 */ /* 0x000fe400078efcff */ /*0420*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0430*/ SHF.R.U32.HI R4, RZ, 0xa, R10.reuse ; /* 0x0000000aff047819 */ /* 0x100fe2000001160a */ /*0440*/ ULDC.64 UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */ /* 0x000fe20000000a00 */ /*0450*/ LOP3.LUT R2, R10, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff0a027812 */ /* 0x000fe200078ec0ff */ /*0460*/ USHF.R.U64 UR4, UR4, 0x2, UR5 ; /* 0x0000000204047899 */ /* 0x000fe20008001205 */ /*0470*/ LOP3.LUT R4, R4, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff04047812 */ /* 0x000fe400078ec0ff */ /*0480*/ SHF.R.U32.HI R10, RZ, 0x14, R10 ; /* 0x00000014ff0a7819 */ /* 0x000fe4000001160a */ /*0490*/ SHF.R.U32.HI R5, RZ, 0xa, R12.reuse ; /* 0x0000000aff057819 */ /* 0x100fe2000001160c */ /*04a0*/ I2F.U32 R2, R2 ; /* 0x0000000200027306 */ /* 0x000fe20000201000 */ /*04b0*/ LOP3.LUT R10, R10, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff0a0a7812 */ /* 0x000fe200078ec0ff */ /*04c0*/ IMAD R3, R0, UR4, R3 ; /* 0x0000000400037c24 */ /* 0x000fe2000f8e0203 */ /*04d0*/ LOP3.LUT R6, R5, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff05067812 */ /* 0x000fe200078ec0ff */ /*04e0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x4 ; /* 0x00000004ff007424 */ /* 0x000fe200078e00ff */ /*04f0*/ SHF.R.U32.HI R5, RZ, 0x14, R12 ; /* 0x00000014ff057819 */ /* 0x000fc6000001160c */ /*0500*/ I2F R4, R4 ; /* 0x0000000400047306 */ /* 0x000e220000201400 */ /*0510*/ LOP3.LUT R7, R5, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff05077812 */ /* 0x000fe400078ec0ff */ /*0520*/ LOP3.LUT R5, R12, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff0c057812 */ /* 0x000fca00078ec0ff */ /*0530*/ I2F R10, R10 ; /* 0x0000000a000a7306 */ /* 0x000e700000201400 */ /*0540*/ I2F R6, R6 ; /* 0x0000000600067306 */ /* 0x000ea20000201400 */ /*0550*/ FADD R8, R4, -512 ; /* 0xc400000004087421 */ /* 
0x001fc80000000000 */ /*0560*/ FMUL R9, R8, c[0x3][0x20] ; /* 0x00c0080008097a20 */ /* 0x000fc60000400000 */ /*0570*/ I2F R7, R7 ; /* 0x0000000700077306 */ /* 0x000e220000201400 */ /*0580*/ FFMA R11, R2, c[0x3][0x1c], R9 ; /* 0x00c00700020b7a23 */ /* 0x000fe40000000009 */ /*0590*/ FMUL R9, R8, c[0x3][0x14] ; /* 0x00c0050008097a20 */ /* 0x000fe40000400000 */ /*05a0*/ FADD R4, R10, -512 ; /* 0xc40000000a047421 */ /* 0x002fe40000000000 */ /*05b0*/ FFMA R9, R2, c[0x3][0x10], R9 ; /* 0x00c0040002097a23 */ /* 0x000fe20000000009 */ /*05c0*/ I2F.U32 R5, R5 ; /* 0x0000000500057306 */ /* 0x000e620000201000 */ /*05d0*/ FADD R10, R6, -512 ; /* 0xc4000000060a7421 */ /* 0x004fe40000000000 */ /*05e0*/ FFMA R6, R4, c[0x3][0x18], R9 ; /* 0x00c0060004067a23 */ /* 0x000fc40000000009 */ /*05f0*/ FMUL R12, R10, c[0x3][0x20] ; /* 0x00c008000a0c7a20 */ /* 0x000fe40000400000 */ /*0600*/ FMUL R9, R8, c[0x3][0x8] ; /* 0x00c0020008097a20 */ /* 0x000fe40000400000 */ /*0610*/ FMUL R8, R10, c[0x3][0x14] ; /* 0x00c005000a087a20 */ /* 0x000fe40000400000 */ /*0620*/ FADD R7, R7, -512 ; /* 0xc400000007077421 */ /* 0x001fe40000000000 */ /*0630*/ FFMA R11, R4, c[0x3][0x24], R11 ; /* 0x00c00900040b7a23 */ /* 0x000fe4000000000b */ /*0640*/ FMUL R10, R10, c[0x3][0x8] ; /* 0x00c002000a0a7a20 */ /* 0x000fc40000400000 */ /*0650*/ FFMA R12, R5.reuse, c[0x3][0x1c], R12 ; /* 0x00c00700050c7a23 */ /* 0x042fe2000000000c */ /*0660*/ FMNMX R11, RZ, R11, !PT ; /* 0x0000000bff0b7209 */ /* 0x000fe20007800000 */ /*0670*/ FFMA R8, R5, c[0x3][0x10], R8 ; /* 0x00c0040005087a23 */ /* 0x000fe40000000008 */ /*0680*/ FFMA R12, R7, c[0x3][0x24], R12 ; /* 0x00c00900070c7a23 */ /* 0x000fe2000000000c */ /*0690*/ FMNMX R13, R11, 1023, PT ; /* 0x447fc0000b0d7809 */ /* 0x000fe20003800000 */ /*06a0*/ FFMA R9, R2, c[0x3][0x4], R9 ; /* 0x00c0010002097a23 */ /* 0x000fe20000000009 */ /*06b0*/ FMNMX R11, RZ, R6, !PT ; /* 0x00000006ff0b7209 */ /* 0x000fe20007800000 */ /*06c0*/ FFMA R8, R7, c[0x3][0x18], R8 ; /* 0x00c0060007087a23 */ /* 
0x000fe20000000008 */ /*06d0*/ FMNMX R12, RZ, R12, !PT ; /* 0x0000000cff0c7209 */ /* 0x000fe20007800000 */ /*06e0*/ FFMA R10, R5, c[0x3][0x4], R10 ; /* 0x00c00100050a7a23 */ /* 0x000fe2000000000a */ /*06f0*/ FMNMX R11, R11, 1023, PT ; /* 0x447fc0000b0b7809 */ /* 0x000fe20003800000 */ /*0700*/ FFMA R9, R4, c[0x3][0xc], R9 ; /* 0x00c0030004097a23 */ /* 0x000fe20000000009 */ /*0710*/ FMNMX R8, RZ, R8, !PT ; /* 0x00000008ff087209 */ /* 0x000fe20007800000 */ /*0720*/ FFMA R10, R7, c[0x3][0xc], R10 ; /* 0x00c00300070a7a23 */ /* 0x000fe2000000000a */ /*0730*/ FMNMX R12, R12, 1023, PT ; /* 0x447fc0000c0c7809 */ /* 0x000fe20003800000 */ /*0740*/ F2I.U32.TRUNC.NTZ R6, R13 ; /* 0x0000000d00067305 */ /* 0x000e22000020f000 */ /*0750*/ FMNMX R8, R8, 1023, PT ; /* 0x447fc00008087809 */ /* 0x000fc40003800000 */ /*0760*/ FMNMX R9, RZ, R9, !PT ; /* 0x00000009ff097209 */ /* 0x000fe40007800000 */ /*0770*/ FMNMX R10, RZ, R10, !PT ; /* 0x0000000aff0a7209 */ /* 0x000fe40007800000 */ /*0780*/ FMNMX R9, R9, 1023, PT ; /* 0x447fc00009097809 */ /* 0x000fe20003800000 */ /*0790*/ F2I.U32.TRUNC.NTZ R12, R12 ; /* 0x0000000c000c7305 */ /* 0x000e62000020f000 */ /*07a0*/ FMNMX R10, R10, 1023, PT ; /* 0x447fc0000a0a7809 */ /* 0x000fe40003800000 */ /*07b0*/ IADD3 R4, R3.reuse, 0x1, RZ ; /* 0x0000000103047810 */ /* 0x040fe20007ffe0ff */ /*07c0*/ IMAD.WIDE.U32 R2, R3, R0, c[0x0][0x170] ; /* 0x00005c0003027625 */ /* 0x000fc800078e0000 */ /*07d0*/ F2I.U32.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */ /* 0x000ea2000020f000 */ /*07e0*/ SHF.R.U32.HI R6, RZ, 0x2, R6 ; /* 0x00000002ff067819 */ /* 0x001fc80000011606 */ /*07f0*/ LOP3.LUT R6, R6, c[0x3][0x0], RZ, 0xfc, !PT ; /* 0x00c0000006067a12 */ /* 0x000fc600078efcff */ /*0800*/ F2I.U32.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e22000020f000 */ /*0810*/ SHF.R.U32.HI R12, RZ, 0x2, R12 ; /* 0x00000002ff0c7819 */ /* 0x002fc8000001160c */ /*0820*/ LOP3.LUT R5, R12, c[0x3][0x0], RZ, 0xfc, !PT ; /* 0x00c000000c057a12 */ /* 0x000fc600078efcff */ /*0830*/ 
F2I.U32.TRUNC.NTZ R9, R9 ; /* 0x0000000900097305 */ /* 0x000e62000020f000 */ /*0840*/ IMAD.SHL.U32 R11, R11, 0x40, RZ ; /* 0x000000400b0b7824 */ /* 0x004fca00078e00ff */ /*0850*/ LOP3.LUT R6, R6, 0xffffff00, R11, 0xf8, !PT ; /* 0xffffff0006067812 */ /* 0x000fe400078ef80b */ /*0860*/ F2I.U32.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000ea2000020f000 */ /*0870*/ IMAD.SHL.U32 R8, R8, 0x40, RZ ; /* 0x0000004008087824 */ /* 0x001fca00078e00ff */ /*0880*/ LOP3.LUT R5, R5, 0xffffff00, R8, 0xf8, !PT ; /* 0xffffff0005057812 */ /* 0x000fe200078ef808 */ /*0890*/ IMAD.SHL.U32 R9, R9, 0x4000, RZ ; /* 0x0000400009097824 */ /* 0x002fca00078e00ff */ /*08a0*/ LOP3.LUT R9, R6, 0xffff0000, R9, 0xf8, !PT ; /* 0xffff000006097812 */ /* 0x000fe200078ef809 */ /*08b0*/ IMAD.SHL.U32 R10, R10, 0x4000, RZ ; /* 0x000040000a0a7824 */ /* 0x004fc800078e00ff */ /*08c0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe2000c101906 */ /*08d0*/ LOP3.LUT R7, R5, 0xffff0000, R10, 0xf8, !PT ; /* 0xffff000005077812 */ /* 0x000fe200078ef80a */ /*08e0*/ IMAD.WIDE.U32 R4, R4, R0, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fca00078e0000 */ /*08f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101906 */ /*0900*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0910*/ BRA 0x910; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0980*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0990*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09c0*/ 
NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <vector_types.h> typedef unsigned char uint8; typedef unsigned int uint32; typedef int int32; #define COLOR_COMPONENT_MASK 0x3FF #define COLOR_COMPONENT_BIT_SIZE 10 __constant__ uint32 constAlpha; #define MUL(x,y) (x*y) __constant__ float HueColorSpaceAry[9]; __device__ void YUV2RGB(uint32 *yuv, float *red, float *green, float *blue) { float luma, chromaCb, chromaCr; // Prepare for hue adjustment luma = (float)yuv[0]; chromaCb = (float)((int32)yuv[1] - 512.0f); chromaCr = (float)((int32)yuv[2] - 512.0f); // Convert YUV To RGB with hue adjustment *red = MUL(luma, HueColorSpaceAry[0]) + MUL(chromaCb, HueColorSpaceAry[1]) + MUL(chromaCr, HueColorSpaceAry[2]); *green= MUL(luma, HueColorSpaceAry[3]) + MUL(chromaCb, HueColorSpaceAry[4]) + MUL(chromaCr, HueColorSpaceAry[5]); *blue = MUL(luma, HueColorSpaceAry[6]) + MUL(chromaCb, HueColorSpaceAry[7]) + MUL(chromaCr, HueColorSpaceAry[8]); } __device__ uint32 ARGB_10bit(float red, float green, float blue, uint32 alpha) { uint32 ARGBpixel = 0; // Clamp final 10 bit results red = min(max(red, 0.0f), 1023.f); green = min(max(green, 0.0f), 1023.f); blue = min(max(blue, 0.0f), 1023.f); // Convert to 8 bit unsigned integers per color component ARGBpixel = (((uint32)blue >> 2) | (((uint32)green >> 2) << 8) | (((uint32)red >> 2) << 16) | (uint32)alpha); return ARGBpixel; } // CUDA kernel for outputing the final ARGB output from NV12; extern "C" __global__ void NV12ToARGB(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { int32 x, y; uint32 yuv101010Pel[2]; uint32 processingPitch = ((width) + 63) & ~63; uint32 dstImagePitch = nDestPitch >> 2; uint8 *srcImageU8 = (uint8 *)srcImage; processingPitch = nSourcePitch; // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); y = blockIdx.y * blockDim.y + threadIdx.y; if (x 
>= width) return; if (y >= height) return; // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way. // if we move to texture we could read 4 luminance values yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2; yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2; uint32 chromaOffset = processingPitch * height; int32 y_chroma = y >> 1; if (y & 1) // odd scanline ? { uint32 chromaCb; uint32 chromaCr; chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ]; chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1]; if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically { chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1; chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1; } yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } else { yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } // this steps performs the color conversion uint32 yuv[6]; float red[2], green[2], blue[2]; yuv[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK); yuv[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); 
yuv[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK); yuv[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); // YUV to RGB Transformation conversion YUV2RGB(&yuv[0], &red[0], &green[0], &blue[0]); YUV2RGB(&yuv[3], &red[1], &green[1], &blue[1]); // Clamp the results to RGBA dstImage[y * dstImagePitch + x ] = ARGB_10bit(red[0], green[0], blue[0], constAlpha); dstImage[y * dstImagePitch + x + 1 ] = ARGB_10bit(red[1], green[1], blue[1], constAlpha); }
.file "tmpxft_00101af6_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z7YUV2RGBPjPfS0_S0_ .type _Z7YUV2RGBPjPfS0_S0_, @function _Z7YUV2RGBPjPfS0_S0_: .LFB2057: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2057: .size _Z7YUV2RGBPjPfS0_S0_, .-_Z7YUV2RGBPjPfS0_S0_ .globl _Z10ARGB_10bitfffj .type _Z10ARGB_10bitfffj, @function _Z10ARGB_10bitfffj: .LFB2058: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z10ARGB_10bitfffj, .-_Z10ARGB_10bitfffj .globl _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj .type _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj, @function _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj: .LFB2083: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movl %r9d, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call 
__cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 168(%rsp), %rax subq %fs:40, %rax jne .L12 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq NV12ToARGB(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj, .-_Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj .globl NV12ToARGB .type NV12ToARGB, @function NV12ToARGB: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size NV12ToARGB, .-NV12ToARGB .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "NV12ToARGB" .LC1: .string "constAlpha" .LC2: .string "HueColorSpaceAry" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq NV12ToARGB(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $4, %r9d movl $0, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _ZL10constAlpha(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $36, %r9d movl $0, %r8d leaq 
.LC2(%rip), %rdx movq %rdx, %rcx leaq _ZL16HueColorSpaceAry(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL16HueColorSpaceAry .comm _ZL16HueColorSpaceAry,36,32 .local _ZL10constAlpha .comm _ZL10constAlpha,4,4 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <vector_types.h> typedef unsigned char uint8; typedef unsigned int uint32; typedef int int32; #define COLOR_COMPONENT_MASK 0x3FF #define COLOR_COMPONENT_BIT_SIZE 10 __constant__ uint32 constAlpha; #define MUL(x,y) (x*y) __constant__ float HueColorSpaceAry[9]; __device__ void YUV2RGB(uint32 *yuv, float *red, float *green, float *blue) { float luma, chromaCb, chromaCr; // Prepare for hue adjustment luma = (float)yuv[0]; chromaCb = (float)((int32)yuv[1] - 512.0f); chromaCr = (float)((int32)yuv[2] - 512.0f); // Convert YUV To RGB with hue adjustment *red = MUL(luma, HueColorSpaceAry[0]) + MUL(chromaCb, HueColorSpaceAry[1]) + MUL(chromaCr, HueColorSpaceAry[2]); *green= MUL(luma, HueColorSpaceAry[3]) + MUL(chromaCb, HueColorSpaceAry[4]) + MUL(chromaCr, HueColorSpaceAry[5]); *blue = MUL(luma, HueColorSpaceAry[6]) + MUL(chromaCb, HueColorSpaceAry[7]) + MUL(chromaCr, HueColorSpaceAry[8]); } __device__ uint32 ARGB_10bit(float red, float green, float blue, uint32 alpha) { uint32 ARGBpixel = 0; // Clamp final 10 bit results red = min(max(red, 0.0f), 1023.f); green = min(max(green, 0.0f), 1023.f); blue = min(max(blue, 0.0f), 1023.f); // Convert to 8 bit unsigned integers per color component ARGBpixel = (((uint32)blue >> 2) | (((uint32)green >> 2) << 8) | (((uint32)red >> 2) << 16) | (uint32)alpha); return ARGBpixel; } // CUDA kernel for outputing the final ARGB output from NV12; extern "C" __global__ void NV12ToARGB(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { int32 x, y; uint32 yuv101010Pel[2]; uint32 processingPitch = ((width) + 63) & ~63; uint32 dstImagePitch = nDestPitch >> 2; uint8 *srcImageU8 = (uint8 *)srcImage; processingPitch = nSourcePitch; // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); y = blockIdx.y * blockDim.y + threadIdx.y; if (x 
>= width) return; if (y >= height) return; // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way. // if we move to texture we could read 4 luminance values yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2; yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2; uint32 chromaOffset = processingPitch * height; int32 y_chroma = y >> 1; if (y & 1) // odd scanline ? { uint32 chromaCb; uint32 chromaCr; chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ]; chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1]; if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically { chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1; chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1; } yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } else { yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } // this steps performs the color conversion uint32 yuv[6]; float red[2], green[2], blue[2]; yuv[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK); yuv[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); 
yuv[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK); yuv[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); // YUV to RGB Transformation conversion YUV2RGB(&yuv[0], &red[0], &green[0], &blue[0]); YUV2RGB(&yuv[3], &red[1], &green[1], &blue[1]); // Clamp the results to RGBA dstImage[y * dstImagePitch + x ] = ARGB_10bit(red[0], green[0], blue[0], constAlpha); dstImage[y * dstImagePitch + x + 1 ] = ARGB_10bit(red[1], green[1], blue[1], constAlpha); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_vector_types.h> typedef unsigned char uint8; typedef unsigned int uint32; typedef int int32; #define COLOR_COMPONENT_MASK 0x3FF #define COLOR_COMPONENT_BIT_SIZE 10 __constant__ uint32 constAlpha; #define MUL(x,y) (x*y) __constant__ float HueColorSpaceAry[9]; __device__ void YUV2RGB(uint32 *yuv, float *red, float *green, float *blue) { float luma, chromaCb, chromaCr; // Prepare for hue adjustment luma = (float)yuv[0]; chromaCb = (float)((int32)yuv[1] - 512.0f); chromaCr = (float)((int32)yuv[2] - 512.0f); // Convert YUV To RGB with hue adjustment *red = MUL(luma, HueColorSpaceAry[0]) + MUL(chromaCb, HueColorSpaceAry[1]) + MUL(chromaCr, HueColorSpaceAry[2]); *green= MUL(luma, HueColorSpaceAry[3]) + MUL(chromaCb, HueColorSpaceAry[4]) + MUL(chromaCr, HueColorSpaceAry[5]); *blue = MUL(luma, HueColorSpaceAry[6]) + MUL(chromaCb, HueColorSpaceAry[7]) + MUL(chromaCr, HueColorSpaceAry[8]); } __device__ uint32 ARGB_10bit(float red, float green, float blue, uint32 alpha) { uint32 ARGBpixel = 0; // Clamp final 10 bit results red = min(max(red, 0.0f), 1023.f); green = min(max(green, 0.0f), 1023.f); blue = min(max(blue, 0.0f), 1023.f); // Convert to 8 bit unsigned integers per color component ARGBpixel = (((uint32)blue >> 2) | (((uint32)green >> 2) << 8) | (((uint32)red >> 2) << 16) | (uint32)alpha); return ARGBpixel; } // CUDA kernel for outputing the final ARGB output from NV12; extern "C" __global__ void NV12ToARGB(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { int32 x, y; uint32 yuv101010Pel[2]; uint32 processingPitch = ((width) + 63) & ~63; uint32 dstImagePitch = nDestPitch >> 2; uint8 *srcImageU8 = (uint8 *)srcImage; processingPitch = nSourcePitch; // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); y = 
blockIdx.y * blockDim.y + threadIdx.y; if (x >= width) return; if (y >= height) return; // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way. // if we move to texture we could read 4 luminance values yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2; yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2; uint32 chromaOffset = processingPitch * height; int32 y_chroma = y >> 1; if (y & 1) // odd scanline ? { uint32 chromaCb; uint32 chromaCr; chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ]; chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1]; if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically { chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1; chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1; } yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } else { yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } // this steps performs the color conversion uint32 yuv[6]; float red[2], green[2], blue[2]; yuv[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK); yuv[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[2] = ((yuv101010Pel[0] >> 
(COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); yuv[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK); yuv[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); // YUV to RGB Transformation conversion YUV2RGB(&yuv[0], &red[0], &green[0], &blue[0]); YUV2RGB(&yuv[3], &red[1], &green[1], &blue[1]); // Clamp the results to RGBA dstImage[y * dstImagePitch + x ] = ARGB_10bit(red[0], green[0], blue[0], constAlpha); dstImage[y * dstImagePitch + x + 1 ] = ARGB_10bit(red[1], green[1], blue[1], constAlpha); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_vector_types.h> typedef unsigned char uint8; typedef unsigned int uint32; typedef int int32; #define COLOR_COMPONENT_MASK 0x3FF #define COLOR_COMPONENT_BIT_SIZE 10 __constant__ uint32 constAlpha; #define MUL(x,y) (x*y) __constant__ float HueColorSpaceAry[9]; __device__ void YUV2RGB(uint32 *yuv, float *red, float *green, float *blue) { float luma, chromaCb, chromaCr; // Prepare for hue adjustment luma = (float)yuv[0]; chromaCb = (float)((int32)yuv[1] - 512.0f); chromaCr = (float)((int32)yuv[2] - 512.0f); // Convert YUV To RGB with hue adjustment *red = MUL(luma, HueColorSpaceAry[0]) + MUL(chromaCb, HueColorSpaceAry[1]) + MUL(chromaCr, HueColorSpaceAry[2]); *green= MUL(luma, HueColorSpaceAry[3]) + MUL(chromaCb, HueColorSpaceAry[4]) + MUL(chromaCr, HueColorSpaceAry[5]); *blue = MUL(luma, HueColorSpaceAry[6]) + MUL(chromaCb, HueColorSpaceAry[7]) + MUL(chromaCr, HueColorSpaceAry[8]); } __device__ uint32 ARGB_10bit(float red, float green, float blue, uint32 alpha) { uint32 ARGBpixel = 0; // Clamp final 10 bit results red = min(max(red, 0.0f), 1023.f); green = min(max(green, 0.0f), 1023.f); blue = min(max(blue, 0.0f), 1023.f); // Convert to 8 bit unsigned integers per color component ARGBpixel = (((uint32)blue >> 2) | (((uint32)green >> 2) << 8) | (((uint32)red >> 2) << 16) | (uint32)alpha); return ARGBpixel; } // CUDA kernel for outputing the final ARGB output from NV12; extern "C" __global__ void NV12ToARGB(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { int32 x, y; uint32 yuv101010Pel[2]; uint32 processingPitch = ((width) + 63) & ~63; uint32 dstImagePitch = nDestPitch >> 2; uint8 *srcImageU8 = (uint8 *)srcImage; processingPitch = nSourcePitch; // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); y = 
blockIdx.y * blockDim.y + threadIdx.y; if (x >= width) return; if (y >= height) return; // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way. // if we move to texture we could read 4 luminance values yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2; yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2; uint32 chromaOffset = processingPitch * height; int32 y_chroma = y >> 1; if (y & 1) // odd scanline ? { uint32 chromaCb; uint32 chromaCr; chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ]; chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1]; if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically { chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1; chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1; } yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } else { yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2)); yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2)); } // this steps performs the color conversion uint32 yuv[6]; float red[2], green[2], blue[2]; yuv[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK); yuv[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[2] = ((yuv101010Pel[0] >> 
(COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); yuv[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK); yuv[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK); yuv[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK); // YUV to RGB Transformation conversion YUV2RGB(&yuv[0], &red[0], &green[0], &blue[0]); YUV2RGB(&yuv[3], &red[1], &green[1], &blue[1]); // Clamp the results to RGBA dstImage[y * dstImagePitch + x ] = ARGB_10bit(red[0], green[0], blue[0], constAlpha); dstImage[y * dstImagePitch + x + 1 ] = ARGB_10bit(red[1], green[1], blue[1], constAlpha); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected NV12ToARGB .globl NV12ToARGB .p2align 8 .type NV12ToARGB,@function NV12ToARGB: s_clause 0x1 s_load_b32 s2, s[0:1], 0x34 s_load_b64 s[4:5], s[0:1], 0x20 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v2, v0, 10, 10 s_delay_alu instid0(VALU_DEP_2) v_lshlrev_b32_e32 v3, 1, v1 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_mul_i32 s14, s14, s3 v_mad_u64_u32 v[0:1], null, s15, s2, v[2:3] v_lshl_add_u32 v1, s14, 1, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_cmp_gt_u32_e32 vcc_lo, s4, v1 v_cmp_gt_u32_e64 s2, s5, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_8 s_clause 0x1 s_load_b32 s6, s[0:1], 0x8 s_load_b64 s[2:3], s[0:1], 0x0 v_ashrrev_i32_e32 v8, 1, v0 s_mov_b32 s4, exec_lo s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[4:5], null, v0, s6, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v5, s5, v8 v_mad_u64_u32 v[2:3], null, v5, s6, v[1:2] s_delay_alu instid0(VALU_DEP_3) v_add_nc_u32_e32 v3, 1, v4 s_clause 0x2 global_load_u8 v6, v4, s[2:3] global_load_u8 v5, v3, s[2:3] global_load_u8 v7, v2, s[2:3] v_and_b32_e32 v3, 1, v0 s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 1, v3 s_xor_b32 s4, exec_lo, s4 s_cbranch_execz .LBB0_5 v_add_nc_u32_e32 v2, 1, v2 s_lshr_b32 s7, s5, 1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_add_i32 s7, s7, -1 v_cmp_gt_u32_e32 vcc_lo, s7, v8 global_load_u8 v2, v2, s[2:3] s_and_saveexec_b32 s7, vcc_lo s_cbranch_execz .LBB0_4 v_add3_u32 v8, s5, 1, v8 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[3:4], null, v8, s6, v[1:2] v_add_nc_u32_e32 v4, 1, v3 s_clause 0x1 global_load_u8 v3, v3, s[2:3] global_load_u8 v4, v4, s[2:3] s_waitcnt vmcnt(1) v_add3_u32 v3, v7, v3, 1 s_waitcnt vmcnt(0) v_add3_u32 v2, v2, v4, 1 
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshrrev_b32_e32 v7, 1, v3 v_lshrrev_b32_e32 v2, 1, v2 .LBB0_4: s_or_b32 exec_lo, exec_lo, s7 s_waitcnt vmcnt(1) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b32_e32 v3, 12, v7 s_waitcnt vmcnt(0) v_lshl_or_b32 v4, v2, 22, v3 .LBB0_5: s_and_not1_saveexec_b32 s4, s4 s_cbranch_execz .LBB0_7 v_add_nc_u32_e32 v2, 1, v2 s_waitcnt vmcnt(0) v_lshlrev_b32_e32 v3, 12, v7 global_load_u8 v2, v2, s[2:3] s_waitcnt vmcnt(0) v_lshl_or_b32 v4, v2, 22, v3 .LBB0_7: s_or_b32 exec_lo, exec_lo, s4 s_getpc_b64 s[4:5] s_add_u32 s4, s4, HueColorSpaceAry@rel32@lo+4 s_addc_u32 s5, s5, HueColorSpaceAry@rel32@hi+12 s_getpc_b64 s[6:7] s_add_u32 s6, s6, HueColorSpaceAry@rel32@lo+8 s_addc_u32 s7, s7, HueColorSpaceAry@rel32@hi+16 s_getpc_b64 s[8:9] s_add_u32 s8, s8, HueColorSpaceAry@rel32@lo+12 s_addc_u32 s9, s9, HueColorSpaceAry@rel32@hi+20 s_clause 0x1 s_load_b32 s16, s[4:5], 0x0 s_load_b32 s17, s[6:7], 0x0 s_getpc_b64 s[4:5] s_add_u32 s4, s4, HueColorSpaceAry@rel32@lo+16 s_addc_u32 s5, s5, HueColorSpaceAry@rel32@hi+24 s_getpc_b64 s[6:7] s_add_u32 s6, s6, HueColorSpaceAry@rel32@lo+20 s_addc_u32 s7, s7, HueColorSpaceAry@rel32@hi+28 s_clause 0x2 s_load_b32 s8, s[8:9], 0x0 s_load_b32 s9, s[4:5], 0x0 s_load_b32 s6, s[6:7], 0x0 s_waitcnt vmcnt(1) v_and_b32_e32 v3, 0xff, v5 v_bfe_u32 v5, v4, 10, 10 s_getpc_b64 s[10:11] s_add_u32 s10, s10, HueColorSpaceAry@rel32@lo+24 s_addc_u32 s11, s11, HueColorSpaceAry@rel32@hi+32 s_getpc_b64 s[12:13] s_add_u32 s12, s12, HueColorSpaceAry@rel32@lo+28 s_addc_u32 s13, s13, HueColorSpaceAry@rel32@hi+36 s_getpc_b64 s[14:15] s_add_u32 s14, s14, HueColorSpaceAry@rel32@lo+32 s_addc_u32 s15, s15, HueColorSpaceAry@rel32@hi+40 s_clause 0x2 s_load_b32 s7, s[10:11], 0x0 s_load_b32 s10, s[12:13], 0x0 s_load_b32 s11, s[14:15], 0x0 v_cvt_f32_i32_e32 v5, v5 s_getpc_b64 s[4:5] s_add_u32 s4, s4, HueColorSpaceAry@rel32@lo+36 s_addc_u32 s5, s5, HueColorSpaceAry@rel32@hi+44 
v_bfe_u32 v8, v4, 10, 10 s_load_b32 s4, s[4:5], 0x0 s_load_b128 s[0:3], s[0:1], 0x10 v_add_f32_e32 v5, 0xc4000000, v5 v_and_b32_e32 v2, 0xff, v6 v_bfe_u32 v6, v4, 20, 10 v_cvt_f32_i32_e32 v8, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cvt_f32_i32_e32 v6, v6 v_add_f32_e32 v8, 0xc4000000, v8 s_waitcnt vmcnt(0) lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3) v_dual_add_f32 v6, 0xc4000000, v6 :: v_dual_mul_f32 v7, s17, v5 v_lshlrev_b32_e32 v2, 2, v2 v_mul_f32_e32 v10, s6, v5 v_mul_f32_e32 v5, s11, v5 v_and_or_b32 v2, v4, 0x3ff, v2 v_alignbit_b32 v9, s3, s2, 2 s_getpc_b64 s[2:3] s_add_u32 s2, s2, constAlpha@rel32@lo+4 s_addc_u32 s3, s3, constAlpha@rel32@hi+12 s_load_b32 s2, s[2:3], 0x0 v_cvt_f32_u32_e32 v2, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_dual_fmac_f32 v10, s9, v2 :: v_dual_lshlrev_b32 v3, 2, v3 v_mul_f32_e32 v11, s17, v8 v_fmac_f32_e32 v5, s10, v2 v_fmac_f32_e32 v10, s7, v6 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_and_or_b32 v3, v4, 0x3ff, v3 v_bfe_u32 v4, v4, 20, 10 v_cvt_f32_u32_e32 v3, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cvt_f32_i32_e32 v4, v4 v_fmac_f32_e32 v7, s16, v2 v_dual_fmac_f32 v11, s16, v3 :: v_dual_add_f32 v2, 0xc4000000, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_dual_fmac_f32 v7, s8, v6 :: v_dual_mul_f32 v4, s6, v8 v_dual_fmac_f32 v5, s4, v6 :: v_dual_mul_f32 v6, s11, v8 v_med3_f32 v8, v10, 0, 0x447fc000 v_dual_fmac_f32 v11, s8, v2 :: v_dual_fmac_f32 v4, s9, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_med3_f32 v5, v5, 0, 0x447fc000 v_fmac_f32_e32 v6, s10, v3 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_cvt_u32_f32_e32 v3, v8 v_fmac_f32_e32 v4, s7, v2 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_cvt_u32_f32_e32 v5, v5 v_fmac_f32_e32 v6, s4, v2 
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) v_lshlrev_b32_e32 v2, 6, v3 v_med3_f32 v3, v7, 0, 0x447fc000 v_lshrrev_b32_e32 v5, 2, v5 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_and_b32_e32 v7, 0xffffff00, v2 v_cvt_u32_f32_e32 v8, v3 v_mad_u64_u32 v[2:3], null, v0, v9, v[1:2] v_mov_b32_e32 v3, 0 v_med3_f32 v4, v4, 0, 0x447fc000 v_med3_f32 v1, v11, 0, 0x447fc000 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_cvt_u32_f32_e32 v0, v4 v_lshlrev_b32_e32 v4, 14, v8 v_cvt_u32_f32_e32 v9, v1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b32_e32 v8, 6, v0 v_and_or_b32 v4, v4, 0xffff0000, v7 v_lshlrev_b64 v[0:1], 2, v[2:3] v_add_nc_u32_e32 v2, 1, v2 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) v_and_b32_e32 v7, 0xffffff00, v8 v_med3_f32 v6, v6, 0, 0x447fc000 v_lshlrev_b32_e32 v8, 14, v9 v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) v_or3_b32 v4, v4, v5, s2 v_add_co_u32 v0, vcc_lo, s0, v0 v_cvt_u32_f32_e32 v6, v6 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_lshrrev_b32_e32 v5, 2, v6 v_and_or_b32 v6, v8, 0xffff0000, v7 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo v_or3_b32 v5, v6, v5, s2 s_clause 0x1 global_store_b32 v[0:1], v4, off global_store_b32 v[2:3], v5, off .LBB0_8: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel NV12ToARGB .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 
.amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 12 .amdhsa_next_free_sgpr 18 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size NV12ToARGB, .Lfunc_end0-NV12ToARGB .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .protected constAlpha .type constAlpha,@object .section .bss,"aw",@nobits .globl constAlpha .p2align 2, 0x0 constAlpha: .long 0 .size constAlpha, 4 .protected HueColorSpaceAry .type HueColorSpaceAry,@object .globl HueColorSpaceAry .p2align 4, 0x0 HueColorSpaceAry: .zero 36 .size HueColorSpaceAry, 36 .type __hip_cuid_,@object .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym constAlpha .addrsig_sym HueColorSpaceAry .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 8 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 36 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: 
hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: NV12ToARGB .private_segment_fixed_size: 0 .sgpr_count: 20 .sgpr_spill_count: 0 .symbol: NV12ToARGB.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 12 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_vector_types.h>

typedef unsigned char uint8;
typedef unsigned int  uint32;
typedef int           int32;

// Each color component occupies 10 bits of the packed 10:10:10 pixel word.
#define COLOR_COMPONENT_MASK     0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10

// Alpha bits OR-ed into every output pixel; the host writes this before launch.
__constant__ uint32 constAlpha;

// Fully parenthesized so the product binds correctly even for compound
// argument expressions (the original `(x*y)` would mis-bind e.g. MUL(a+b, c)).
// All current call sites pass simple operands, so generated code is unchanged.
#define MUL(x,y) ((x)*(y))

// Row-major 3x3 YUV->RGB conversion matrix; the host writes this before launch.
__constant__ float HueColorSpaceAry[9];

// Convert one YUV triple (yuv[0]=Y, yuv[1]=Cb, yuv[2]=Cr, each nominally
// 10-bit) to floating-point RGB via the HueColorSpaceAry matrix.
// Chroma is re-centered around 512 (the 10-bit midpoint) before the multiply.
__device__ void YUV2RGB(uint32 *yuv, float *red, float *green, float *blue)
{
    float luma, chromaCb, chromaCr;

    // Prepare for hue adjustment
    luma     = (float)yuv[0];
    chromaCb = (float)((int32)yuv[1] - 512.0f);
    chromaCr = (float)((int32)yuv[2] - 512.0f);

    // Convert YUV to RGB with hue adjustment
    *red   = MUL(luma, HueColorSpaceAry[0]) + MUL(chromaCb, HueColorSpaceAry[1]) + MUL(chromaCr, HueColorSpaceAry[2]);
    *green = MUL(luma, HueColorSpaceAry[3]) + MUL(chromaCb, HueColorSpaceAry[4]) + MUL(chromaCr, HueColorSpaceAry[5]);
    *blue  = MUL(luma, HueColorSpaceAry[6]) + MUL(chromaCb, HueColorSpaceAry[7]) + MUL(chromaCr, HueColorSpaceAry[8]);
}

// Clamp float RGB to [0, 1023], drop the two low bits of each component and
// pack as B | G<<8 | R<<16 | alpha. NOTE(review): despite the name, the packed
// layout is 8 bits per color component — the `>> 2` discards the extra
// precision; confirm against the consumer of dstImage.
__device__ uint32 ARGB_10bit(float red, float green, float blue, uint32 alpha)
{
    uint32 ARGBpixel = 0;

    // Clamp final 10-bit results
    red   = min(max(red,   0.0f), 1023.f);
    green = min(max(green, 0.0f), 1023.f);
    blue  = min(max(blue,  0.0f), 1023.f);

    // Convert to 8-bit unsigned integers per color component
    // (alpha is already uint32 — the original's redundant cast is removed).
    ARGBpixel = (((uint32)blue  >> 2)        |
                 (((uint32)green >> 2) << 8)  |
                 (((uint32)red   >> 2) << 16) |
                 alpha);
    return ARGBpixel;
}

// Kernel producing the final ARGB output from an NV12 frame.
//   srcImage/nSourcePitch : luma plane followed by interleaved CbCr plane
//   dstImage/nDestPitch   : packed 32-bit output pixels
// Each thread converts 2 horizontally adjacent pixels, because CbCr is
// decimated 2:1 horizontally.
extern "C" __global__ void NV12ToARGB(uint32 *srcImage, size_t nSourcePitch,
                                      uint32 *dstImage, size_t nDestPitch,
                                      uint32 width, uint32 height)
{
    int32 x, y;
    uint32 yuv101010Pel[2];
    // The source pitch is supplied by the caller. (The original first computed
    // a 64-byte-aligned pitch from `width` and immediately overwrote it with
    // nSourcePitch; that dead store is removed — the value is identical.)
    uint32 processingPitch = (uint32)nSourcePitch;
    uint32 dstImagePitch = nDestPitch >> 2;   // destination pitch in 32-bit pixels
    uint8 *srcImageU8 = (uint8 *)srcImage;

    // We process 2 pixels per thread, hence the x-coordinate doubling.
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= width)
        return;

    if (y >= height)
        return;

    // Read 2 luma components at a time; each is promoted to 10 bits (<< 2).
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x    ]) << 2;
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;

    uint32 chromaOffset = processingPitch * height;  // start of interleaved CbCr plane
    int32 y_chroma = y >> 1;                         // chroma rows are decimated 2:1 vertically

    if (y & 1)  // odd scanline ?
    {
        uint32 chromaCb;
        uint32 chromaCr;

        chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x    ];
        chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];

        if (y_chroma < ((height >> 1) - 1))  // interpolate chroma vertically
        {
            chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x    ] + 1) >> 1;
            chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
        }

        yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }
    else  // even scanline: take the co-located chroma pair as-is
    {
        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }

    // This step performs the color conversion: unpack the two 10:10:10 words
    // into per-component values, then run each triple through the matrix.
    uint32 yuv[6];
    float red[2], green[2], blue[2];
    yuv[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
    yuv[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
    yuv[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
    yuv[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
    yuv[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
    yuv[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

    // YUV to RGB transformation conversion
    YUV2RGB(&yuv[0], &red[0], &green[0], &blue[0]);
    YUV2RGB(&yuv[3], &red[1], &green[1], &blue[1]);

    // Clamp the results and store both packed output pixels.
    dstImage[y * dstImagePitch + x    ] = ARGB_10bit(red[0], green[0], blue[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1] = ARGB_10bit(red[1], green[1], blue[1], constAlpha);
}
	.text
	.file	"kernel.hip"
# -----------------------------------------------------------------------------
# Host-side launch stub for the NV12ToARGB kernel (clang HIP host codegen).
# ABI: System V AMD64. Spills the six register args to the stack, builds the
# kernel-argument pointer array, pops the launch configuration pushed by the
# <<<...>>> call, then forwards everything to hipLaunchKernel.
# NOTE: uses 32-bit absolute relocations (movl $sym) — non-PIE code model.
# -----------------------------------------------------------------------------
	.globl	__device_stub__NV12ToARGB       # -- Begin function __device_stub__NV12ToARGB
	.p2align	4, 0x90
	.type	__device_stub__NV12ToARGB,@function
__device_stub__NV12ToARGB:              # @__device_stub__NV12ToARGB
	.cfi_startproc
# %bb.0:
	subq	$152, %rsp                      # frame for arg spills + arg-pointer array
	.cfi_def_cfa_offset 160
	movq	%rdi, 88(%rsp)                  # srcImage
	movq	%rsi, 80(%rsp)                  # nSourcePitch
	movq	%rdx, 72(%rsp)                  # dstImage
	movq	%rcx, 64(%rsp)                  # nDestPitch
	movl	%r8d, 12(%rsp)                  # width
	movl	%r9d, 8(%rsp)                   # height
	# Build void* kernel-arg array at 96(%rsp): one pointer per spilled arg.
	leaq	88(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 136(%rsp)
	# Retrieve grid dim, block dim, shared-mem size and stream recorded by
	# the __hipPushCallConfiguration done at the <<<...>>> call site.
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi                  # gridDim (x,y packed)
	movl	56(%rsp), %edx                  # gridDim.z
	movq	32(%rsp), %rcx                  # blockDim (x,y packed)
	movl	40(%rsp), %r8d                  # blockDim.z
	leaq	96(%rsp), %r9                   # kernel-arg pointer array
	movl	$NV12ToARGB, %edi               # host-side kernel handle
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # dynamic shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$168, %rsp                      # 152 frame + 2 pushed stack args
	.cfi_adjust_cfa_offset -168
	retq
.Lfunc_end0:
	.size	__device_stub__NV12ToARGB, .Lfunc_end0-__device_stub__NV12ToARGB
	.cfi_endproc
                                        # -- End function
# -----------------------------------------------------------------------------
# Module constructor: registers the fat binary once (guarded by
# __hip_gpubin_handle), then registers the kernel and the two __constant__
# variables with the HIP runtime, and arranges the destructor via atexit.
# -----------------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx                            # callee-saved; holds the gpubin handle
	.cfi_def_cfa_offset 16
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	subq	$32, %rsp                       # 4 zeroed stack args for __hipRegisterFunction
	.cfi_adjust_cfa_offset 32
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$NV12ToARGB, %esi               # host handle
	movl	$.L__unnamed_1, %edx            # device name
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	addq	$32, %rsp
	.cfi_adjust_cfa_offset -32
	# Register __constant__ uint32 constAlpha (size 4, constant=1).
	movl	$constAlpha, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movl	$4, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$1
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# Register __constant__ float HueColorSpaceAry[9] (size 36, constant=1).
	movl	$HueColorSpaceAry, %esi
	movl	$.L__unnamed_3, %edx
	movl	$.L__unnamed_3, %ecx
	movl	$36, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$1
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	movl	$__hip_module_dtor, %edi
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
# -----------------------------------------------------------------------------
# Module destructor: unregisters the fat binary (idempotent — handle is
# nulled so a second invocation is a no-op).
# -----------------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
	# Host shadow storage for the two __constant__ device variables.
	.type	constAlpha,@object              # @constAlpha
	.local	constAlpha
	.comm	constAlpha,4,4
	.type	HueColorSpaceAry,@object        # @HueColorSpaceAry
	.local	HueColorSpaceAry
	.comm	HueColorSpaceAry,36,16
	# Host-side kernel handle: a pointer to the launch stub.
	.type	NV12ToARGB,@object              # @NV12ToARGB
	.section	.rodata,"a",@progbits
	.globl	NV12ToARGB
	.p2align	3, 0x0
NV12ToARGB:
	.quad	__device_stub__NV12ToARGB
	.size	NV12ToARGB, 8
	# Registration name strings.
	.type	.L__unnamed_1,@object           # @0
	.section	.rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
	.asciz	"NV12ToARGB"
	.size	.L__unnamed_1, 11
	.type	.L__unnamed_2,@object           # @1
.L__unnamed_2:
	.asciz	"constAlpha"
	.size	.L__unnamed_2, 11
	.type	.L__unnamed_3,@object           # @2
.L__unnamed_3:
	.asciz	"HueColorSpaceAry"
	.size	.L__unnamed_3, 17
	# Fat-binary wrapper: magic "FPIH" (0x48495046), version 1, payload ptr.
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	# Run the registration constructor at program startup.
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym __device_stub__NV12ToARGB
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym constAlpha
	.addrsig_sym HueColorSpaceAry
	.addrsig_sym NV12ToARGB
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : NV12ToARGB .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e220000002100 */ /*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe40000000800 */ /*0030*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fe2000800063f */ /*0040*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e680000002600 */ /*0050*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */ /* 0x000e680000002200 */ /*0060*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000ea20000002500 */ /*0070*/ IMAD.SHL.U32 R3, R3, 0x2, RZ ; /* 0x0000000203037824 */ /* 0x001fc400078e00ff */ /*0080*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */ /* 0x002fe400078e0205 */ /*0090*/ IMAD R3, R2, UR4, R3 ; /* 0x0000000402037c24 */ /* 0x004fc6000f8e0203 */ /*00a0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x184], PT ; /* 0x0000610000007a0c */ /* 0x000fc80003f06070 */ /*00b0*/ ISETP.GE.U32.OR P0, PT, R3, c[0x0][0x180], P0 ; /* 0x0000600003007a0c */ /* 0x000fda0000706470 */ /*00c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00d0*/ SHF.R.S32.HI R14, RZ, 0x1, R0 ; /* 0x00000001ff0e7819 */ /* 0x000fe20000011400 */ /*00e0*/ IMAD R6, R0, c[0x0][0x168], R3.reuse ; /* 0x00005a0000067a24 */ /* 0x100fe200078e0203 */ /*00f0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0100*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff0d7624 */ /* 0x000fe400078e00ff */ /*0110*/ IMAD R2, R14, c[0x0][0x168], R3 ; /* 0x00005a000e027a24 */ /* 0x000fe200078e0203 */ /*0120*/ IADD3 R8, R6.reuse, 0x1, RZ ; /* 0x0000000106087810 */ /* 0x040fe40007ffe0ff */ /*0130*/ IADD3 R6, P0, R6, c[0x0][0x160], RZ ; /* 0x0000580006067a10 */ /* 0x000fe20007f1e0ff */ /*0140*/ IMAD R13, R13, c[0x0][0x184], R2 ; /* 
0x000061000d0d7a24 */ /* 0x000fe200078e0202 */ /*0150*/ IADD3 R8, P1, R8, c[0x0][0x160], RZ ; /* 0x0000580008087a10 */ /* 0x000fc60007f3e0ff */ /*0160*/ IMAD.X R7, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff077624 */ /* 0x000fe200000e06ff */ /*0170*/ IADD3 R10, P0, R13, c[0x0][0x160], RZ ; /* 0x000058000d0a7a10 */ /* 0x000fe20007f1e0ff */ /*0180*/ IMAD.X R9, RZ, RZ, c[0x0][0x164], P1 ; /* 0x00005900ff097624 */ /* 0x000fc600008e06ff */ /*0190*/ LDG.E.U8 R6, [R6.64] ; /* 0x0000000606067981 */ /* 0x000ea2000c1e1100 */ /*01a0*/ IMAD.X R11, RZ, RZ, c[0x0][0x164], P0 ; /* 0x00005900ff0b7624 */ /* 0x000fc600000e06ff */ /*01b0*/ LDG.E.U8 R8, [R8.64] ; /* 0x0000000608087981 */ /* 0x000ee8000c1e1100 */ /*01c0*/ LDG.E.U8 R11, [R10.64] ; /* 0x000000060a0b7981 */ /* 0x000162000c1e1100 */ /*01d0*/ LOP3.LUT R2, R0, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100027812 */ /* 0x000fe200078ec0ff */ /*01e0*/ BSSY B0, 0x430 ; /* 0x0000024000007945 */ /* 0x000fe20003800000 */ /*01f0*/ IADD3 R4, R13, 0x1, RZ ; /* 0x000000010d047810 */ /* 0x000fe40007ffe0ff */ /*0200*/ ISETP.NE.U32.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fe40003f05070 */ /*0210*/ IADD3 R4, P1, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */ /* 0x000fca0007f3e0ff */ /*0220*/ IMAD.X R5, RZ, RZ, c[0x0][0x164], P1 ; /* 0x00005900ff057624 */ /* 0x000fe400008e06ff */ /*0230*/ IMAD.SHL.U32 R2, R6, 0x4, RZ ; /* 0x0000000406027824 */ /* 0x004fe400078e00ff */ /*0240*/ IMAD.SHL.U32 R12, R8, 0x4, RZ ; /* 0x00000004080c7824 */ /* 0x008fe400078e00ff */ /*0250*/ @P0 BRA 0x3c0 ; /* 0x0000016000000947 */ /* 0x000fea0003800000 */ /*0260*/ ULDC UR4, c[0x0][0x184] ; /* 0x0000610000047ab9 */ /* 0x001fe20000000800 */ /*0270*/ LDG.E.U8 R10, [R4.64] ; /* 0x00000006040a7981 */ /* 0x000ea2000c1e1100 */ /*0280*/ ULEA.HI UR4, UR4, 0xffffffff, URZ, 0x1f ; /* 0xffffffff04047891 */ /* 0x000fcc000f8ff83f */ /*0290*/ ISETP.GE.U32.AND P0, PT, R14, UR4, PT ; /* 0x000000040e007c0c */ /* 0x000fda000bf06070 */ /*02a0*/ @!P0 IADD3 R6, R13, 
c[0x0][0x168], RZ ; /* 0x00005a000d068a10 */ /* 0x000fc80007ffe0ff */ /*02b0*/ @!P0 IADD3 R8, R6.reuse, 0x1, RZ ; /* 0x0000000106088810 */ /* 0x040fe40007ffe0ff */ /*02c0*/ @!P0 IADD3 R6, P1, R6, c[0x0][0x160], RZ ; /* 0x0000580006068a10 */ /* 0x000fe40007f3e0ff */ /*02d0*/ @!P0 IADD3 R8, P2, R8, c[0x0][0x160], RZ ; /* 0x0000580008088a10 */ /* 0x000fc60007f5e0ff */ /*02e0*/ @!P0 IMAD.X R7, RZ, RZ, c[0x0][0x164], P1 ; /* 0x00005900ff078624 */ /* 0x000fe400008e06ff */ /*02f0*/ @!P0 IMAD.X R9, RZ, RZ, c[0x0][0x164], P2 ; /* 0x00005900ff098624 */ /* 0x000fc600010e06ff */ /*0300*/ @!P0 LDG.E.U8 R6, [R6.64] ; /* 0x0000000606068981 */ /* 0x000ee8000c1e1100 */ /*0310*/ @!P0 LDG.E.U8 R9, [R8.64] ; /* 0x0000000608098981 */ /* 0x000ea2000c1e1100 */ /*0320*/ @!P0 IADD3 R13, R11, 0x1, R6 ; /* 0x000000010b0d8810 */ /* 0x028fe40007ffe006 */ /*0330*/ @!P0 IADD3 R14, R10, 0x1, R9 ; /* 0x000000010a0e8810 */ /* 0x004fe40007ffe009 */ /*0340*/ @!P0 SHF.R.U32.HI R11, RZ, 0x1, R13 ; /* 0x00000001ff0b8819 */ /* 0x000fe4000001160d */ /*0350*/ @!P0 SHF.R.U32.HI R10, RZ, 0x1, R14 ; /* 0x00000001ff0a8819 */ /* 0x000fc6000001160e */ /*0360*/ IMAD R2, R11.reuse, 0x1000, R2 ; /* 0x000010000b027824 */ /* 0x040fe400078e0202 */ /*0370*/ IMAD.SHL.U32 R13, R10, 0x400000, RZ ; /* 0x004000000a0d7824 */ /* 0x000fe400078e00ff */ /*0380*/ IMAD R12, R11, 0x1000, R12 ; /* 0x000010000b0c7824 */ /* 0x000fc600078e020c */ /*0390*/ LOP3.LUT R10, R2, R13.reuse, RZ, 0xfc, !PT ; /* 0x0000000d020a7212 */ /* 0x080fe400078efcff */ /*03a0*/ LOP3.LUT R12, R12, R13, RZ, 0xfc, !PT ; /* 0x0000000d0c0c7212 */ /* 0x000fe200078efcff */ /*03b0*/ BRA 0x420 ; /* 0x0000006000007947 */ /* 0x000fea0003800000 */ /*03c0*/ LDG.E.U8 R7, [R4.64] ; /* 0x0000000604077981 */ /* 0x001ea2000c1e1100 */ /*03d0*/ IMAD R2, R11.reuse, 0x1000, R2 ; /* 0x000010000b027824 */ /* 0x060fe400078e0202 */ /*03e0*/ IMAD R12, R11, 0x1000, R12 ; /* 0x000010000b0c7824 */ /* 0x000fe400078e020c */ /*03f0*/ IMAD.SHL.U32 R7, R7, 0x400000, RZ ; /* 
0x0040000007077824 */ /* 0x004fca00078e00ff */ /*0400*/ LOP3.LUT R10, R2, R7.reuse, RZ, 0xfc, !PT ; /* 0x00000007020a7212 */ /* 0x080fe400078efcff */ /*0410*/ LOP3.LUT R12, R12, R7, RZ, 0xfc, !PT ; /* 0x000000070c0c7212 */ /* 0x000fe400078efcff */ /*0420*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0430*/ SHF.R.U32.HI R4, RZ, 0xa, R10.reuse ; /* 0x0000000aff047819 */ /* 0x100fe2000001160a */ /*0440*/ ULDC.64 UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */ /* 0x000fe20000000a00 */ /*0450*/ LOP3.LUT R2, R10, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff0a027812 */ /* 0x000fe200078ec0ff */ /*0460*/ USHF.R.U64 UR4, UR4, 0x2, UR5 ; /* 0x0000000204047899 */ /* 0x000fe20008001205 */ /*0470*/ LOP3.LUT R4, R4, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff04047812 */ /* 0x000fe400078ec0ff */ /*0480*/ SHF.R.U32.HI R10, RZ, 0x14, R10 ; /* 0x00000014ff0a7819 */ /* 0x000fe4000001160a */ /*0490*/ SHF.R.U32.HI R5, RZ, 0xa, R12.reuse ; /* 0x0000000aff057819 */ /* 0x100fe2000001160c */ /*04a0*/ I2F.U32 R2, R2 ; /* 0x0000000200027306 */ /* 0x000fe20000201000 */ /*04b0*/ LOP3.LUT R10, R10, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff0a0a7812 */ /* 0x000fe200078ec0ff */ /*04c0*/ IMAD R3, R0, UR4, R3 ; /* 0x0000000400037c24 */ /* 0x000fe2000f8e0203 */ /*04d0*/ LOP3.LUT R6, R5, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff05067812 */ /* 0x000fe200078ec0ff */ /*04e0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x4 ; /* 0x00000004ff007424 */ /* 0x000fe200078e00ff */ /*04f0*/ SHF.R.U32.HI R5, RZ, 0x14, R12 ; /* 0x00000014ff057819 */ /* 0x000fc6000001160c */ /*0500*/ I2F R4, R4 ; /* 0x0000000400047306 */ /* 0x000e220000201400 */ /*0510*/ LOP3.LUT R7, R5, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff05077812 */ /* 0x000fe400078ec0ff */ /*0520*/ LOP3.LUT R5, R12, 0x3ff, RZ, 0xc0, !PT ; /* 0x000003ff0c057812 */ /* 0x000fca00078ec0ff */ /*0530*/ I2F R10, R10 ; /* 0x0000000a000a7306 */ /* 0x000e700000201400 */ /*0540*/ I2F R6, R6 ; /* 0x0000000600067306 */ /* 0x000ea20000201400 */ /*0550*/ FADD R8, R4, -512 ; /* 0xc400000004087421 */ /* 
0x001fc80000000000 */ /*0560*/ FMUL R9, R8, c[0x3][0x20] ; /* 0x00c0080008097a20 */ /* 0x000fc60000400000 */ /*0570*/ I2F R7, R7 ; /* 0x0000000700077306 */ /* 0x000e220000201400 */ /*0580*/ FFMA R11, R2, c[0x3][0x1c], R9 ; /* 0x00c00700020b7a23 */ /* 0x000fe40000000009 */ /*0590*/ FMUL R9, R8, c[0x3][0x14] ; /* 0x00c0050008097a20 */ /* 0x000fe40000400000 */ /*05a0*/ FADD R4, R10, -512 ; /* 0xc40000000a047421 */ /* 0x002fe40000000000 */ /*05b0*/ FFMA R9, R2, c[0x3][0x10], R9 ; /* 0x00c0040002097a23 */ /* 0x000fe20000000009 */ /*05c0*/ I2F.U32 R5, R5 ; /* 0x0000000500057306 */ /* 0x000e620000201000 */ /*05d0*/ FADD R10, R6, -512 ; /* 0xc4000000060a7421 */ /* 0x004fe40000000000 */ /*05e0*/ FFMA R6, R4, c[0x3][0x18], R9 ; /* 0x00c0060004067a23 */ /* 0x000fc40000000009 */ /*05f0*/ FMUL R12, R10, c[0x3][0x20] ; /* 0x00c008000a0c7a20 */ /* 0x000fe40000400000 */ /*0600*/ FMUL R9, R8, c[0x3][0x8] ; /* 0x00c0020008097a20 */ /* 0x000fe40000400000 */ /*0610*/ FMUL R8, R10, c[0x3][0x14] ; /* 0x00c005000a087a20 */ /* 0x000fe40000400000 */ /*0620*/ FADD R7, R7, -512 ; /* 0xc400000007077421 */ /* 0x001fe40000000000 */ /*0630*/ FFMA R11, R4, c[0x3][0x24], R11 ; /* 0x00c00900040b7a23 */ /* 0x000fe4000000000b */ /*0640*/ FMUL R10, R10, c[0x3][0x8] ; /* 0x00c002000a0a7a20 */ /* 0x000fc40000400000 */ /*0650*/ FFMA R12, R5.reuse, c[0x3][0x1c], R12 ; /* 0x00c00700050c7a23 */ /* 0x042fe2000000000c */ /*0660*/ FMNMX R11, RZ, R11, !PT ; /* 0x0000000bff0b7209 */ /* 0x000fe20007800000 */ /*0670*/ FFMA R8, R5, c[0x3][0x10], R8 ; /* 0x00c0040005087a23 */ /* 0x000fe40000000008 */ /*0680*/ FFMA R12, R7, c[0x3][0x24], R12 ; /* 0x00c00900070c7a23 */ /* 0x000fe2000000000c */ /*0690*/ FMNMX R13, R11, 1023, PT ; /* 0x447fc0000b0d7809 */ /* 0x000fe20003800000 */ /*06a0*/ FFMA R9, R2, c[0x3][0x4], R9 ; /* 0x00c0010002097a23 */ /* 0x000fe20000000009 */ /*06b0*/ FMNMX R11, RZ, R6, !PT ; /* 0x00000006ff0b7209 */ /* 0x000fe20007800000 */ /*06c0*/ FFMA R8, R7, c[0x3][0x18], R8 ; /* 0x00c0060007087a23 */ /* 
0x000fe20000000008 */ /*06d0*/ FMNMX R12, RZ, R12, !PT ; /* 0x0000000cff0c7209 */ /* 0x000fe20007800000 */ /*06e0*/ FFMA R10, R5, c[0x3][0x4], R10 ; /* 0x00c00100050a7a23 */ /* 0x000fe2000000000a */ /*06f0*/ FMNMX R11, R11, 1023, PT ; /* 0x447fc0000b0b7809 */ /* 0x000fe20003800000 */ /*0700*/ FFMA R9, R4, c[0x3][0xc], R9 ; /* 0x00c0030004097a23 */ /* 0x000fe20000000009 */ /*0710*/ FMNMX R8, RZ, R8, !PT ; /* 0x00000008ff087209 */ /* 0x000fe20007800000 */ /*0720*/ FFMA R10, R7, c[0x3][0xc], R10 ; /* 0x00c00300070a7a23 */ /* 0x000fe2000000000a */ /*0730*/ FMNMX R12, R12, 1023, PT ; /* 0x447fc0000c0c7809 */ /* 0x000fe20003800000 */ /*0740*/ F2I.U32.TRUNC.NTZ R6, R13 ; /* 0x0000000d00067305 */ /* 0x000e22000020f000 */ /*0750*/ FMNMX R8, R8, 1023, PT ; /* 0x447fc00008087809 */ /* 0x000fc40003800000 */ /*0760*/ FMNMX R9, RZ, R9, !PT ; /* 0x00000009ff097209 */ /* 0x000fe40007800000 */ /*0770*/ FMNMX R10, RZ, R10, !PT ; /* 0x0000000aff0a7209 */ /* 0x000fe40007800000 */ /*0780*/ FMNMX R9, R9, 1023, PT ; /* 0x447fc00009097809 */ /* 0x000fe20003800000 */ /*0790*/ F2I.U32.TRUNC.NTZ R12, R12 ; /* 0x0000000c000c7305 */ /* 0x000e62000020f000 */ /*07a0*/ FMNMX R10, R10, 1023, PT ; /* 0x447fc0000a0a7809 */ /* 0x000fe40003800000 */ /*07b0*/ IADD3 R4, R3.reuse, 0x1, RZ ; /* 0x0000000103047810 */ /* 0x040fe20007ffe0ff */ /*07c0*/ IMAD.WIDE.U32 R2, R3, R0, c[0x0][0x170] ; /* 0x00005c0003027625 */ /* 0x000fc800078e0000 */ /*07d0*/ F2I.U32.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */ /* 0x000ea2000020f000 */ /*07e0*/ SHF.R.U32.HI R6, RZ, 0x2, R6 ; /* 0x00000002ff067819 */ /* 0x001fc80000011606 */ /*07f0*/ LOP3.LUT R6, R6, c[0x3][0x0], RZ, 0xfc, !PT ; /* 0x00c0000006067a12 */ /* 0x000fc600078efcff */ /*0800*/ F2I.U32.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */ /* 0x000e22000020f000 */ /*0810*/ SHF.R.U32.HI R12, RZ, 0x2, R12 ; /* 0x00000002ff0c7819 */ /* 0x002fc8000001160c */ /*0820*/ LOP3.LUT R5, R12, c[0x3][0x0], RZ, 0xfc, !PT ; /* 0x00c000000c057a12 */ /* 0x000fc600078efcff */ /*0830*/ 
F2I.U32.TRUNC.NTZ R9, R9 ; /* 0x0000000900097305 */ /* 0x000e62000020f000 */ /*0840*/ IMAD.SHL.U32 R11, R11, 0x40, RZ ; /* 0x000000400b0b7824 */ /* 0x004fca00078e00ff */ /*0850*/ LOP3.LUT R6, R6, 0xffffff00, R11, 0xf8, !PT ; /* 0xffffff0006067812 */ /* 0x000fe400078ef80b */ /*0860*/ F2I.U32.TRUNC.NTZ R10, R10 ; /* 0x0000000a000a7305 */ /* 0x000ea2000020f000 */ /*0870*/ IMAD.SHL.U32 R8, R8, 0x40, RZ ; /* 0x0000004008087824 */ /* 0x001fca00078e00ff */ /*0880*/ LOP3.LUT R5, R5, 0xffffff00, R8, 0xf8, !PT ; /* 0xffffff0005057812 */ /* 0x000fe200078ef808 */ /*0890*/ IMAD.SHL.U32 R9, R9, 0x4000, RZ ; /* 0x0000400009097824 */ /* 0x002fca00078e00ff */ /*08a0*/ LOP3.LUT R9, R6, 0xffff0000, R9, 0xf8, !PT ; /* 0xffff000006097812 */ /* 0x000fe200078ef809 */ /*08b0*/ IMAD.SHL.U32 R10, R10, 0x4000, RZ ; /* 0x000040000a0a7824 */ /* 0x004fc800078e00ff */ /*08c0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe2000c101906 */ /*08d0*/ LOP3.LUT R7, R5, 0xffff0000, R10, 0xf8, !PT ; /* 0xffff000005077812 */ /* 0x000fe200078ef80a */ /*08e0*/ IMAD.WIDE.U32 R4, R4, R0, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fca00078e0000 */ /*08f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101906 */ /*0900*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0910*/ BRA 0x910; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0980*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0990*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09c0*/ 
NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*09f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
	.text
	.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; -----------------------------------------------------------------------------
; gfx1100 (wave32) device code for the NV12ToARGB kernel, clang -O output.
; Kernarg layout (s[0:1] = kernarg base): 0x0 srcImage, 0x8 nSourcePitch,
; 0x10 dstImage, 0x18 nDestPitch, 0x20 width, 0x24 height (0x34 packed
; workgroup sizes from the hidden args). Comments below are reviewer notes;
; the instruction stream is unchanged — s_delay_alu hints are position-
; dependent scheduling metadata and must not be reordered.
; -----------------------------------------------------------------------------
	.protected	NV12ToARGB
	.globl	NV12ToARGB
	.p2align	8
	.type	NV12ToARGB,@function
NV12ToARGB:
; Compute x = 2*(blockIdx.x*blockDim.x + tid.x), y = blockIdx.y*blockDim.y + tid.y
; and skip the whole body unless x < width && y < height.
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x34
	s_load_b64 s[4:5], s[0:1], 0x20
	v_and_b32_e32 v1, 0x3ff, v0
	v_bfe_u32 v2, v0, 10, 10
	s_delay_alu instid0(VALU_DEP_2)
	v_lshlrev_b32_e32 v3, 1, v1
	s_waitcnt lgkmcnt(0)
	s_and_b32 s3, s2, 0xffff
	s_lshr_b32 s2, s2, 16
	s_mul_i32 s14, s14, s3
	v_mad_u64_u32 v[0:1], null, s15, s2, v[2:3]
	v_lshl_add_u32 v1, s14, 1, v3
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
	v_cmp_gt_u32_e32 vcc_lo, s4, v1
	v_cmp_gt_u32_e64 s2, s5, v0
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
	s_and_b32 s2, vcc_lo, s2
	s_and_saveexec_b32 s3, s2
	s_cbranch_execz .LBB0_8
; Load the two luma bytes and the co-located Cb byte from the chroma plane.
	s_clause 0x1
	s_load_b32 s6, s[0:1], 0x8
	s_load_b64 s[2:3], s[0:1], 0x0
	v_ashrrev_i32_e32 v8, 1, v0
	s_mov_b32 s4, exec_lo
	s_waitcnt lgkmcnt(0)
	v_mad_u64_u32 v[4:5], null, v0, s6, v[1:2]
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_add_nc_u32_e32 v5, s5, v8
	v_mad_u64_u32 v[2:3], null, v5, s6, v[1:2]
	s_delay_alu instid0(VALU_DEP_3)
	v_add_nc_u32_e32 v3, 1, v4
	s_clause 0x2
	global_load_u8 v6, v4, s[2:3]
	global_load_u8 v5, v3, s[2:3]
	global_load_u8 v7, v2, s[2:3]
; Diverge on (y & 1): odd scanlines may interpolate chroma vertically.
	v_and_b32_e32 v3, 1, v0
	s_delay_alu instid0(VALU_DEP_1)
	v_cmpx_eq_u32_e32 1, v3
	s_xor_b32 s4, exec_lo, s4
	s_cbranch_execz .LBB0_5
	v_add_nc_u32_e32 v2, 1, v2
	s_lshr_b32 s7, s5, 1
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
	s_add_i32 s7, s7, -1
	v_cmp_gt_u32_e32 vcc_lo, s7, v8
	global_load_u8 v2, v2, s[2:3]
	s_and_saveexec_b32 s7, vcc_lo
	s_cbranch_execz .LBB0_4
; Not on the last chroma row: average with the next chroma row (rounded).
	v_add3_u32 v8, s5, 1, v8
	s_waitcnt vmcnt(0)
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[3:4], null, v8, s6, v[1:2]
	v_add_nc_u32_e32 v4, 1, v3
	s_clause 0x1
	global_load_u8 v3, v3, s[2:3]
	global_load_u8 v4, v4, s[2:3]
	s_waitcnt vmcnt(1)
	v_add3_u32 v3, v7, v3, 1
	s_waitcnt vmcnt(0)
	v_add3_u32 v2, v2, v4, 1
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_lshrrev_b32_e32 v7, 1, v3
	v_lshrrev_b32_e32 v2, 1, v2
.LBB0_4:
	s_or_b32 exec_lo, exec_lo, s7
; Pack chroma into the 10:10:10 word: Cb << 12, Cr << 22.
	s_waitcnt vmcnt(1)
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b32_e32 v3, 12, v7
	s_waitcnt vmcnt(0)
	v_lshl_or_b32 v4, v2, 22, v3
.LBB0_5:
	s_and_not1_saveexec_b32 s4, s4
	s_cbranch_execz .LBB0_7
; Even scanline: take the co-located Cb/Cr pair without interpolation.
	v_add_nc_u32_e32 v2, 1, v2
	s_waitcnt vmcnt(0)
	v_lshlrev_b32_e32 v3, 12, v7
	global_load_u8 v2, v2, s[2:3]
	s_waitcnt vmcnt(0)
	v_lshl_or_b32 v4, v2, 22, v3
.LBB0_7:
	s_or_b32 exec_lo, exec_lo, s4
; Load the nine HueColorSpaceAry matrix coefficients (one PC-relative address
; per element, as emitted by the compiler).
	s_getpc_b64 s[4:5]
	s_add_u32 s4, s4, HueColorSpaceAry@rel32@lo+4
	s_addc_u32 s5, s5, HueColorSpaceAry@rel32@hi+12
	s_getpc_b64 s[6:7]
	s_add_u32 s6, s6, HueColorSpaceAry@rel32@lo+8
	s_addc_u32 s7, s7, HueColorSpaceAry@rel32@hi+16
	s_getpc_b64 s[8:9]
	s_add_u32 s8, s8, HueColorSpaceAry@rel32@lo+12
	s_addc_u32 s9, s9, HueColorSpaceAry@rel32@hi+20
	s_clause 0x1
	s_load_b32 s16, s[4:5], 0x0
	s_load_b32 s17, s[6:7], 0x0
	s_getpc_b64 s[4:5]
	s_add_u32 s4, s4, HueColorSpaceAry@rel32@lo+16
	s_addc_u32 s5, s5, HueColorSpaceAry@rel32@hi+24
	s_getpc_b64 s[6:7]
	s_add_u32 s6, s6, HueColorSpaceAry@rel32@lo+20
	s_addc_u32 s7, s7, HueColorSpaceAry@rel32@hi+28
	s_clause 0x2
	s_load_b32 s8, s[8:9], 0x0
	s_load_b32 s9, s[4:5], 0x0
	s_load_b32 s6, s[6:7], 0x0
	s_waitcnt vmcnt(1)
	v_and_b32_e32 v3, 0xff, v5
	v_bfe_u32 v5, v4, 10, 10
	s_getpc_b64 s[10:11]
	s_add_u32 s10, s10, HueColorSpaceAry@rel32@lo+24
	s_addc_u32 s11, s11, HueColorSpaceAry@rel32@hi+32
	s_getpc_b64 s[12:13]
	s_add_u32 s12, s12, HueColorSpaceAry@rel32@lo+28
	s_addc_u32 s13, s13, HueColorSpaceAry@rel32@hi+36
	s_getpc_b64 s[14:15]
	s_add_u32 s14, s14, HueColorSpaceAry@rel32@lo+32
	s_addc_u32 s15, s15, HueColorSpaceAry@rel32@hi+40
	s_clause 0x2
	s_load_b32 s7, s[10:11], 0x0
	s_load_b32 s10, s[12:13], 0x0
	s_load_b32 s11, s[14:15], 0x0
	v_cvt_f32_i32_e32 v5, v5
	s_getpc_b64 s[4:5]
	s_add_u32 s4, s4, HueColorSpaceAry@rel32@lo+36
	s_addc_u32 s5, s5, HueColorSpaceAry@rel32@hi+44
; Unpack both YUV triples; chroma is converted to float and re-centered by
; adding 0xc4000000 (= -512.0f), then pushed through the 3x3 matrix via fmac.
	v_bfe_u32 v8, v4, 10, 10
	s_load_b32 s4, s[4:5], 0x0
	s_load_b128 s[0:3], s[0:1], 0x10
	v_add_f32_e32 v5, 0xc4000000, v5
	v_and_b32_e32 v2, 0xff, v6
	v_bfe_u32 v6, v4, 20, 10
	v_cvt_f32_i32_e32 v8, v8
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_cvt_f32_i32_e32 v6, v6
	v_add_f32_e32 v8, 0xc4000000, v8
	s_waitcnt vmcnt(0) lgkmcnt(0)
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
	v_dual_add_f32 v6, 0xc4000000, v6 :: v_dual_mul_f32 v7, s17, v5
	v_lshlrev_b32_e32 v2, 2, v2
	v_mul_f32_e32 v10, s6, v5
	v_mul_f32_e32 v5, s11, v5
	v_and_or_b32 v2, v4, 0x3ff, v2
	v_alignbit_b32 v9, s3, s2, 2
	s_getpc_b64 s[2:3]
	s_add_u32 s2, s2, constAlpha@rel32@lo+4
	s_addc_u32 s3, s3, constAlpha@rel32@hi+12
	s_load_b32 s2, s[2:3], 0x0
	v_cvt_f32_u32_e32 v2, v2
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
	v_dual_fmac_f32 v10, s9, v2 :: v_dual_lshlrev_b32 v3, 2, v3
	v_mul_f32_e32 v11, s17, v8
	v_fmac_f32_e32 v5, s10, v2
	v_fmac_f32_e32 v10, s7, v6
	s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
	v_and_or_b32 v3, v4, 0x3ff, v3
	v_bfe_u32 v4, v4, 20, 10
	v_cvt_f32_u32_e32 v3, v3
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
	v_cvt_f32_i32_e32 v4, v4
	v_fmac_f32_e32 v7, s16, v2
	v_dual_fmac_f32 v11, s16, v3 :: v_dual_add_f32 v2, 0xc4000000, v4
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
	v_dual_fmac_f32 v7, s8, v6 :: v_dual_mul_f32 v4, s6, v8
	v_dual_fmac_f32 v5, s4, v6 :: v_dual_mul_f32 v6, s11, v8
; v_med3_f32 x, 0, 1023.0 (0x447fc000) implements the min/max clamp.
	v_med3_f32 v8, v10, 0, 0x447fc000
	v_dual_fmac_f32 v11, s8, v2 :: v_dual_fmac_f32 v4, s9, v3
	s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
	v_med3_f32 v5, v5, 0, 0x447fc000
	v_fmac_f32_e32 v6, s10, v3
	s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
	v_cvt_u32_f32_e32 v3, v8
	v_fmac_f32_e32 v4, s7, v2
	s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
	v_cvt_u32_f32_e32 v5, v5
	v_fmac_f32_e32 v6, s4, v2
; Pack both output pixels: (b>>2) | (g>>2)<<8 | (r>>2)<<16 | constAlpha,
; folded by the compiler into shift/and/or trees, then store them.
	s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
	v_lshlrev_b32_e32 v2, 6, v3
	v_med3_f32 v3, v7, 0, 0x447fc000
	v_lshrrev_b32_e32 v5, 2, v5
	s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
	v_and_b32_e32 v7, 0xffffff00, v2
	v_cvt_u32_f32_e32 v8, v3
	v_mad_u64_u32 v[2:3], null, v0, v9, v[1:2]
	v_mov_b32_e32 v3, 0
	v_med3_f32 v4, v4, 0, 0x447fc000
	v_med3_f32 v1, v11, 0, 0x447fc000
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
	v_cvt_u32_f32_e32 v0, v4
	v_lshlrev_b32_e32 v4, 14, v8
	v_cvt_u32_f32_e32 v9, v1
	s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
	v_lshlrev_b32_e32 v8, 6, v0
	v_and_or_b32 v4, v4, 0xffff0000, v7
	v_lshlrev_b64 v[0:1], 2, v[2:3]
	v_add_nc_u32_e32 v2, 1, v2
	s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
	v_and_b32_e32 v7, 0xffffff00, v8
	v_med3_f32 v6, v6, 0, 0x447fc000
	v_lshlrev_b32_e32 v8, 14, v9
	v_lshlrev_b64 v[2:3], 2, v[2:3]
	s_waitcnt lgkmcnt(0)
	v_or3_b32 v4, v4, v5, s2
	v_add_co_u32 v0, vcc_lo, s0, v0
	v_cvt_u32_f32_e32 v6, v6
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	v_add_co_u32 v2, vcc_lo, s0, v2
	s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
	v_lshrrev_b32_e32 v5, 2, v6
	v_and_or_b32 v6, v8, 0xffff0000, v7
	v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
	v_or3_b32 v5, v6, v5, s2
	s_clause 0x1
	global_store_b32 v[0:1], v4, off
	global_store_b32 v[2:3], v5, off
.LBB0_8:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
; Kernel descriptor consumed by the ROCm loader.
	.section	.rodata,"a",@progbits
	.p2align	6, 0x0
	.amdhsa_kernel NV12ToARGB
		.amdhsa_group_segment_fixed_size 0
		.amdhsa_private_segment_fixed_size 0
		.amdhsa_kernarg_size 296
		.amdhsa_user_sgpr_count 14
		.amdhsa_user_sgpr_dispatch_ptr 0
		.amdhsa_user_sgpr_queue_ptr 0
		.amdhsa_user_sgpr_kernarg_segment_ptr 1
		.amdhsa_user_sgpr_dispatch_id 0
		.amdhsa_user_sgpr_private_segment_size 0
		.amdhsa_wavefront_size32 1
		.amdhsa_uses_dynamic_stack 0
		.amdhsa_enable_private_segment 0
		.amdhsa_system_sgpr_workgroup_id_x 1
		.amdhsa_system_sgpr_workgroup_id_y 1
		.amdhsa_system_sgpr_workgroup_id_z 0
		.amdhsa_system_sgpr_workgroup_info 0
		.amdhsa_system_vgpr_workitem_id 1
		.amdhsa_next_free_vgpr 12
		.amdhsa_next_free_sgpr 18
		.amdhsa_float_round_mode_32 0
		.amdhsa_float_round_mode_16_64 0
		.amdhsa_float_denorm_mode_32 3
		.amdhsa_float_denorm_mode_16_64 3
		.amdhsa_dx10_clamp 1
		.amdhsa_ieee_mode 1
		.amdhsa_fp16_overflow 0
		.amdhsa_workgroup_processor_mode 1
		.amdhsa_memory_ordered 1
		.amdhsa_forward_progress 0
		.amdhsa_shared_vgpr_count 0
		.amdhsa_exception_fp_ieee_invalid_op 0
		.amdhsa_exception_fp_denorm_src 0
		.amdhsa_exception_fp_ieee_div_zero 0
		.amdhsa_exception_fp_ieee_overflow 0
		.amdhsa_exception_fp_ieee_underflow 0
		.amdhsa_exception_fp_ieee_inexact 0
		.amdhsa_exception_int_div_zero 0
	.end_amdhsa_kernel
	.text
.Lfunc_end0:
	.size	NV12ToARGB, .Lfunc_end0-NV12ToARGB
	.section	.AMDGPU.csdata,"",@progbits
	.text
	.p2alignl 7, 3214868480
	.fill 96, 4, 3214868480
; Device-side storage for the two __constant__ variables.
	.protected	constAlpha
	.type	constAlpha,@object
	.section	.bss,"aw",@nobits
	.globl	constAlpha
	.p2align	2, 0x0
constAlpha:
	.long	0
	.size	constAlpha, 4
	.protected	HueColorSpaceAry
	.type	HueColorSpaceAry,@object
	.globl	HueColorSpaceAry
	.p2align	4, 0x0
HueColorSpaceAry:
	.zero	36
	.size	HueColorSpaceAry, 36
	.type	__hip_cuid_,@object
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0
	.size	__hip_cuid_, 1
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym constAlpha
	.addrsig_sym HueColorSpaceAry
	.addrsig_sym __hip_cuid_
; HSA code-object v5 metadata (YAML): kernarg layout incl. hidden args,
; register counts and launch limits for the runtime.
	.amdgpu_metadata
---
amdhsa.kernels:
  - .args:
      - .address_space:  global
        .offset:         0
        .size:           8
        .value_kind:     global_buffer
      - .offset:         8
        .size:           8
        .value_kind:     by_value
      - .address_space:  global
        .offset:         16
        .size:           8
        .value_kind:     global_buffer
      - .offset:         24
        .size:           8
        .value_kind:     by_value
      - .offset:         32
        .size:           4
        .value_kind:     by_value
      - .offset:         36
        .size:           4
        .value_kind:     by_value
      - .offset:         40
        .size:           4
        .value_kind:     hidden_block_count_x
      - .offset:         44
        .size:           4
        .value_kind:     hidden_block_count_y
      - .offset:         48
        .size:           4
        .value_kind:     hidden_block_count_z
      - .offset:         52
        .size:           2
        .value_kind:     hidden_group_size_x
      - .offset:         54
        .size:           2
        .value_kind:     hidden_group_size_y
      - .offset:         56
        .size:           2
        .value_kind:     hidden_group_size_z
      - .offset:         58
        .size:           2
        .value_kind:     hidden_remainder_x
      - .offset:         60
        .size:           2
        .value_kind:     hidden_remainder_y
      - .offset:         62
        .size:           2
        .value_kind:     hidden_remainder_z
      - .offset:         80
        .size:           8
        .value_kind:     hidden_global_offset_x
      - .offset:         88
        .size:           8
        .value_kind:     hidden_global_offset_y
      - .offset:         96
        .size:           8
        .value_kind:     hidden_global_offset_z
      - .offset:         104
        .size:           2
        .value_kind:     hidden_grid_dims
    .group_segment_fixed_size: 0
    .kernarg_segment_align: 8
    .kernarg_segment_size: 296
    .language:       OpenCL C
    .language_version:
      - 2
      - 0
    .max_flat_workgroup_size: 1024
    .name:           NV12ToARGB
    .private_segment_fixed_size: 0
    .sgpr_count:     20
    .sgpr_spill_count: 0
    .symbol:         NV12ToARGB.kd
    .uniform_work_group_size: 1
    .uses_dynamic_stack: false
    .vgpr_count:     12
    .vgpr_spill_count: 0
    .wavefront_size: 32
    .workgroup_processor_mode: 1
amdhsa.target:   amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
  - 1
  - 2
...
	.end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00101af6_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z7YUV2RGBPjPfS0_S0_ .type _Z7YUV2RGBPjPfS0_S0_, @function _Z7YUV2RGBPjPfS0_S0_: .LFB2057: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2057: .size _Z7YUV2RGBPjPfS0_S0_, .-_Z7YUV2RGBPjPfS0_S0_ .globl _Z10ARGB_10bitfffj .type _Z10ARGB_10bitfffj, @function _Z10ARGB_10bitfffj: .LFB2058: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z10ARGB_10bitfffj, .-_Z10ARGB_10bitfffj .globl _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj .type _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj, @function _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj: .LFB2083: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movl %r8d, 12(%rsp) movl %r9d, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 12(%rsp), %rax movq %rax, 144(%rsp) leaq 8(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call 
__cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 168(%rsp), %rax subq %fs:40, %rax jne .L12 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq NV12ToARGB(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L7 .L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj, .-_Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj .globl NV12ToARGB .type NV12ToARGB, @function NV12ToARGB: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z10NV12ToARGBPjmS_mjjPjmS_mjj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size NV12ToARGB, .-NV12ToARGB .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "NV12ToARGB" .LC1: .string "constAlpha" .LC2: .string "HueColorSpaceAry" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq NV12ToARGB(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $4, %r9d movl $0, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _ZL10constAlpha(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $1 .cfi_def_cfa_offset 32 movl $36, %r9d movl $0, %r8d leaq 
.LC2(%rip), %rdx movq %rdx, %rcx leaq _ZL16HueColorSpaceAry(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL16HueColorSpaceAry .comm _ZL16HueColorSpaceAry,36,32 .local _ZL10constAlpha .comm _ZL10constAlpha,4,4 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel.hip" .globl __device_stub__NV12ToARGB # -- Begin function __device_stub__NV12ToARGB .p2align 4, 0x90 .type __device_stub__NV12ToARGB,@function __device_stub__NV12ToARGB: # @__device_stub__NV12ToARGB .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movl %r8d, 12(%rsp) movl %r9d, 8(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 12(%rsp), %rax movq %rax, 128(%rsp) leaq 8(%rsp), %rax movq %rax, 136(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 96(%rsp), %r9 movl $NV12ToARGB, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size __device_stub__NV12ToARGB, .Lfunc_end0-__device_stub__NV12ToARGB .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rbx subq $32, %rsp .cfi_adjust_cfa_offset 32 xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $NV12ToARGB, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction addq $32, %rsp .cfi_adjust_cfa_offset -32 movl $constAlpha, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movl $4, %r9d movq %rbx, %rdi xorl %r8d, %r8d pushq $0 
.cfi_adjust_cfa_offset 8 pushq $1 .cfi_adjust_cfa_offset 8 callq __hipRegisterVar addq $16, %rsp .cfi_adjust_cfa_offset -16 movl $HueColorSpaceAry, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movl $36, %r9d movq %rbx, %rdi xorl %r8d, %r8d pushq $0 .cfi_adjust_cfa_offset 8 pushq $1 .cfi_adjust_cfa_offset 8 callq __hipRegisterVar addq $16, %rsp .cfi_adjust_cfa_offset -16 movl $__hip_module_dtor, %edi popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type constAlpha,@object # @constAlpha .local constAlpha .comm constAlpha,4,4 .type HueColorSpaceAry,@object # @HueColorSpaceAry .local HueColorSpaceAry .comm HueColorSpaceAry,36,16 .type NV12ToARGB,@object # @NV12ToARGB .section .rodata,"a",@progbits .globl NV12ToARGB .p2align 3, 0x0 NV12ToARGB: .quad __device_stub__NV12ToARGB .size NV12ToARGB, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "NV12ToARGB" .size .L__unnamed_1, 11 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "constAlpha" .size .L__unnamed_2, 11 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "HueColorSpaceAry" .size .L__unnamed_3, 17 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # 
@__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __device_stub__NV12ToARGB .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym constAlpha .addrsig_sym HueColorSpaceAry .addrsig_sym NV12ToARGB .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> __global__ void dummy() { int j = 0; for(int i = 0; i < 1000000; i++) j++; } int main() { cudaStream_t stream1, stream2; double *A, *B, *C, *D; cudaSetDevice(1); cudaMalloc((void **) &C, 100000000 * sizeof(double)); cudaMalloc((void **) &D, 10000000 * sizeof(double)); cudaSetDevice(0); cudaMalloc((void **) &A, 100000000 * sizeof(double)); cudaMalloc((void **) &B, 10000000 * sizeof(double)); cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaDeviceEnablePeerAccess(1, 0); dummy<<<1, 1>>>(); cudaSetDevice(0); cudaMemcpyPeerAsync(C, 1, A, 0, 100000000 * sizeof(double), stream1); cudaSetDevice(1); for(int i = 0; i < 10; i++) dummy<<<1, 1>>>(); cudaSetDevice(0); cudaMemcpyPeerAsync(D, 1, B, 0, 10000000 * sizeof(double)); for(int i = 0; i < 2; i++) { cudaSetDevice(i); cudaDeviceSynchronize(); } return 0; }
code for sm_80 Function : _Z5dummyv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> __global__ void dummy() { int j = 0; for(int i = 0; i < 1000000; i++) j++; } int main() { cudaStream_t stream1, stream2; double *A, *B, *C, *D; cudaSetDevice(1); cudaMalloc((void **) &C, 100000000 * sizeof(double)); cudaMalloc((void **) &D, 10000000 * sizeof(double)); cudaSetDevice(0); cudaMalloc((void **) &A, 100000000 * sizeof(double)); cudaMalloc((void **) &B, 10000000 * sizeof(double)); cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaDeviceEnablePeerAccess(1, 0); dummy<<<1, 1>>>(); cudaSetDevice(0); cudaMemcpyPeerAsync(C, 1, A, 0, 100000000 * sizeof(double), stream1); cudaSetDevice(1); for(int i = 0; i < 10; i++) dummy<<<1, 1>>>(); cudaSetDevice(0); cudaMemcpyPeerAsync(D, 1, B, 0, 10000000 * sizeof(double)); for(int i = 0; i < 2; i++) { cudaSetDevice(i); cudaDeviceSynchronize(); } return 0; }
.file "tmpxft_00121fe8_00000000-6_memcpyasynctest.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z23__device_stub__Z5dummyvv .type _Z23__device_stub__Z5dummyvv, @function _Z23__device_stub__Z5dummyvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z5dummyv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z23__device_stub__Z5dummyvv, .-_Z23__device_stub__Z5dummyvv .globl _Z5dummyv .type _Z5dummyv, @function _Z5dummyv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z23__device_stub__Z5dummyvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z5dummyv, .-_Z5dummyv .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $80, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, %edi call cudaSetDevice@PLT leaq 32(%rsp), %rdi movl 
$800000000, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $80000000, %esi call cudaMalloc@PLT movl $0, %edi call cudaSetDevice@PLT leaq 16(%rsp), %rdi movl $800000000, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $80000000, %esi call cudaMalloc@PLT movq %rsp, %rdi call cudaStreamCreate@PLT leaq 8(%rsp), %rdi call cudaStreamCreate@PLT movl $0, %esi movl $1, %edi call cudaDeviceEnablePeerAccess@PLT movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L12: movl $0, %edi call cudaSetDevice@PLT movq (%rsp), %r9 movl $800000000, %r8d movl $0, %ecx movq 16(%rsp), %rdx movl $1, %esi movq 32(%rsp), %rdi call cudaMemcpyPeerAsync@PLT movl $1, %edi call cudaSetDevice@PLT movl $10, %ebx jmp .L14 .L18: call _Z23__device_stub__Z5dummyvv jmp .L12 .L13: subl $1, %ebx je .L19 .L14: movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L13 call _Z23__device_stub__Z5dummyvv jmp .L13 .L19: movl $0, %edi call cudaSetDevice@PLT movl $0, %r9d movl $80000000, %r8d movl $0, %ecx movq 24(%rsp), %rdx movl $1, %esi movq 40(%rsp), %rdi call cudaMemcpyPeerAsync@PLT movl $0, %edi call cudaSetDevice@PLT call cudaDeviceSynchronize@PLT movl $1, %edi call cudaSetDevice@PLT call cudaDeviceSynchronize@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $80, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z5dummyv" .text .type _ZL24__sti____cudaRegisterAllv, @function 
_ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z5dummyv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> __global__ void dummy() { int j = 0; for(int i = 0; i < 1000000; i++) j++; } int main() { cudaStream_t stream1, stream2; double *A, *B, *C, *D; cudaSetDevice(1); cudaMalloc((void **) &C, 100000000 * sizeof(double)); cudaMalloc((void **) &D, 10000000 * sizeof(double)); cudaSetDevice(0); cudaMalloc((void **) &A, 100000000 * sizeof(double)); cudaMalloc((void **) &B, 10000000 * sizeof(double)); cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaDeviceEnablePeerAccess(1, 0); dummy<<<1, 1>>>(); cudaSetDevice(0); cudaMemcpyPeerAsync(C, 1, A, 0, 100000000 * sizeof(double), stream1); cudaSetDevice(1); for(int i = 0; i < 10; i++) dummy<<<1, 1>>>(); cudaSetDevice(0); cudaMemcpyPeerAsync(D, 1, B, 0, 10000000 * sizeof(double)); for(int i = 0; i < 2; i++) { cudaSetDevice(i); cudaDeviceSynchronize(); } return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void dummy() { int j = 0; for(int i = 0; i < 1000000; i++) j++; } int main() { hipStream_t stream1, stream2; double *A, *B, *C, *D; hipSetDevice(1); hipMalloc((void **) &C, 100000000 * sizeof(double)); hipMalloc((void **) &D, 10000000 * sizeof(double)); hipSetDevice(0); hipMalloc((void **) &A, 100000000 * sizeof(double)); hipMalloc((void **) &B, 10000000 * sizeof(double)); hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipDeviceEnablePeerAccess(1, 0); dummy<<<1, 1>>>(); hipSetDevice(0); hipMemcpyPeerAsync(C, 1, A, 0, 100000000 * sizeof(double), stream1); hipSetDevice(1); for(int i = 0; i < 10; i++) dummy<<<1, 1>>>(); hipSetDevice(0); hipMemcpyPeerAsync(D, 1, B, 0, 10000000 * sizeof(double)); for(int i = 0; i < 2; i++) { hipSetDevice(i); hipDeviceSynchronize(); } return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void dummy() { int j = 0; for(int i = 0; i < 1000000; i++) j++; } int main() { hipStream_t stream1, stream2; double *A, *B, *C, *D; hipSetDevice(1); hipMalloc((void **) &C, 100000000 * sizeof(double)); hipMalloc((void **) &D, 10000000 * sizeof(double)); hipSetDevice(0); hipMalloc((void **) &A, 100000000 * sizeof(double)); hipMalloc((void **) &B, 10000000 * sizeof(double)); hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipDeviceEnablePeerAccess(1, 0); dummy<<<1, 1>>>(); hipSetDevice(0); hipMemcpyPeerAsync(C, 1, A, 0, 100000000 * sizeof(double), stream1); hipSetDevice(1); for(int i = 0; i < 10; i++) dummy<<<1, 1>>>(); hipSetDevice(0); hipMemcpyPeerAsync(D, 1, B, 0, 10000000 * sizeof(double)); for(int i = 0; i < 2; i++) { hipSetDevice(i); hipDeviceSynchronize(); } return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5dummyv .globl _Z5dummyv .p2align 8 .type _Z5dummyv,@function _Z5dummyv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5dummyv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5dummyv, .Lfunc_end0-_Z5dummyv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: [] .group_segment_fixed_size: 0 
.kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5dummyv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z5dummyv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void dummy() { int j = 0; for(int i = 0; i < 1000000; i++) j++; } int main() { hipStream_t stream1, stream2; double *A, *B, *C, *D; hipSetDevice(1); hipMalloc((void **) &C, 100000000 * sizeof(double)); hipMalloc((void **) &D, 10000000 * sizeof(double)); hipSetDevice(0); hipMalloc((void **) &A, 100000000 * sizeof(double)); hipMalloc((void **) &B, 10000000 * sizeof(double)); hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipDeviceEnablePeerAccess(1, 0); dummy<<<1, 1>>>(); hipSetDevice(0); hipMemcpyPeerAsync(C, 1, A, 0, 100000000 * sizeof(double), stream1); hipSetDevice(1); for(int i = 0; i < 10; i++) dummy<<<1, 1>>>(); hipSetDevice(0); hipMemcpyPeerAsync(D, 1, B, 0, 10000000 * sizeof(double)); for(int i = 0; i < 2; i++) { hipSetDevice(i); hipDeviceSynchronize(); } return 0; }
.text .file "memcpyasynctest.hip" .globl _Z20__device_stub__dummyv # -- Begin function _Z20__device_stub__dummyv .p2align 4, 0x90 .type _Z20__device_stub__dummyv,@function _Z20__device_stub__dummyv: # @_Z20__device_stub__dummyv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z5dummyv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z20__device_stub__dummyv, .Lfunc_end0-_Z20__device_stub__dummyv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $104, %rsp .cfi_def_cfa_offset 160 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $4294967297, %rbx # imm = 0x100000001 movl $1, %edi callq hipSetDevice leaq 64(%rsp), %rdi movl $800000000, %esi # imm = 0x2FAF0800 callq hipMalloc leaq 56(%rsp), %rdi movl $80000000, %esi # imm = 0x4C4B400 callq hipMalloc xorl %edi, %edi callq hipSetDevice leaq 80(%rsp), %rdi movl $800000000, %esi # imm = 0x2FAF0800 callq hipMalloc leaq 72(%rsp), %rdi movl $80000000, %esi # imm = 0x4C4B400 callq hipMalloc leaq 88(%rsp), %rdi callq hipStreamCreate leaq 96(%rsp), %rdi callq hipStreamCreate movl $1, %edi xorl %esi, %esi callq hipDeviceEnablePeerAccess movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: 
leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z5dummyv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: xorl %edi, %edi callq hipSetDevice movq 64(%rsp), %rdi movq 80(%rsp), %rdx movq 88(%rsp), %r9 movl $800000000, %r8d # imm = 0x2FAF0800 movl $1, %esi xorl %ecx, %ecx callq hipMemcpyPeerAsync movl $1, %edi callq hipSetDevice movl $10, %r14d leaq 16(%rsp), %r15 leaq 8(%rsp), %r12 movq %rsp, %r13 leaq 48(%rsp), %rbp jmp .LBB1_3 .p2align 4, 0x90 .LBB1_5: # in Loop: Header=BB1_3 Depth=1 decl %r14d je .LBB1_6 .LBB1_3: # =>This Inner Loop Header: Depth=1 movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_5 # %bb.4: # in Loop: Header=BB1_3 Depth=1 leaq 32(%rsp), %rdi movq %r15, %rsi movq %r12, %rdx movq %r13, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d movl $_Z5dummyv, %edi movq %rbp, %r9 pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB1_5 .LBB1_6: xorl %ebx, %ebx xorl %edi, %edi callq hipSetDevice movq 56(%rsp), %rdi movq 72(%rsp), %rdx movl $80000000, %r8d # imm = 0x4C4B400 movl $1, %esi xorl %ecx, %ecx xorl %r9d, %r9d callq hipMemcpyPeerAsync .p2align 4, 0x90 .LBB1_7: # =>This Inner Loop Header: Depth=1 movl %ebx, %edi callq hipSetDevice callq hipDeviceSynchronize incl %ebx cmpl $1, %ebx je .LBB1_7 # %bb.8: xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq 
%rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5dummyv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z5dummyv,@object # @_Z5dummyv .section .rodata,"a",@progbits .globl _Z5dummyv .p2align 3, 0x0 _Z5dummyv: .quad _Z20__device_stub__dummyv .size _Z5dummyv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5dummyv" .size .L__unnamed_1, 10 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm 
__hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__dummyv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5dummyv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z5dummyv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z5dummyv .globl _Z5dummyv .p2align 8 .type _Z5dummyv,@function _Z5dummyv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z5dummyv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z5dummyv, .Lfunc_end0-_Z5dummyv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: [] .group_segment_fixed_size: 0 
.kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z5dummyv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z5dummyv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00121fe8_00000000-6_memcpyasynctest.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z23__device_stub__Z5dummyvv .type _Z23__device_stub__Z5dummyvv, @function _Z23__device_stub__Z5dummyvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z5dummyv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z23__device_stub__Z5dummyvv, .-_Z23__device_stub__Z5dummyvv .globl _Z5dummyv .type _Z5dummyv, @function _Z5dummyv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z23__device_stub__Z5dummyvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z5dummyv, .-_Z5dummyv .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $80, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, %edi call cudaSetDevice@PLT leaq 32(%rsp), %rdi movl 
$800000000, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $80000000, %esi call cudaMalloc@PLT movl $0, %edi call cudaSetDevice@PLT leaq 16(%rsp), %rdi movl $800000000, %esi call cudaMalloc@PLT leaq 24(%rsp), %rdi movl $80000000, %esi call cudaMalloc@PLT movq %rsp, %rdi call cudaStreamCreate@PLT leaq 8(%rsp), %rdi call cudaStreamCreate@PLT movl $0, %esi movl $1, %edi call cudaDeviceEnablePeerAccess@PLT movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L12: movl $0, %edi call cudaSetDevice@PLT movq (%rsp), %r9 movl $800000000, %r8d movl $0, %ecx movq 16(%rsp), %rdx movl $1, %esi movq 32(%rsp), %rdi call cudaMemcpyPeerAsync@PLT movl $1, %edi call cudaSetDevice@PLT movl $10, %ebx jmp .L14 .L18: call _Z23__device_stub__Z5dummyvv jmp .L12 .L13: subl $1, %ebx je .L19 .L14: movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L13 call _Z23__device_stub__Z5dummyvv jmp .L13 .L19: movl $0, %edi call cudaSetDevice@PLT movl $0, %r9d movl $80000000, %r8d movl $0, %ecx movq 24(%rsp), %rdx movl $1, %esi movq 40(%rsp), %rdi call cudaMemcpyPeerAsync@PLT movl $0, %edi call cudaSetDevice@PLT call cudaDeviceSynchronize@PLT movl $1, %edi call cudaSetDevice@PLT call cudaDeviceSynchronize@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $80, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z5dummyv" .text .type _ZL24__sti____cudaRegisterAllv, @function 
_ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z5dummyv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "memcpyasynctest.hip" .globl _Z20__device_stub__dummyv # -- Begin function _Z20__device_stub__dummyv .p2align 4, 0x90 .type _Z20__device_stub__dummyv,@function _Z20__device_stub__dummyv: # @_Z20__device_stub__dummyv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z5dummyv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z20__device_stub__dummyv, .Lfunc_end0-_Z20__device_stub__dummyv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $104, %rsp .cfi_def_cfa_offset 160 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $4294967297, %rbx # imm = 0x100000001 movl $1, %edi callq hipSetDevice leaq 64(%rsp), %rdi movl $800000000, %esi # imm = 0x2FAF0800 callq hipMalloc leaq 56(%rsp), %rdi movl $80000000, %esi # imm = 0x4C4B400 callq hipMalloc xorl %edi, %edi callq hipSetDevice leaq 80(%rsp), %rdi movl $800000000, %esi # imm = 0x2FAF0800 callq hipMalloc leaq 72(%rsp), %rdi movl $80000000, %esi # imm = 0x4C4B400 callq hipMalloc leaq 88(%rsp), %rdi callq hipStreamCreate leaq 96(%rsp), %rdi callq hipStreamCreate movl $1, %edi xorl %esi, %esi callq hipDeviceEnablePeerAccess movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: 
leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z5dummyv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: xorl %edi, %edi callq hipSetDevice movq 64(%rsp), %rdi movq 80(%rsp), %rdx movq 88(%rsp), %r9 movl $800000000, %r8d # imm = 0x2FAF0800 movl $1, %esi xorl %ecx, %ecx callq hipMemcpyPeerAsync movl $1, %edi callq hipSetDevice movl $10, %r14d leaq 16(%rsp), %r15 leaq 8(%rsp), %r12 movq %rsp, %r13 leaq 48(%rsp), %rbp jmp .LBB1_3 .p2align 4, 0x90 .LBB1_5: # in Loop: Header=BB1_3 Depth=1 decl %r14d je .LBB1_6 .LBB1_3: # =>This Inner Loop Header: Depth=1 movq %rbx, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_5 # %bb.4: # in Loop: Header=BB1_3 Depth=1 leaq 32(%rsp), %rdi movq %r15, %rsi movq %r12, %rdx movq %r13, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d movl $_Z5dummyv, %edi movq %rbp, %r9 pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB1_5 .LBB1_6: xorl %ebx, %ebx xorl %edi, %edi callq hipSetDevice movq 56(%rsp), %rdi movq 72(%rsp), %rdx movl $80000000, %r8d # imm = 0x4C4B400 movl $1, %esi xorl %ecx, %ecx xorl %r9d, %r9d callq hipMemcpyPeerAsync .p2align 4, 0x90 .LBB1_7: # =>This Inner Loop Header: Depth=1 movl %ebx, %edi callq hipSetDevice callq hipDeviceSynchronize incl %ebx cmpl $1, %ebx je .LBB1_7 # %bb.8: xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq 
%rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5dummyv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z5dummyv,@object # @_Z5dummyv .section .rodata,"a",@progbits .globl _Z5dummyv .p2align 3, 0x0 _Z5dummyv: .quad _Z20__device_stub__dummyv .size _Z5dummyv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z5dummyv" .size .L__unnamed_1, 10 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm 
__hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z20__device_stub__dummyv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z5dummyv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* Copyright (C) Muaaz Gul Awan and Fahad Saeed This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include<iostream> #include<vector> #include<stdlib.h> #include<time.h> #include<math.h> #include<ctime> #include<algorithm> #include<utility> #include <curand.h> #include <curand_kernel.h> #include<random> #include <thrust/scan.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <string> #include <fstream> using namespace std; #define elesPerBucket 20 #define sampleRate 10 //#define totArrays 10000 #define maxSize2 10000 //#define minSize 10000 #define SAMPLED (sampleRate*maxSize)/100 #define maxBuckets (maxSize/elesPerBucket) inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } //swap function for Insertion sort template <class type> __device__ void swapD (type &a, type &b) { /* &a and &b are reference variables */ type temp; temp=a; a=b; b=temp; } //insertion sort template <class type> __device__ void insertionSort(type *input, int begin, int end){ int i, j; //,tmp; for (i = begin+1; i < end; i++) { j = i; while (j > begin && input[j - 1] > input[j]) { swapD(input[j], input[j-1]); j--; }//end of while loop } } __device__ void 
getMinMax(int input[], int beginPtr, int endPtr, int *ret){ int min = input[beginPtr]; int max = 0; // int *ret = new int[2]; for(int i = beginPtr; i < endPtr; i++){ if(min > input[i]) min = input[i]; if (max < input[i]) max = input[i]; } ret[0] = min; ret[1] = max; //return ret; } //data generation template <typename mType> struct dataArrays{ vector<mType> dataList; int *prefixArray; }; template <typename type> dataArrays<type> dataGen (int numOfArrays, int maxArraySize, int minArraySize){ dataArrays<int> data; data.prefixArray = new int[numOfArrays+1]; //exclusive prefix scan const int range_from = 0; const unsigned int range_to = 5000;//2147483647; //2^31 - 1 random_device rand_dev; mt19937 generator(rand_dev()); uniform_int_distribution<int> distr(range_from, range_to); int prefixSum = 0; srand(time(0)); for( int i = 0; i < numOfArrays; i++){ int size = rand()%(maxArraySize-minArraySize + 1) + minArraySize; data.prefixArray[i] = prefixSum; for(int j = prefixSum; j < prefixSum + size; j++){ data.dataList.push_back(distr(generator)); } prefixSum += size; } data.prefixArray[numOfArrays] = prefixSum; return data; } //kernel for obtaining num of buckets for each array __global__ void getNumOfBuckets(int *prefixSumArray, int *numOfBucketsArray, int totArrays){ int id = blockIdx.x; // * blockDim.x + threadIdx.x; if(id < totArrays) numOfBucketsArray[id] = (prefixSumArray[id+1] - prefixSumArray[id])/elesPerBucket; } template <typename mType, int maxSize> __device__ void getSplitters (mType *data, mType *splittersArray, int sample[], int beginPtr, int endPtr, int arraySize, int *prefixBucketsArray){ __shared__ mType mySamples[SAMPLED]; //int *ret = new int[2]; //int arraySize = endPtr - beginPtr; // calculating samples for this array int numOfSamples = ((float)sampleRate/100)*(arraySize); //calculating the number of buckets for this array int numOfBuckets = (blockIdx.x == 0) ? 
prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]); for(int i = 0; i < numOfSamples; i++) mySamples[i] = data[beginPtr+sample[i]]; insertionSort(mySamples, 0, numOfSamples); //calculate splitter index for this array int splitterIndex = ((blockIdx.x == 0)? 1 : (prefixBucketsArray[blockIdx.x-1]+1))+1; //the other plus one is for leaving space for smallest splitter(added later) int splittersSize=0; for(int i = (numOfSamples)/(numOfBuckets); splittersSize < numOfBuckets-1; i +=(numOfSamples)/(numOfBuckets)){ splittersArray[splitterIndex] = mySamples[i]; splitterIndex++; splittersSize++; } //getMinMax(data, beginPtr, endPtr, ret); int bits = 8*sizeof(mType); mType min = -(1 << (bits-1)); mType max = (1 << (bits - 1)) - 1;//int max = (1 << (bits-1)) � 1; splittersArray[((blockIdx.x == 0)? 0 : (prefixBucketsArray[blockIdx.x-1]+1))] = min;//ret[0]-2;//to accodmodate the smallest splittersArray[((blockIdx.x == 0)? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x]))] = max;//ret[1]+2; //delete [] ret; } //kernel for obtaining splitters template <typename mType, int maxSize> __global__ void splitterKer(mType *data, mType *splittersArray, int *prefixSizeArray, int *prefixBucketsArray, int totArrays){ if(blockIdx.x < totArrays){ int id = blockIdx.x; __shared__ int sampleSh[SAMPLED]; int arraySize = prefixSizeArray[id+1] - prefixSizeArray[id]; // calculating samples for this array int numOfSamples = ((float)sampleRate/100)*(arraySize); //int *h_sample = new int[SAMPLED]; int max = arraySize; int sam = numOfSamples; int stride = max/sam; int sampleVal = 0; for( int i = 0; i < numOfSamples; i++) { sampleSh[i] = sampleVal; sampleVal += stride; } //for(int i = 0; i < numOfSamples; i++) // sampleSh[i] = mySample[i]; getSplitters<mType, maxSize>(data, splittersArray, sampleSh, prefixSizeArray[id], prefixSizeArray[id+1], prefixSizeArray[id+1] - prefixSizeArray[id], prefixBucketsArray); } } template <typename mType, int maxSize> __device__ 
void getBuckets(mType *input, mType *splitters, int beginPtr, int endPtr, int *bucketsSize, mType *myInput, int *prefixBucketsArray, int totArrays){ volatile int numOfBuckets = (blockIdx.x == 0) ? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]); if(blockIdx.x < totArrays && threadIdx.x < numOfBuckets){ int id = threadIdx.x; int sizeOffset = (blockIdx.x == 0) ? (0+threadIdx.x) : (prefixBucketsArray[blockIdx.x-1] + threadIdx.x); //blockIdx.x*BUCKETS+threadIdx.x; int sizeOffsetBlock = (blockIdx.x == 0) ? (0) : (prefixBucketsArray[blockIdx.x-1]); int bucketSizeOff = sizeOffset+1; mType myBucket[maxSize]; //make it shared as well // int bucketIndexOffset; int indexSum=0; bucketsSize[bucketSizeOff] = 0; for(int i = 0; i < (endPtr - beginPtr); i++){ if(myInput[i] > splitters[id] && myInput[i] <= splitters[id+1]){ myBucket[bucketsSize[bucketSizeOff]] = myInput[i]; bucketsSize[bucketSizeOff]++; } } __syncthreads(); //prefix sum for bucket sizes of current array for(int j = 0; j < threadIdx.x; j++) indexSum += bucketsSize[sizeOffsetBlock+j+1]; //writing back current buckt back to the input memory for(int i = 0; i < bucketsSize[bucketSizeOff]; i++) input[indexSum+beginPtr+i] = myBucket[i]; } } //kernel for obtaining buckets template <typename mType, int maxSize> __global__ void bucketKernel(mType *data, mType *splittersArray, int *prefixSizeArray, int *prefixBucketsArray, int *bucketSizes, int totArrays){ int numOfBuckets = (blockIdx.x == 0) ? 
prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]); if(blockIdx.x < totArrays && threadIdx.x < numOfBuckets){ bucketSizes[0] = 0; int bid = blockIdx.x; int tid = threadIdx.x; int arraySize = prefixSizeArray[blockIdx.x+1] - prefixSizeArray[blockIdx.x]; int leftOvers = arraySize%numOfBuckets; int jmpFac = arraySize/numOfBuckets; int gArrayStart = prefixSizeArray[blockIdx.x] + tid*jmpFac; int gArrayEnd = (tid==(numOfBuckets-1))?(gArrayStart + jmpFac+leftOvers):(gArrayStart + jmpFac); int lArrayStart = tid*jmpFac; __shared__ int myInput [maxSize]; int arrBegin = prefixSizeArray[bid]; int arrEnd = prefixSizeArray[bid+1]; int splitterIndexSt = ((blockIdx.x == 0)? 0 : (prefixBucketsArray[blockIdx.x-1]+1));//blockIdx.x*(BUCKETS+1); int splitterIndexEd = splitterIndexSt + numOfBuckets+1; __shared__ mType splitters[maxBuckets+2]; //copy my array in shared memory in parallel for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){ myInput[i] = data[j]; } __syncthreads(); int j = 0; for(int i = splitterIndexSt; i < splitterIndexEd; i++){ splitters[j] = splittersArray[i]; j++; } getBuckets<mType, maxSize>(data, splitters, arrBegin, arrEnd, bucketSizes, myInput, prefixBucketsArray, totArrays); } } //sorting kernel template <typename mType, int maxSize> __global__ void sortBuckets(mType *buckets, int *bucketsSize, int *prefixBucketsArray, int *prefixSizeArray, int totArrays){ int numOfBuckets = (blockIdx.x == 0) ? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]); if(blockIdx.x < totArrays && threadIdx.x < numOfBuckets){ int sizeOffset = (blockIdx.x == 0) ? (0+threadIdx.x) : (prefixBucketsArray[blockIdx.x-1] + threadIdx.x); int sizeOffsetBlock = (blockIdx.x == 0) ? 
(0) : (prefixBucketsArray[blockIdx.x-1]); // int bid = blockIdx.x; int tid = threadIdx.x; int arraySize = prefixSizeArray[blockIdx.x+1] - prefixSizeArray[blockIdx.x]; int leftOvers = arraySize%numOfBuckets; int jmpFac = arraySize/numOfBuckets; int gArrayStart = prefixSizeArray[blockIdx.x] + tid*jmpFac; int gArrayEnd = (tid==(numOfBuckets-1))?(gArrayStart + jmpFac+leftOvers):(gArrayStart + jmpFac); int lArrayStart = tid*jmpFac; //int lArrayEnd = (tid==(BUCKETS-1))?(lArrayStart + jmpFac+leftOvers):(lArrayStart + jmpFac); __shared__ mType myArray [maxSize]; int indexSum = 0; for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){ myArray[i] = buckets[j]; } __syncthreads(); //prefix sum for bucket sizes of current array for(int j = 0; j < threadIdx.x; j++) indexSum += bucketsSize[sizeOffsetBlock+j+1]; insertionSort(myArray, indexSum,indexSum + bucketsSize[sizeOffset+1]); __syncthreads(); for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){ buckets[j] = myArray[i]; } __syncthreads(); } } template <typename mType, int maxSize> void gpuArraySort(dataArrays<mType> newData, int *prefixSum, int flag, int totArrays){ int *d_prefixSum, *d_numOfBuckets; if(flag == 1){ int *d_prefixSumDo; cudaMalloc((void**) &d_prefixSumDo, (totArrays+1)*sizeof(int)); cudaMemcpy(d_prefixSumDo, prefixSum, sizeof(int)*(1+totArrays), cudaMemcpyHostToDevice); //casting device ptr to thrust dev_ptr thrust::device_ptr<int> prefixDo = thrust::device_pointer_cast(d_prefixSumDo); //performing prefixSum using thrust thrust::exclusive_scan(prefixDo, prefixDo + totArrays, prefixDo); prefixDo[totArrays] = prefixDo[totArrays-1] + prefixSum[totArrays-1]; d_prefixSum = d_prefixSumDo; } mType *d_inputData, *d_splitters, *d_bucketSizes; //creating events cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int *h_totalBuckets = new int[1]; size_t size_heap, size_stack; //setting stack size limit cudaDeviceSetLimit(cudaLimitStackSize,10240); cudaDeviceGetLimit(&size_heap, 
cudaLimitMallocHeapSize); cudaDeviceGetLimit(&size_stack, cudaLimitStackSize); // allocating device memory for prefixSum, num Of buckets, splitters, bucketSizes cudaMalloc((void**) &d_prefixSum, (totArrays+1)*sizeof(int)); //copying prefixSums to Device cudaMemcpy(d_prefixSum, prefixSum, sizeof(int)*(1+totArrays), cudaMemcpyHostToDevice); //allocating device memory for inputData thrust::device_vector<mType> inData (newData.dataList.size()); thrust::copy(newData.dataList.begin(), newData.dataList.end(), inData.begin()); cudaDeviceSynchronize(); clock_t start2, end2; double cpu_time_used; cudaEventRecord(start); start2 = clock(); // allocating device memory for prefixSum, num Of buckets, splitters, bucketSizes cudaMalloc((void**) &d_numOfBuckets, (totArrays)*sizeof(int)); //clculating buckets on GPU getNumOfBuckets<<<totArrays,1>>>(d_prefixSum, d_numOfBuckets, totArrays); //casting device ptr to thrust dev_ptr thrust::device_ptr<int> prefixNumBuckets = thrust::device_pointer_cast(d_numOfBuckets); //performing prefixSum using thrust thrust::inclusive_scan(prefixNumBuckets, prefixNumBuckets + totArrays, prefixNumBuckets); //copying total number of buckets back checkCuda(cudaMemcpy(h_totalBuckets, d_numOfBuckets+(totArrays-1), sizeof(int), cudaMemcpyDeviceToHost)); //allocating device memory for splitters cudaMalloc((void**) &d_splitters, (totArrays+h_totalBuckets[0])*sizeof(mType)); cudaMalloc((void**) &d_bucketSizes, (1+h_totalBuckets[0])*sizeof(int)); d_inputData = thrust::raw_pointer_cast(&inData[0]); cout<< "**** Generating Splitters ****" << endl; splitterKer<mType, maxSize><<<totArrays, 1>>>(d_inputData, d_splitters, d_prefixSum, d_numOfBuckets, totArrays); cout<< "**** Splitters Generated****" << endl; cout<< "**** Generating Buckets ****" << endl; bucketKernel<mType, maxSize><<<totArrays, maxBuckets>>>(d_inputData, d_splitters, d_prefixSum, d_numOfBuckets, d_bucketSizes, totArrays); cout<< "**** Buckets Generated ****" << endl; cout<< "**** Sorting Buckets 
****" << endl; sortBuckets<mType, maxSize><<<totArrays, maxBuckets>>>(d_inputData, d_bucketSizes,d_numOfBuckets, d_prefixSum, totArrays); cout<< "**** Writing Back ****" << endl; cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cudaDeviceSynchronize(); end2 = clock(); cpu_time_used = ((double) (end2 - start2)) * 1000 / CLOCKS_PER_SEC; mType *h_bucketedData = new mType[newData.prefixArray[totArrays]]; checkCuda(cudaMemcpy(h_bucketedData, d_inputData, (newData.prefixArray[totArrays])*sizeof(mType), cudaMemcpyDeviceToHost)); // cout<<"printing bucketed array:"; // for(int i = newData.prefixArray[totArrays-2]; i < newData.prefixArray[totArrays-1]; i++) // cout<< i<<":"<<h_bucketedData[i]<<endl; cout<< "**** Arrays Sorted, Time Taken : "<< milliseconds<<"****" << endl; cout<< "**** Arrays Sorted, Time Taken (on CPU) : "<< cpu_time_used<<"****" << endl; } dataArrays<int> readFile(int size , int totArrays){ string fn = "../Futhark/list_" + to_string(size) + "_" + to_string(totArrays) + ".in"; ifstream file(fn); if(!file.good()){ cout<< "Error reading file " << fn << "\n"; throw "Can't read file"; } dataArrays<int> data; int cur; for(int a = 0; a < totArrays*size && file.good(); a++){ int c = file.peek(); if(isdigit(c) ){ file >> cur; data.dataList.push_back(cur); } else { file.get(); a--; } } if(!file.good()){ cout<< "Error reading file " << fn << "\n"; throw "Can't read file"; } data.prefixArray = new int[totArrays+1]; //exclusive prefix scan int prefix = 0; for(int a =0; a <= totArrays; a++) { data.prefixArray[a] = prefix; prefix += size; } file.close(); return data; } int main(int argc, char *argv[]){ if(argc != 3){ printf("Need 2 arguments\n"); return 0; } int totArrays = stoi(argv[1]); int size = stoi(argv[2]); printf("%i arrays of length %i\n", totArrays, size); int maxSize = size; int minSize = size; //generate data //dataArrays<int> newData = (dataGen<int>(totArrays,maxSize,minSize)); 
//read data dataArrays<int> newData; try { newData = readFile(size, totArrays); } catch (const char* msg) { cerr << msg << endl; return 0; } int *prefixSum = newData.prefixArray; //calling GPU-ArraySort if(maxSize == 100){ gpuArraySort<int, 100>(newData, prefixSum, 0, totArrays); } else if(maxSize == 1000) { gpuArraySort<int, 1000>(newData, prefixSum, 0, totArrays); } else if(maxSize == 10000) { gpuArraySort<int, 10000>(newData, prefixSum, 0, totArrays); } else { printf("Incorrect arraysizes, we only support 100, 1000 and 10000\n"); } }
/* Copyright (C) Muaaz Gul Awan and Fahad Saeed
   This program is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free Software
   Foundation; either version 2 of the License, or (at your option) any later
   version. This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
   details. You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */

#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <ctime>
#include <algorithm>
#include <utility>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <random>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <string>
#include <fstream>
#include <cassert>   // BUG FIX: checkCuda uses assert() under DEBUG but <cassert> was never included
#include <cstdio>    // fprintf in checkCuda
#include <cctype>    // isdigit in readFile

using namespace std;

#define elesPerBucket 20
#define sampleRate 10
//#define totArrays 10000
#define maxSize2 10000
//#define minSize 10000
#define SAMPLED (sampleRate*maxSize)/100
#define maxBuckets (maxSize/elesPerBucket)

// Error-check helper; a no-op pass-through unless DEBUG/_DEBUG is defined.
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
    if (result != hipSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
        assert(result == hipSuccess);
    }
#endif
    return result;
}

// Swap two values through a reference pair (used by insertionSort).
template <class type>
__device__ void swapD(type &a, type &b)
{
    type temp;
    temp = a;
    a = b;
    b = temp;
}

// In-place insertion sort of input[begin, end).
template <class type>
__device__ void insertionSort(type *input, int begin, int end)
{
    int i, j;
    for (i = begin + 1; i < end; i++) {
        j = i;
        while (j > begin && input[j - 1] > input[j]) {
            swapD(input[j], input[j - 1]);
            j--;
        }
    }
}

// Writes min/max of input[beginPtr, endPtr) into ret[0]/ret[1].
// NOTE(review): max starts at 0, so for all-negative data ret[1] is 0, not
// the true maximum. Currently unused (its call site is commented out below).
__device__ void getMinMax(int input[], int beginPtr, int endPtr, int *ret)
{
    int min = input[beginPtr];
    int max = 0;
    for (int i = beginPtr; i < endPtr; i++) {
        if (min > input[i]) min = input[i];
        if (max < input[i]) max = input[i];
    }
    ret[0] = min;
    ret[1] = max;
}

// Flat list of all array elements plus an exclusive prefix-sum of array sizes.
template <typename mType>
struct dataArrays {
    vector<mType> dataList;
    int *prefixArray;
};

// Random data generator (alternative to readFile).
// NOTE(review): the body hard-codes dataArrays<int>, so this only compiles
// when instantiated with type == int.
template <typename type>
dataArrays<type> dataGen(int numOfArrays, int maxArraySize, int minArraySize)
{
    dataArrays<int> data;
    data.prefixArray = new int[numOfArrays + 1]; // exclusive prefix scan
    const int range_from = 0;
    const unsigned int range_to = 5000; // 2147483647; 2^31 - 1
    random_device rand_dev;
    mt19937 generator(rand_dev());
    uniform_int_distribution<int> distr(range_from, range_to);
    int prefixSum = 0;
    srand(time(0));
    for (int i = 0; i < numOfArrays; i++) {
        int size = rand() % (maxArraySize - minArraySize + 1) + minArraySize;
        data.prefixArray[i] = prefixSum;
        for (int j = prefixSum; j < prefixSum + size; j++) {
            data.dataList.push_back(distr(generator));
        }
        prefixSum += size;
    }
    data.prefixArray[numOfArrays] = prefixSum;
    return data;
}

// One block per array: bucket count = arraySize / elesPerBucket.
__global__ void getNumOfBuckets(int *prefixSumArray, int *numOfBucketsArray, int totArrays)
{
    int id = blockIdx.x; // * blockDim.x + threadIdx.x;
    if (id < totArrays)
        numOfBucketsArray[id] = (prefixSumArray[id + 1] - prefixSumArray[id]) / elesPerBucket;
}

// Sorts the sampled elements of one array and writes numOfBuckets+1 splitter
// values (type-min sentinel, the sampled splitters, type-max sentinel) into
// this array's slice of splittersArray.
template <typename mType, int maxSize>
__device__ void getSplitters(mType *data, mType *splittersArray, int sample[], int beginPtr,
                             int endPtr, int arraySize, int *prefixBucketsArray)
{
    __shared__ mType mySamples[SAMPLED];
    // number of sampled elements for this array
    int numOfSamples = ((float)sampleRate / 100) * (arraySize);
    // number of buckets for this array (difference of the inclusive scan)
    int numOfBuckets = (blockIdx.x == 0) ?
        prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x - 1]);
    for (int i = 0; i < numOfSamples; i++)
        mySamples[i] = data[beginPtr + sample[i]];
    insertionSort(mySamples, 0, numOfSamples);
    // splitter index for this array; the extra +1 leaves space for the
    // smallest splitter (added below)
    int splitterIndex = ((blockIdx.x == 0) ? 1 : (prefixBucketsArray[blockIdx.x - 1] + 1)) + 1;
    int splittersSize = 0;
    for (int i = (numOfSamples) / (numOfBuckets); splittersSize < numOfBuckets - 1;
         i += (numOfSamples) / (numOfBuckets)) {
        splittersArray[splitterIndex] = mySamples[i];
        splitterIndex++;
        splittersSize++;
    }
    int bits = 8 * sizeof(mType);
    // BUG FIX: the original computed the sentinels as -(1 << (bits-1)) and
    // (1 << (bits-1)) - 1, which left-shifts a signed 1 into the sign bit —
    // undefined behaviour for 32-bit mType. Build the same values through an
    // unsigned intermediate instead.
    mType max = (mType)((1ULL << (bits - 1)) - 1);
    mType min = (mType)(-max - 1);
    splittersArray[((blockIdx.x == 0) ? 0 : (prefixBucketsArray[blockIdx.x - 1] + 1))] = min; // smallest sentinel
    splittersArray[((blockIdx.x == 0) ? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x]))] = max; // largest sentinel
}

// One block per array: builds a regular-stride sample index list in shared
// memory and delegates to getSplitters.
template <typename mType, int maxSize>
__global__ void splitterKer(mType *data, mType *splittersArray, int *prefixSizeArray,
                            int *prefixBucketsArray, int totArrays)
{
    if (blockIdx.x < totArrays) {
        int id = blockIdx.x;
        __shared__ int sampleSh[SAMPLED];
        int arraySize = prefixSizeArray[id + 1] - prefixSizeArray[id];
        int numOfSamples = ((float)sampleRate / 100) * (arraySize);
        int max = arraySize;
        int sam = numOfSamples;
        int stride = max / sam;
        int sampleVal = 0;
        for (int i = 0; i < numOfSamples; i++) {
            sampleSh[i] = sampleVal;
            sampleVal += stride;
        }
        getSplitters<mType, maxSize>(data, splittersArray, sampleSh, prefixSizeArray[id],
                                     prefixSizeArray[id + 1],
                                     prefixSizeArray[id + 1] - prefixSizeArray[id],
                                     prefixBucketsArray);
    }
}

// One thread per bucket: scans the shared copy of the array, collects the
// elements falling in (splitters[tid], splitters[tid+1]] into a local bucket,
// then writes the bucket back to global memory at its prefix-summed offset.
template <typename mType, int maxSize>
__device__ void getBuckets(mType *input, mType *splitters, int beginPtr, int endPtr,
                           int *bucketsSize, mType *myInput, int *prefixBucketsArray, int totArrays)
{
    volatile int numOfBuckets = (blockIdx.x == 0) ?
        prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x - 1]);
    if (blockIdx.x < totArrays && threadIdx.x < numOfBuckets) {
        int id = threadIdx.x;
        int sizeOffset = (blockIdx.x == 0) ?
            (0 + threadIdx.x) : (prefixBucketsArray[blockIdx.x - 1] + threadIdx.x);
        int sizeOffsetBlock = (blockIdx.x == 0) ? (0) : (prefixBucketsArray[blockIdx.x - 1]);
        int bucketSizeOff = sizeOffset + 1;
        mType myBucket[maxSize]; // per-thread bucket staging area
        int indexSum = 0;
        bucketsSize[bucketSizeOff] = 0;
        for (int i = 0; i < (endPtr - beginPtr); i++) {
            if (myInput[i] > splitters[id] && myInput[i] <= splitters[id + 1]) {
                myBucket[bucketsSize[bucketSizeOff]] = myInput[i];
                bucketsSize[bucketSizeOff]++;
            }
        }
        __syncthreads();
        // prefix sum of the sizes of the buckets preceding this thread's
        for (int j = 0; j < threadIdx.x; j++)
            indexSum += bucketsSize[sizeOffsetBlock + j + 1];
        // write the bucket back into the input array
        for (int i = 0; i < bucketsSize[bucketSizeOff]; i++)
            input[indexSum + beginPtr + i] = myBucket[i];
    }
}

// One block per array, one thread per bucket: stages the array and its
// splitters in shared memory, then calls getBuckets.
template <typename mType, int maxSize>
__global__ void bucketKernel(mType *data, mType *splittersArray, int *prefixSizeArray,
                             int *prefixBucketsArray, int *bucketSizes, int totArrays)
{
    int numOfBuckets = (blockIdx.x == 0) ?
        prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x - 1]);
    if (blockIdx.x < totArrays && threadIdx.x < numOfBuckets) {
        bucketSizes[0] = 0;
        int bid = blockIdx.x;
        int tid = threadIdx.x;
        int arraySize = prefixSizeArray[blockIdx.x + 1] - prefixSizeArray[blockIdx.x];
        int leftOvers = arraySize % numOfBuckets;
        int jmpFac = arraySize / numOfBuckets;
        int gArrayStart = prefixSizeArray[blockIdx.x] + tid * jmpFac;
        int gArrayEnd = (tid == (numOfBuckets - 1)) ?
            (gArrayStart + jmpFac + leftOvers) : (gArrayStart + jmpFac);
        int lArrayStart = tid * jmpFac;
        __shared__ int myInput[maxSize];
        int arrBegin = prefixSizeArray[bid];
        int arrEnd = prefixSizeArray[bid + 1];
        int splitterIndexSt = ((blockIdx.x == 0) ? 0 : (prefixBucketsArray[blockIdx.x - 1] + 1));
        int splitterIndexEd = splitterIndexSt + numOfBuckets + 1;
        __shared__ mType splitters[maxBuckets + 2];
        // copy my slice of the array into shared memory in parallel
        for (int i = lArrayStart, j = gArrayStart; j < gArrayEnd; i++, j++) {
            myInput[i] = data[j];
        }
        __syncthreads();
        int j = 0;
        for (int i = splitterIndexSt; i < splitterIndexEd; i++) {
            splitters[j] = splittersArray[i];
            j++;
        }
        getBuckets<mType, maxSize>(data, splitters, arrBegin, arrEnd, bucketSizes,
                                   myInput, prefixBucketsArray, totArrays);
    }
}

// One block per array, one thread per bucket: each thread insertion-sorts its
// bucket inside a shared copy of the array, then the array is written back.
template <typename mType, int maxSize>
__global__ void sortBuckets(mType *buckets, int *bucketsSize, int *prefixBucketsArray,
                            int *prefixSizeArray, int totArrays)
{
    int numOfBuckets = (blockIdx.x == 0) ?
        prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x - 1]);
    if (blockIdx.x < totArrays && threadIdx.x < numOfBuckets) {
        int sizeOffset = (blockIdx.x == 0) ?
            (0 + threadIdx.x) : (prefixBucketsArray[blockIdx.x - 1] + threadIdx.x);
        int sizeOffsetBlock = (blockIdx.x == 0) ? (0) : (prefixBucketsArray[blockIdx.x - 1]);
        int tid = threadIdx.x;
        int arraySize = prefixSizeArray[blockIdx.x + 1] - prefixSizeArray[blockIdx.x];
        int leftOvers = arraySize % numOfBuckets;
        int jmpFac = arraySize / numOfBuckets;
        int gArrayStart = prefixSizeArray[blockIdx.x] + tid * jmpFac;
        int gArrayEnd = (tid == (numOfBuckets - 1)) ?
            (gArrayStart + jmpFac + leftOvers) : (gArrayStart + jmpFac);
        int lArrayStart = tid * jmpFac;
        __shared__ mType myArray[maxSize];
        int indexSum = 0;
        for (int i = lArrayStart, j = gArrayStart; j < gArrayEnd; i++, j++) {
            myArray[i] = buckets[j];
        }
        __syncthreads();
        // prefix sum of bucket sizes preceding this thread's bucket
        for (int j = 0; j < threadIdx.x; j++)
            indexSum += bucketsSize[sizeOffsetBlock + j + 1];
        insertionSort(myArray, indexSum, indexSum + bucketsSize[sizeOffset + 1]);
        __syncthreads();
        for (int i = lArrayStart, j = gArrayStart; j < gArrayEnd; i++, j++) {
            buckets[j] = myArray[i];
        }
        __syncthreads();
    }
}

// Host driver: uploads the data, runs the splitter/bucket/sort kernel
// pipeline, copies the sorted data back, and reports GPU and CPU timings.
template <typename mType, int maxSize>
void gpuArraySort(dataArrays<mType> newData, int *prefixSum, int flag, int totArrays)
{
    int *d_prefixSum, *d_numOfBuckets;
    // NOTE(review): this flag==1 path is dead (main always passes 0) and
    // looks broken — d_prefixSum is re-malloc'ed and overwritten with the
    // host prefix sums a few lines below, leaking d_prefixSumDo and
    // discarding the scan. Left as-is; verify before enabling.
    if (flag == 1) {
        int *d_prefixSumDo;
        hipMalloc((void**)&d_prefixSumDo, (totArrays + 1) * sizeof(int));
        hipMemcpy(d_prefixSumDo, prefixSum, sizeof(int) * (1 + totArrays), hipMemcpyHostToDevice);
        thrust::device_ptr<int> prefixDo = thrust::device_pointer_cast(d_prefixSumDo);
        thrust::exclusive_scan(prefixDo, prefixDo + totArrays, prefixDo);
        prefixDo[totArrays] = prefixDo[totArrays - 1] + prefixSum[totArrays - 1];
        d_prefixSum = d_prefixSumDo;
    }
    mType *d_inputData, *d_splitters, *d_bucketSizes;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    int *h_totalBuckets = new int[1];
    size_t size_heap, size_stack;
    // raise the per-thread stack limit (per-thread mType myBucket[maxSize])
    hipDeviceSetLimit(hipLimitStackSize, 10240);
    hipDeviceGetLimit(&size_heap, hipLimitMallocHeapSize);
    hipDeviceGetLimit(&size_stack, hipLimitStackSize);
    // device memory for the prefix sums, copied from the host
    hipMalloc((void**)&d_prefixSum, (totArrays + 1) * sizeof(int));
    hipMemcpy(d_prefixSum, prefixSum, sizeof(int) * (1 + totArrays), hipMemcpyHostToDevice);
    // device copy of the input data
    thrust::device_vector<mType> inData(newData.dataList.size());
    thrust::copy(newData.dataList.begin(), newData.dataList.end(), inData.begin());
    hipDeviceSynchronize();
    clock_t start2, end2;
    double cpu_time_used;
    hipEventRecord(start);
    start2 = clock();
    hipMalloc((void**)&d_numOfBuckets, (totArrays) * sizeof(int));
    // per-array bucket counts, then inclusive scan so differences give counts
    getNumOfBuckets<<<totArrays, 1>>>(d_prefixSum, d_numOfBuckets, totArrays);
    thrust::device_ptr<int> prefixNumBuckets = thrust::device_pointer_cast(d_numOfBuckets);
    thrust::inclusive_scan(prefixNumBuckets, prefixNumBuckets + totArrays, prefixNumBuckets);
    // last scan entry = total bucket count
    checkCuda(hipMemcpy(h_totalBuckets, d_numOfBuckets + (totArrays - 1), sizeof(int),
                        hipMemcpyDeviceToHost));
    hipMalloc((void**)&d_splitters, (totArrays + h_totalBuckets[0]) * sizeof(mType));
    hipMalloc((void**)&d_bucketSizes, (1 + h_totalBuckets[0]) * sizeof(int));
    d_inputData = thrust::raw_pointer_cast(&inData[0]);
    cout << "**** Generating Splitters ****" << endl;
    splitterKer<mType, maxSize><<<totArrays, 1>>>(d_inputData, d_splitters, d_prefixSum,
                                                  d_numOfBuckets, totArrays);
    cout << "**** Splitters Generated****" << endl;
    cout << "**** Generating Buckets ****" << endl;
    bucketKernel<mType, maxSize><<<totArrays, maxBuckets>>>(d_inputData, d_splitters, d_prefixSum,
                                                            d_numOfBuckets, d_bucketSizes, totArrays);
    cout << "**** Buckets Generated ****" << endl;
    cout << "**** Sorting Buckets ****" << endl;
    sortBuckets<mType, maxSize><<<totArrays, maxBuckets>>>(d_inputData, d_bucketSizes,
                                                           d_numOfBuckets, d_prefixSum, totArrays);
    cout << "**** Writing Back ****" << endl;
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    hipDeviceSynchronize();
    end2 = clock();
    cpu_time_used = ((double)(end2 - start2)) * 1000 / CLOCKS_PER_SEC;
    mType *h_bucketedData = new mType[newData.prefixArray[totArrays]];
    checkCuda(hipMemcpy(h_bucketedData, d_inputData,
                        (newData.prefixArray[totArrays]) * sizeof(mType), hipMemcpyDeviceToHost));
    // cout<<"printing bucketed array:";
    // for(int i = newData.prefixArray[totArrays-2]; i < newData.prefixArray[totArrays-1]; i++)
    //     cout<< i<<":"<<h_bucketedData[i]<<endl;
    cout << "**** Arrays Sorted, Time Taken : " << milliseconds << "****" << endl;
    cout << "**** Arrays Sorted, Time Taken (on CPU) : " << cpu_time_used << "****" << endl;
    // BUG FIX: everything below leaked in the original. d_inputData is NOT
    // freed here — it points into inData, whose destructor releases it.
    delete[] h_bucketedData;
    delete[] h_totalBuckets;
    hipFree(d_prefixSum);
    hipFree(d_numOfBuckets);
    hipFree(d_splitters);
    hipFree(d_bucketSizes);
    hipEventDestroy(start);
    hipEventDestroy(stop);
}

// Reads totArrays*size integers from ../Futhark/list_<size>_<totArrays>.in,
// skipping non-digit characters; builds a uniform exclusive prefix array.
// Throws const char* on any read failure.
dataArrays<int> readFile(int size, int totArrays)
{
    string fn = "../Futhark/list_" + to_string(size) + "_" + to_string(totArrays) + ".in";
    ifstream file(fn);
    if (!file.good()) {
        cout << "Error reading file " << fn << "\n";
        throw "Can't read file";
    }
    dataArrays<int> data;
    int cur;
    for (int a = 0; a < totArrays * size && file.good(); a++) {
        int c = file.peek();
        if (isdigit(c)) {
            file >> cur;
            data.dataList.push_back(cur);
        } else {
            file.get(); // consume separator and retry this slot
            a--;
        }
    }
    if (!file.good()) {
        cout << "Error reading file " << fn << "\n";
        throw "Can't read file";
    }
    data.prefixArray = new int[totArrays + 1]; // exclusive prefix scan
    int prefix = 0;
    for (int a = 0; a <= totArrays; a++) {
        data.prefixArray[a] = prefix;
        prefix += size;
    }
    file.close();
    return data;
}

// Usage: ./prog <totArrays> <size>, size in {100, 1000, 10000}.
int main(int argc, char *argv[])
{
    if (argc != 3) {
        printf("Need 2 arguments\n");
        return 0;
    }
    int totArrays = stoi(argv[1]);
    int size = stoi(argv[2]);
    printf("%i arrays of length %i\n", totArrays, size);
    int maxSize = size;
    int minSize = size; // only used by the disabled dataGen path
    // generate data
    // dataArrays<int> newData = (dataGen<int>(totArrays,maxSize,minSize));
    // read data
    dataArrays<int> newData;
    try {
        newData = readFile(size, totArrays);
    } catch (const char *msg) {
        cerr << msg << endl;
        return 0;
    }
    int *prefixSum = newData.prefixArray;
    // calling GPU-ArraySort with a compile-time maxSize matching the input
    if (maxSize == 100) {
        gpuArraySort<int, 100>(newData, prefixSum, 0, totArrays);
    } else if (maxSize == 1000) {
        gpuArraySort<int, 1000>(newData, prefixSum, 0, totArrays);
    } else if (maxSize == 10000) {
        gpuArraySort<int, 10000>(newData, prefixSum, 0, totArrays);
    } else {
        printf("Incorrect arraysizes, we only support 100, 1000 and 10000\n");
    }
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCKSIZE 32
#define NUM char

/*
 * Naive multiply: c = a * b for n x n row-major matrices (char elements,
 * int accumulator). One thread per output element; there are no bounds
 * checks, so the launch must cover n exactly (enforced in main).
 */
__global__ void MatMul(int n, NUM *a, NUM *b, int *c){
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    int sum = 0;
    for(int i = 0; i < n; i++){
        sum += a[tidy * n + i] * b[i * n + tidx];
    }
    c[tidy * n + tidx] = sum;
}

/*
 * Shared-memory tiled variant. NOTE: it ACCUMULATES into c ("+="), so c
 * must be zero-initialized before launch (see fillMatrixZeros in main).
 * The 4x stride on as/bs presumably spreads the char elements across
 * shared-memory banks -- TODO confirm.
 */
__global__ void SharedMatMul(int n, NUM *a, NUM *b, int *c){
    __shared__ NUM as[BLOCKSIZE*BLOCKSIZE*4];
    __shared__ NUM bs[BLOCKSIZE*BLOCKSIZE*4];
    __shared__ int cs[BLOCKSIZE*BLOCKSIZE];
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    int lx = threadIdx.x;
    int ly = threadIdx.y;
    // COPY FROM GLOBAL MEMORY
    cs[ly * BLOCKSIZE + lx] = 0;
    for(int i = 0; i < n; i+=BLOCKSIZE){
        as[4*(ly * BLOCKSIZE + lx)] = a[tidy * n + (lx + i)];
        bs[4*(ly * BLOCKSIZE + lx)] = b[(ly + i) * n + tidx];
        __syncthreads();
        for(int j = 0; j < BLOCKSIZE; j++){
            cs[BLOCKSIZE * ly + lx] += as[4*(BLOCKSIZE * ly + j)] * bs[4*(BLOCKSIZE * j + lx)];
        }
        __syncthreads();
    }
    c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx];
}

// Fill an n x n matrix with uniform values from {-1, 0, 1}.
__host__ void fillMatrix(NUM *a, int n){
    for(int i = 0; i < n * n; i++){
        a[i] = rand()%3 - 1;
    }
}

// Zero an n x n int matrix.
__host__ void fillMatrixZeros(int *a, int n){
    for(int i = 0; i < n*n; i++){
        a[i] = 0;
    }
}

// CPU reference multiply; accumulates into c (like SharedMatMul does).
__host__ void hostMatMul(int n, NUM *a, NUM *b, int *c){
    for(int i=0; i<n; ++i){
        for(int j=0; j<n; ++j){
            int val = c[i*n + j];
            for(int k=0; k<n; ++k){
                val += a[i*n+k] * b[k*n +j];
            }
            c[i*n + j] = val;
        }
    }
}

// Logs every mismatching element; always returns 0 (mismatches are only
// reported, never fatal).
__host__ int verify(int n, int *a, int *b){
    int cont = 0;
    for(int i=0; i<n; ++i){
        for(int j=0; j<n; ++j){
            cont++;
            if(a[i*n+j]!=b[i*n+j]){
                printf("FALLO %d\t%d!=%d\n",cont , a[i*n+j], b[i*n+j]);
                //return 1;
            }
        }
    }
    return 0;
}

// Debug helper: print an n x n int matrix with a label.
__host__ void printmat(int *a, int n, const char *name){
    printf("mat %s:\n", name);
    for(int i=0; i<n; ++i){
        for(int j=0; j<n; ++j){
            printf("%i ", a[i*n + j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Usage: ./prog <n> <seed>; times the naive kernel and prints the ms.
int main(int argc, char *argv[]){
    if (argc != 3){
        printf("Ejecute como ./prog N\n");
        return EXIT_FAILURE;
    }
    int n = atoi(argv[1]);
    srand(atoi(argv[2]));

    /* BUG FIX: the kernels have no bounds checks and the grid is n/BLOCKSIZE
     * (integer division), so any n that is not a positive multiple of
     * BLOCKSIZE silently computed only part of the matrix. Reject it. */
    if (n <= 0 || n % BLOCKSIZE != 0){
        fprintf(stderr, "n must be a positive multiple of %d\n", BLOCKSIZE);
        return EXIT_FAILURE;
    }

    NUM *a, *b, *a_d, *b_d;
    int *c, *d, *c_d, *d_d, *matmul_simple, *matmul_shared;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float ms = 0;

    a = (NUM*)malloc(sizeof(NUM)*n*n);
    b = (NUM*)malloc(sizeof(NUM)*n*n);
    c = (int*)malloc(sizeof(int)*n*n);
    d = (int*)malloc(sizeof(int)*n*n);
    matmul_simple = (int*)malloc(sizeof(int)*n*n);
    matmul_shared = (int*)malloc(sizeof(int)*n*n);

    fillMatrix(a, n);
    fillMatrix(b, n);
    /* BUG FIX: c and d were copied to the device uninitialized (the zeroing
     * call was commented out); SharedMatMul accumulates into its output, so
     * it would have started from garbage. */
    fillMatrixZeros(c, n);
    fillMatrixZeros(d, n);

    cudaMalloc(&a_d, sizeof(NUM)*n*n);
    cudaMalloc(&b_d, sizeof(NUM)*n*n);
    cudaMalloc(&c_d, sizeof(int)*n*n);
    cudaMalloc(&d_d, sizeof(int)*n*n);
    cudaMemcpy(a_d, a, sizeof(NUM) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, sizeof(NUM) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(c_d, c, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_d, d, sizeof(int) * n * n, cudaMemcpyHostToDevice);

    dim3 block(BLOCKSIZE, BLOCKSIZE, 1);
    dim3 grid(n/BLOCKSIZE, n/BLOCKSIZE, 1);

    cudaEventRecord(start);
    MatMul<<<grid,block>>>(n ,a_d, b_d, c_d);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    printf("%f\n", ms);

    /* The SharedMatMul timing + verify() path remains disabled, as in the
     * original source. */

    /* BUG FIX: release events and all host/device allocations (everything
     * leaked before). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaFree(d_d);
    free(a); free(b); free(c); free(d);
    free(matmul_simple); free(matmul_shared);
    return 0;
}
.file "tmpxft_0009ca31_00000000-6_matmul_simple.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2065: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2065: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10fillMatrixPci .type _Z10fillMatrixPci, @function _Z10fillMatrixPci: .LFB2057: .cfi_startproc endbr64 imull %esi, %esi testl %esi, %esi jle .L8 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi), %rbp .L5: call rand@PLT movslq %eax, %rdx imulq $1431655766, %rdx, %rdx shrq $32, %rdx movl %eax, %ecx sarl $31, %ecx subl %ecx, %edx leal (%rdx,%rdx,2), %edx subl %edx, %eax subl $1, %eax movb %al, (%rbx) addq $1, %rbx cmpq %rbp, %rbx jne .L5 addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 ret .cfi_endproc .LFE2057: .size _Z10fillMatrixPci, .-_Z10fillMatrixPci .globl _Z15fillMatrixZerosPii .type _Z15fillMatrixZerosPii, @function _Z15fillMatrixZerosPii: .LFB2058: .cfi_startproc endbr64 imull %esi, %esi testl %esi, %esi jle .L11 movq %rdi, %rax movslq %esi, %rsi leaq (%rdi,%rsi,4), %rdx .L13: movl $0, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L13 .L11: ret .cfi_endproc .LFE2058: .size _Z15fillMatrixZerosPii, .-_Z15fillMatrixZerosPii .globl _Z10hostMatMuliPcS_Pi .type _Z10hostMatMuliPcS_Pi, @function _Z10hostMatMuliPcS_Pi: .LFB2059: .cfi_startproc endbr64 testl %edi, %edi jle .L23 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 
pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 movl %edi, %ebp movq %rsi, %r8 movq %rdx, %r12 movq %rcx, %r10 movslq %edi, %rdi leaq 0(,%rdi,4), %r14 movq %rsi, %rbx addq %rdi, %r8 movl $0, %r13d .L17: movl $0, %r9d .L19: movq %r10, %r11 movl (%r10,%r9,4), %esi leaq (%r12,%r9), %rcx movq %rbx, %rax .L18: movsbl (%rax), %edx movsbl (%rcx), %r15d imull %r15d, %edx addl %edx, %esi addq $1, %rax addq %rdi, %rcx cmpq %r8, %rax jne .L18 movl %esi, (%r11,%r9,4) addq $1, %r9 cmpq %rdi, %r9 jne .L19 addl $1, %r13d addq %r14, %r10 addq %rdi, %rbx addq %rdi, %r8 cmpl %r13d, %ebp jne .L17 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L23: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 .cfi_restore 15 ret .cfi_endproc .LFE2059: .size _Z10hostMatMuliPcS_Pi, .-_Z10hostMatMuliPcS_Pi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "FALLO %d\t%d!=%d\n" .text .globl _Z6verifyiPiS_ .type _Z6verifyiPiS_, @function _Z6verifyiPiS_: .LFB2060: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $56, %rsp .cfi_def_cfa_offset 112 movq %rsi, 24(%rsp) movq %rdx, 32(%rsp) testl %edi, %edi jle .L27 movslq %edi, %rax salq $2, %rax movq %rax, 40(%rsp) movl %edi, %r13d movl $0, %eax movl $0, %r15d movl $0, %ebx leaq .LC0(%rip), %r14 jmp .L28 .L29: addq $4, %r12 addq $4, %rbp cmpl %r13d, %ebx je .L33 .L30: addl $1, %ebx movl (%r12), %ecx movl 0(%rbp), %r8d cmpl %r8d, %ecx je .L29 movl %ebx, %edx movq %r14, %rsi movl $2, %edi movl $0, %eax call 
__printf_chk@PLT jmp .L29 .L33: movq 16(%rsp), %rax movl %r15d, %edi movl 12(%rsp), %r15d movl 8(%rsp), %ebx addl %edi, %ebx addl $1, %r15d addl %edi, %r13d movq 40(%rsp), %rdx addq %rdx, %rax cmpl %r15d, %edi je .L27 .L28: movl %ebx, 8(%rsp) movq 24(%rsp), %rsi leaq (%rsi,%rax), %r12 movq 32(%rsp), %rdx leaq (%rdx,%rax), %rbp movl %r15d, 12(%rsp) movq %rax, 16(%rsp) movl %edi, %r15d jmp .L30 .L27: movl $0, %eax addq $56, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z6verifyiPiS_, .-_Z6verifyiPiS_ .section .rodata.str1.1 .LC1: .string "mat %s:\n" .LC2: .string "%i " .LC3: .string "\n" .text .globl _Z8printmatPiiPKc .type _Z8printmatPiiPKc, @function _Z8printmatPiiPKc: .LFB2061: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movq %rdi, %rbp movl %esi, %ebx movl %esi, 12(%rsp) leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT testl %ebx, %ebx jle .L35 movslq %ebx, %r14 leaq 0(,%r14,4), %r15 addq %r15, %rbp negq %r14 salq $2, %r14 movl $0, %r13d leaq .LC2(%rip), %r12 .L36: leaq 0(%rbp,%r14), %rbx .L37: movl (%rbx), %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L37 leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r13d addq %r15, %rbp cmpl %r13d, 12(%rsp) jne .L36 .L35: leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq 
%rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _Z8printmatPiiPKc, .-_Z8printmatPiiPKc .globl _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi .type _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi, @function _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi: .LFB2087: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L44 .L40: movq 136(%rsp), %rax subq %fs:40, %rax jne .L45 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6MatMuliPcS_Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L40 .L45: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi, .-_Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi .globl _Z6MatMuliPcS_Pi .type _Z6MatMuliPcS_Pi, @function _Z6MatMuliPcS_Pi: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z6MatMuliPcS_Pi, .-_Z6MatMuliPcS_Pi .section .rodata.str1.1 .LC4: .string "Ejecute como ./prog N\n" .LC6: .string "%f\n" .text .globl main .type main, @function main: 
.LFB2062: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $120, %rsp .cfi_def_cfa_offset 176 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax cmpl $3, %edi je .L49 leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $1, %eax .L48: movq 104(%rsp), %rdx subq %fs:40, %rdx jne .L54 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L49: .cfi_restore_state movq %rsi, %rbp movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbx movl %eax, %r12d movq 16(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %edi call srand@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT leaq 72(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 28(%rsp) movslq %ebx, %rbx imulq %rbx, %rbx movq %rbx, %rdi call malloc@PLT movq %rax, %r14 movq %rbx, %rdi call malloc@PLT movq %rax, %r13 leaq 0(,%rbx,4), %rbp movq %rbp, %rdi call malloc@PLT movq %rax, %r15 movq %rbp, %rdi call malloc@PLT movq %rax, 8(%rsp) movl %r12d, %esi movq %r14, %rdi call _Z10fillMatrixPci movl %r12d, %esi movq %r13, %rdi call _Z10fillMatrixPci leaq 32(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 40(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, 
%ecx movq %rbp, %rdx movq %r15, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbp, %rdx movq 8(%rsp), %rsi movq 56(%rsp), %rdi call cudaMemcpy@PLT movl $32, 80(%rsp) movl $32, 84(%rsp) movl $1, 88(%rsp) movl $32, %ecx movl %r12d, %eax cltd idivl %ecx movl %eax, 92(%rsp) movl %eax, 96(%rsp) movl $1, 100(%rsp) movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movl 88(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 80(%rsp), %rdx movq 92(%rsp), %rdi movl 100(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L55 .L51: movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT call cudaDeviceSynchronize@PLT movq 72(%rsp), %rdi call cudaEventSynchronize@PLT leaq 28(%rsp), %rdi movq 72(%rsp), %rdx movq 64(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 28(%rsp), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $0, %eax jmp .L48 .L55: movq 48(%rsp), %rcx movq 40(%rsp), %rdx movq 32(%rsp), %rsi movl %r12d, %edi call _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi jmp .L51 .L54: call __stack_chk_fail@PLT .cfi_endproc .LFE2062: .size main, .-main .globl _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi .type _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi, @function _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi: .LFB2089: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L60 .L56: movq 136(%rsp), %rax subq %fs:40, %rax jne .L61 addq $152, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 8 ret .L60: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12SharedMatMuliPcS_Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L56 .L61: call __stack_chk_fail@PLT .cfi_endproc .LFE2089: .size _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi, .-_Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi .globl _Z12SharedMatMuliPcS_Pi .type _Z12SharedMatMuliPcS_Pi, @function _Z12SharedMatMuliPcS_Pi: .LFB2090: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _Z12SharedMatMuliPcS_Pi, .-_Z12SharedMatMuliPcS_Pi .section .rodata.str1.1 .LC7: .string "_Z12SharedMatMuliPcS_Pi" .LC8: .string "_Z6MatMuliPcS_Pi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2092: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z12SharedMatMuliPcS_Pi(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z6MatMuliPcS_Pi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq 
_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #define BLOCKSIZE 32 #define NUM char __global__ void MatMul(int n, NUM *a, NUM *b, int *c){ int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int sum = 0; for(int i = 0; i < n; i++){ sum += a[tidy * n + i] * b[i * n + tidx]; } c[tidy * n + tidx] = sum; } __global__ void SharedMatMul(int n, NUM *a, NUM *b, int *c){ __shared__ NUM as[BLOCKSIZE*BLOCKSIZE*4]; __shared__ NUM bs[BLOCKSIZE*BLOCKSIZE*4]; __shared__ int cs[BLOCKSIZE*BLOCKSIZE]; int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int lx = threadIdx.x; int ly = threadIdx.y; // COPY FROM GLOBAL MEMORY cs[ly * BLOCKSIZE + lx] = 0; for(int i = 0; i < n; i+=BLOCKSIZE){ as[4*(ly * BLOCKSIZE + lx)] = a[tidy * n + (lx + i)]; bs[4*(ly * BLOCKSIZE + lx)] = b[(ly + i) * n + tidx]; __syncthreads(); for(int j = 0; j < BLOCKSIZE; j++){ cs[BLOCKSIZE * ly + lx] += as[4*(BLOCKSIZE * ly + j)] * bs[4*(BLOCKSIZE * j + lx)]; } __syncthreads(); //c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } //__syncthreads(); c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } __host__ void fillMatrix(NUM *a, int n){ for(int i = 0; i < n * n; i++){ a[i] = rand()%3 - 1; } } __host__ void fillMatrixZeros(int *a, int n){ for(int i = 0; i < n*n; i++){ a[i] = 0; } } __host__ void hostMatMul(int n, NUM *a, NUM *b, int *c){ for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ int val = c[i*n + j]; for(int k=0; k<n; ++k){ val += a[i*n+k] * b[k*n +j]; } c[i*n + j] = val; } } } __host__ int verify(int n, int *a, int *b){ int cont = 0; for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ cont++; if(a[i*n+j]!=b[i*n+j]){ printf("FALLO %d\t%d!=%d\n",cont , a[i*n+j], b[i*n+j]); //return 1; } } } return 0; } __host__ void printmat(int *a, int n, const char *name){ printf("mat %s:\n", name); for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ printf("%i ", a[i*n + j]); } printf("\n"); } printf("\n"); } int main(int argc, 
char *argv[]){ if (argc != 3){ printf("Ejecute como ./prog N\n"); return EXIT_FAILURE; } int n = atoi(argv[1]); srand(atoi(argv[2])); NUM *a, *b, *a_d, *b_d; int *c, *d, *c_d, *d_d, *matmul_simple, *matmul_shared; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float ms = 0; a = (NUM*)malloc(sizeof(NUM)*n*n); b = (NUM*)malloc(sizeof(NUM)*n*n); c = (int*)malloc(sizeof(int)*n*n); d = (int*)malloc(sizeof(int)*n*n); matmul_simple = (int*)malloc(sizeof(int)*n*n); matmul_shared = (int*)malloc(sizeof(int)*n*n); fillMatrix(a, n); fillMatrix(b, n); //fillMatrixZeros(c, n); cudaMalloc(&a_d, sizeof(NUM)*n*n); cudaMalloc(&b_d, sizeof(NUM)*n*n); cudaMalloc(&c_d, sizeof(int)*n*n); cudaMalloc(&d_d, sizeof(int)*n*n); cudaMemcpy(a_d, a, sizeof(NUM) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b, sizeof(NUM) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(c_d, c, sizeof(int) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(d_d, d, sizeof(int) * n * n, cudaMemcpyHostToDevice); dim3 block(BLOCKSIZE, BLOCKSIZE, 1); dim3 grid(n/BLOCKSIZE, n/BLOCKSIZE, 1); /* printf("block: %d %d %d\n", block.x, block.y, block.z); printf("grid : %d %d %d\n", grid.x, grid.y, grid.z); */ cudaEventRecord(start); MatMul<<<grid,block>>>(n ,a_d, b_d, c_d); cudaEventRecord(stop); cudaDeviceSynchronize(); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); // printf("GPU Simple...ok! in %f ms\n", ms); printf("%f\n", ms); /* cudaEventRecord(start); SharedMatMul<<<grid,block>>>(n ,a_d, b_d, d_d); cudaEventRecord(stop); cudaDeviceSynchronize(); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); printf("GPU Shared Memory...ok! in %f ms\n", ms); cudaMemcpy(matmul_shared, d_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); cudaMemcpy(matmul_simple, c_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); // hostMatMul(n, a, b, c); if(verify(n, matmul_simple, matmul_shared) != 0){ fprintf(stderr, "error verifying result\n"); exit(EXIT_FAILURE); } */ return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define BLOCKSIZE 32 #define NUM char __global__ void MatMul(int n, NUM *a, NUM *b, int *c){ int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int sum = 0; for(int i = 0; i < n; i++){ sum += a[tidy * n + i] * b[i * n + tidx]; } c[tidy * n + tidx] = sum; } __global__ void SharedMatMul(int n, NUM *a, NUM *b, int *c){ __shared__ NUM as[BLOCKSIZE*BLOCKSIZE*4]; __shared__ NUM bs[BLOCKSIZE*BLOCKSIZE*4]; __shared__ int cs[BLOCKSIZE*BLOCKSIZE]; int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int lx = threadIdx.x; int ly = threadIdx.y; // COPY FROM GLOBAL MEMORY cs[ly * BLOCKSIZE + lx] = 0; for(int i = 0; i < n; i+=BLOCKSIZE){ as[4*(ly * BLOCKSIZE + lx)] = a[tidy * n + (lx + i)]; bs[4*(ly * BLOCKSIZE + lx)] = b[(ly + i) * n + tidx]; __syncthreads(); for(int j = 0; j < BLOCKSIZE; j++){ cs[BLOCKSIZE * ly + lx] += as[4*(BLOCKSIZE * ly + j)] * bs[4*(BLOCKSIZE * j + lx)]; } __syncthreads(); //c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } //__syncthreads(); c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } __host__ void fillMatrix(NUM *a, int n){ for(int i = 0; i < n * n; i++){ a[i] = rand()%3 - 1; } } __host__ void fillMatrixZeros(int *a, int n){ for(int i = 0; i < n*n; i++){ a[i] = 0; } } __host__ void hostMatMul(int n, NUM *a, NUM *b, int *c){ for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ int val = c[i*n + j]; for(int k=0; k<n; ++k){ val += a[i*n+k] * b[k*n +j]; } c[i*n + j] = val; } } } __host__ int verify(int n, int *a, int *b){ int cont = 0; for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ cont++; if(a[i*n+j]!=b[i*n+j]){ printf("FALLO %d\t%d!=%d\n",cont , a[i*n+j], b[i*n+j]); //return 1; } } } return 0; } __host__ void printmat(int *a, int n, const char *name){ printf("mat %s:\n", name); for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ printf("%i ", a[i*n + j]); } printf("\n"); } printf("\n"); } int 
main(int argc, char *argv[]){ if (argc != 3){ printf("Ejecute como ./prog N\n"); return EXIT_FAILURE; } int n = atoi(argv[1]); srand(atoi(argv[2])); NUM *a, *b, *a_d, *b_d; int *c, *d, *c_d, *d_d, *matmul_simple, *matmul_shared; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float ms = 0; a = (NUM*)malloc(sizeof(NUM)*n*n); b = (NUM*)malloc(sizeof(NUM)*n*n); c = (int*)malloc(sizeof(int)*n*n); d = (int*)malloc(sizeof(int)*n*n); matmul_simple = (int*)malloc(sizeof(int)*n*n); matmul_shared = (int*)malloc(sizeof(int)*n*n); fillMatrix(a, n); fillMatrix(b, n); //fillMatrixZeros(c, n); hipMalloc(&a_d, sizeof(NUM)*n*n); hipMalloc(&b_d, sizeof(NUM)*n*n); hipMalloc(&c_d, sizeof(int)*n*n); hipMalloc(&d_d, sizeof(int)*n*n); hipMemcpy(a_d, a, sizeof(NUM) * n * n, hipMemcpyHostToDevice); hipMemcpy(b_d, b, sizeof(NUM) * n * n, hipMemcpyHostToDevice); hipMemcpy(c_d, c, sizeof(int) * n * n, hipMemcpyHostToDevice); hipMemcpy(d_d, d, sizeof(int) * n * n, hipMemcpyHostToDevice); dim3 block(BLOCKSIZE, BLOCKSIZE, 1); dim3 grid(n/BLOCKSIZE, n/BLOCKSIZE, 1); /* printf("block: %d %d %d\n", block.x, block.y, block.z); printf("grid : %d %d %d\n", grid.x, grid.y, grid.z); */ hipEventRecord(start); MatMul<<<grid,block>>>(n ,a_d, b_d, c_d); hipEventRecord(stop); hipDeviceSynchronize(); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); // printf("GPU Simple...ok! in %f ms\n", ms); printf("%f\n", ms); /* cudaEventRecord(start); SharedMatMul<<<grid,block>>>(n ,a_d, b_d, d_d); cudaEventRecord(stop); cudaDeviceSynchronize(); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); printf("GPU Shared Memory...ok! in %f ms\n", ms); cudaMemcpy(matmul_shared, d_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); cudaMemcpy(matmul_simple, c_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); // hostMatMul(n, a, b, c); if(verify(n, matmul_simple, matmul_shared) != 0){ fprintf(stderr, "error verifying result\n"); exit(EXIT_FAILURE); } */ return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define BLOCKSIZE 32 #define NUM char __global__ void MatMul(int n, NUM *a, NUM *b, int *c){ int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int sum = 0; for(int i = 0; i < n; i++){ sum += a[tidy * n + i] * b[i * n + tidx]; } c[tidy * n + tidx] = sum; } __global__ void SharedMatMul(int n, NUM *a, NUM *b, int *c){ __shared__ NUM as[BLOCKSIZE*BLOCKSIZE*4]; __shared__ NUM bs[BLOCKSIZE*BLOCKSIZE*4]; __shared__ int cs[BLOCKSIZE*BLOCKSIZE]; int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int lx = threadIdx.x; int ly = threadIdx.y; // COPY FROM GLOBAL MEMORY cs[ly * BLOCKSIZE + lx] = 0; for(int i = 0; i < n; i+=BLOCKSIZE){ as[4*(ly * BLOCKSIZE + lx)] = a[tidy * n + (lx + i)]; bs[4*(ly * BLOCKSIZE + lx)] = b[(ly + i) * n + tidx]; __syncthreads(); for(int j = 0; j < BLOCKSIZE; j++){ cs[BLOCKSIZE * ly + lx] += as[4*(BLOCKSIZE * ly + j)] * bs[4*(BLOCKSIZE * j + lx)]; } __syncthreads(); //c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } //__syncthreads(); c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } __host__ void fillMatrix(NUM *a, int n){ for(int i = 0; i < n * n; i++){ a[i] = rand()%3 - 1; } } __host__ void fillMatrixZeros(int *a, int n){ for(int i = 0; i < n*n; i++){ a[i] = 0; } } __host__ void hostMatMul(int n, NUM *a, NUM *b, int *c){ for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ int val = c[i*n + j]; for(int k=0; k<n; ++k){ val += a[i*n+k] * b[k*n +j]; } c[i*n + j] = val; } } } __host__ int verify(int n, int *a, int *b){ int cont = 0; for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ cont++; if(a[i*n+j]!=b[i*n+j]){ printf("FALLO %d\t%d!=%d\n",cont , a[i*n+j], b[i*n+j]); //return 1; } } } return 0; } __host__ void printmat(int *a, int n, const char *name){ printf("mat %s:\n", name); for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ printf("%i ", a[i*n + j]); } printf("\n"); } printf("\n"); } int 
main(int argc, char *argv[]){ if (argc != 3){ printf("Ejecute como ./prog N\n"); return EXIT_FAILURE; } int n = atoi(argv[1]); srand(atoi(argv[2])); NUM *a, *b, *a_d, *b_d; int *c, *d, *c_d, *d_d, *matmul_simple, *matmul_shared; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float ms = 0; a = (NUM*)malloc(sizeof(NUM)*n*n); b = (NUM*)malloc(sizeof(NUM)*n*n); c = (int*)malloc(sizeof(int)*n*n); d = (int*)malloc(sizeof(int)*n*n); matmul_simple = (int*)malloc(sizeof(int)*n*n); matmul_shared = (int*)malloc(sizeof(int)*n*n); fillMatrix(a, n); fillMatrix(b, n); //fillMatrixZeros(c, n); hipMalloc(&a_d, sizeof(NUM)*n*n); hipMalloc(&b_d, sizeof(NUM)*n*n); hipMalloc(&c_d, sizeof(int)*n*n); hipMalloc(&d_d, sizeof(int)*n*n); hipMemcpy(a_d, a, sizeof(NUM) * n * n, hipMemcpyHostToDevice); hipMemcpy(b_d, b, sizeof(NUM) * n * n, hipMemcpyHostToDevice); hipMemcpy(c_d, c, sizeof(int) * n * n, hipMemcpyHostToDevice); hipMemcpy(d_d, d, sizeof(int) * n * n, hipMemcpyHostToDevice); dim3 block(BLOCKSIZE, BLOCKSIZE, 1); dim3 grid(n/BLOCKSIZE, n/BLOCKSIZE, 1); /* printf("block: %d %d %d\n", block.x, block.y, block.z); printf("grid : %d %d %d\n", grid.x, grid.y, grid.z); */ hipEventRecord(start); MatMul<<<grid,block>>>(n ,a_d, b_d, c_d); hipEventRecord(stop); hipDeviceSynchronize(); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); // printf("GPU Simple...ok! in %f ms\n", ms); printf("%f\n", ms); /* cudaEventRecord(start); SharedMatMul<<<grid,block>>>(n ,a_d, b_d, d_d); cudaEventRecord(stop); cudaDeviceSynchronize(); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); printf("GPU Shared Memory...ok! in %f ms\n", ms); cudaMemcpy(matmul_shared, d_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); cudaMemcpy(matmul_simple, c_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); // hostMatMul(n, a, b, c); if(verify(n, matmul_simple, matmul_shared) != 0){ fprintf(stderr, "error verifying result\n"); exit(EXIT_FAILURE); } */ return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6MatMuliPcS_Pi .globl _Z6MatMuliPcS_Pi .p2align 8 .type _Z6MatMuliPcS_Pi,@function _Z6MatMuliPcS_Pi: s_clause 0x1 s_load_b32 s3, s[0:1], 0x2c s_load_b32 s2, s[0:1], 0x0 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s4, s3, 0xffff s_lshr_b32 s3, s3, 16 s_delay_alu instid0(VALU_DEP_1) v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4] s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_3 s_load_b128 s[4:7], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_lo_u32 v2, v1, s2 v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v5, v0 s_mov_b32 s3, s2 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s4, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo .p2align 6 .LBB0_2: v_ashrrev_i32_e32 v7, 31, v5 v_add_co_u32 v6, vcc_lo, s6, v5 v_add_nc_u32_e32 v5, s2, v5 s_add_i32 s3, s3, -1 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo s_cmp_eq_u32 s3, 0 global_load_i8 v8, v[2:3], off global_load_i8 v6, v[6:7], off v_add_co_u32 v2, vcc_lo, v2, 1 v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo s_waitcnt vmcnt(0) v_mad_i32_i24 v4, v6, v8, v4 s_cbranch_scc0 .LBB0_2 s_branch .LBB0_4 .LBB0_3: v_mov_b32_e32 v4, 0 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1] v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v4, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6MatMuliPcS_Pi 
.amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6MatMuliPcS_Pi, .Lfunc_end0-_Z6MatMuliPcS_Pi .section .AMDGPU.csdata,"",@progbits .text .protected _Z12SharedMatMuliPcS_Pi .globl _Z12SharedMatMuliPcS_Pi .p2align 8 .type _Z12SharedMatMuliPcS_Pi,@function _Z12SharedMatMuliPcS_Pi: s_clause 0x1 s_load_b32 s3, s[0:1], 0x2c s_load_b32 s2, s[0:1], 0x0 v_and_b32_e32 v5, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshl_add_u32 v1, v0, 5, v5 v_lshlrev_b32_e32 v7, 2, v1 s_waitcnt lgkmcnt(0) s_and_b32 s4, s3, 0xffff s_lshr_b32 s3, s3, 16 v_mad_u64_u32 v[1:2], null, s14, s4, v[5:6] s_cmp_lt_i32 s2, 1 v_add_nc_u32_e32 v6, 0x2000, v7 s_delay_alu instid0(VALU_DEP_2) v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1] v_mov_b32_e32 v3, 0 ds_store_b32 v7, v3 offset:8192 
s_cbranch_scc1 .LBB1_5 s_load_b128 s[4:7], s[0:1], 0x8 v_mad_u64_u32 v[3:4], null, v2, s2, v[5:6] v_lshl_or_b32 v4, v5, 2, 0x1000 v_lshlrev_b32_e32 v5, 7, v0 s_mov_b32 s3, 0 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB1_2: v_add_nc_u32_e32 v10, s3, v0 s_mov_b32 s8, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[8:9], null, v10, s2, v[1:2] v_add_nc_u32_e32 v9, s3, v3 v_ashrrev_i32_e32 v10, 31, v9 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v12, 31, v8 s_waitcnt lgkmcnt(0) v_add_co_u32 v9, vcc_lo, s4, v9 v_add_co_ci_u32_e32 v10, vcc_lo, s5, v10, vcc_lo v_add_co_u32 v11, vcc_lo, s6, v8 v_add_co_ci_u32_e32 v12, vcc_lo, s7, v12, vcc_lo global_load_u8 v8, v[9:10], off global_load_u8 v9, v[11:12], off s_waitcnt vmcnt(1) ds_store_b8 v7, v8 s_waitcnt vmcnt(0) ds_store_b8 v7, v9 offset:4096 v_mov_b32_e32 v9, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v8, v6 .LBB1_3: v_add_nc_u32_e32 v10, s8, v5 s_add_i32 s8, s8, 4 ds_load_i8 v11, v9 ds_load_i8 v10, v10 v_add_nc_u32_e32 v9, 0x80, v9 s_cmpk_eq_i32 s8, 0x80 s_waitcnt lgkmcnt(0) v_mad_i32_i24 v8, v11, v10, v8 s_cbranch_scc0 .LBB1_3 s_add_i32 s3, s3, 32 ds_store_b32 v6, v8 s_cmp_ge_i32 s3, s2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc0 .LBB1_2 .LBB1_5: s_set_inst_prefetch_distance 0x2 s_load_b64 s[0:1], s[0:1], 0x18 v_mad_u64_u32 v[3:4], null, v2, s2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[0:1], 2, v[3:4] ds_load_b32 v3, v6 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b32 v2, v[0:1], off s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v2, v3 global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12SharedMatMuliPcS_Pi .amdhsa_group_segment_fixed_size 12288 
.amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 13 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z12SharedMatMuliPcS_Pi, .Lfunc_end1-_Z12SharedMatMuliPcS_Pi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 
32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6MatMuliPcS_Pi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6MatMuliPcS_Pi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 
8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 12288 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12SharedMatMuliPcS_Pi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12SharedMatMuliPcS_Pi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 13 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define BLOCKSIZE 32 #define NUM char __global__ void MatMul(int n, NUM *a, NUM *b, int *c){ int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int sum = 0; for(int i = 0; i < n; i++){ sum += a[tidy * n + i] * b[i * n + tidx]; } c[tidy * n + tidx] = sum; } __global__ void SharedMatMul(int n, NUM *a, NUM *b, int *c){ __shared__ NUM as[BLOCKSIZE*BLOCKSIZE*4]; __shared__ NUM bs[BLOCKSIZE*BLOCKSIZE*4]; __shared__ int cs[BLOCKSIZE*BLOCKSIZE]; int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; int lx = threadIdx.x; int ly = threadIdx.y; // COPY FROM GLOBAL MEMORY cs[ly * BLOCKSIZE + lx] = 0; for(int i = 0; i < n; i+=BLOCKSIZE){ as[4*(ly * BLOCKSIZE + lx)] = a[tidy * n + (lx + i)]; bs[4*(ly * BLOCKSIZE + lx)] = b[(ly + i) * n + tidx]; __syncthreads(); for(int j = 0; j < BLOCKSIZE; j++){ cs[BLOCKSIZE * ly + lx] += as[4*(BLOCKSIZE * ly + j)] * bs[4*(BLOCKSIZE * j + lx)]; } __syncthreads(); //c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } //__syncthreads(); c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx]; } __host__ void fillMatrix(NUM *a, int n){ for(int i = 0; i < n * n; i++){ a[i] = rand()%3 - 1; } } __host__ void fillMatrixZeros(int *a, int n){ for(int i = 0; i < n*n; i++){ a[i] = 0; } } __host__ void hostMatMul(int n, NUM *a, NUM *b, int *c){ for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ int val = c[i*n + j]; for(int k=0; k<n; ++k){ val += a[i*n+k] * b[k*n +j]; } c[i*n + j] = val; } } } __host__ int verify(int n, int *a, int *b){ int cont = 0; for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ cont++; if(a[i*n+j]!=b[i*n+j]){ printf("FALLO %d\t%d!=%d\n",cont , a[i*n+j], b[i*n+j]); //return 1; } } } return 0; } __host__ void printmat(int *a, int n, const char *name){ printf("mat %s:\n", name); for(int i=0; i<n; ++i){ for(int j=0; j<n; ++j){ printf("%i ", a[i*n + j]); } printf("\n"); } printf("\n"); } int 
main(int argc, char *argv[]){ if (argc != 3){ printf("Ejecute como ./prog N\n"); return EXIT_FAILURE; } int n = atoi(argv[1]); srand(atoi(argv[2])); NUM *a, *b, *a_d, *b_d; int *c, *d, *c_d, *d_d, *matmul_simple, *matmul_shared; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float ms = 0; a = (NUM*)malloc(sizeof(NUM)*n*n); b = (NUM*)malloc(sizeof(NUM)*n*n); c = (int*)malloc(sizeof(int)*n*n); d = (int*)malloc(sizeof(int)*n*n); matmul_simple = (int*)malloc(sizeof(int)*n*n); matmul_shared = (int*)malloc(sizeof(int)*n*n); fillMatrix(a, n); fillMatrix(b, n); //fillMatrixZeros(c, n); hipMalloc(&a_d, sizeof(NUM)*n*n); hipMalloc(&b_d, sizeof(NUM)*n*n); hipMalloc(&c_d, sizeof(int)*n*n); hipMalloc(&d_d, sizeof(int)*n*n); hipMemcpy(a_d, a, sizeof(NUM) * n * n, hipMemcpyHostToDevice); hipMemcpy(b_d, b, sizeof(NUM) * n * n, hipMemcpyHostToDevice); hipMemcpy(c_d, c, sizeof(int) * n * n, hipMemcpyHostToDevice); hipMemcpy(d_d, d, sizeof(int) * n * n, hipMemcpyHostToDevice); dim3 block(BLOCKSIZE, BLOCKSIZE, 1); dim3 grid(n/BLOCKSIZE, n/BLOCKSIZE, 1); /* printf("block: %d %d %d\n", block.x, block.y, block.z); printf("grid : %d %d %d\n", grid.x, grid.y, grid.z); */ hipEventRecord(start); MatMul<<<grid,block>>>(n ,a_d, b_d, c_d); hipEventRecord(stop); hipDeviceSynchronize(); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); // printf("GPU Simple...ok! in %f ms\n", ms); printf("%f\n", ms); /* cudaEventRecord(start); SharedMatMul<<<grid,block>>>(n ,a_d, b_d, d_d); cudaEventRecord(stop); cudaDeviceSynchronize(); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); printf("GPU Shared Memory...ok! in %f ms\n", ms); cudaMemcpy(matmul_shared, d_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); cudaMemcpy(matmul_simple, c_d, sizeof(int) * n * n, cudaMemcpyDeviceToHost); // hostMatMul(n, a, b, c); if(verify(n, matmul_simple, matmul_shared) != 0){ fprintf(stderr, "error verifying result\n"); exit(EXIT_FAILURE); } */ return 0; }
.text .file "matmul_simple.hip" .globl _Z21__device_stub__MatMuliPcS_Pi # -- Begin function _Z21__device_stub__MatMuliPcS_Pi .p2align 4, 0x90 .type _Z21__device_stub__MatMuliPcS_Pi,@function _Z21__device_stub__MatMuliPcS_Pi: # @_Z21__device_stub__MatMuliPcS_Pi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movq %rcx, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6MatMuliPcS_Pi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z21__device_stub__MatMuliPcS_Pi, .Lfunc_end0-_Z21__device_stub__MatMuliPcS_Pi .cfi_endproc # -- End function .globl _Z27__device_stub__SharedMatMuliPcS_Pi # -- Begin function _Z27__device_stub__SharedMatMuliPcS_Pi .p2align 4, 0x90 .type _Z27__device_stub__SharedMatMuliPcS_Pi,@function _Z27__device_stub__SharedMatMuliPcS_Pi: # @_Z27__device_stub__SharedMatMuliPcS_Pi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movq %rcx, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12SharedMatMuliPcS_Pi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq 
$136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z27__device_stub__SharedMatMuliPcS_Pi, .Lfunc_end1-_Z27__device_stub__SharedMatMuliPcS_Pi .cfi_endproc # -- End function .globl _Z10fillMatrixPci # -- Begin function _Z10fillMatrixPci .p2align 4, 0x90 .type _Z10fillMatrixPci,@function _Z10fillMatrixPci: # @_Z10fillMatrixPci .cfi_startproc # %bb.0: testl %esi, %esi je .LBB2_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 imull %ebx, %ebx cmpl $1, %ebx adcl $0, %ebx xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1431655766, %rax, %rcx # imm = 0x55555556 movq %rcx, %rdx shrq $63, %rdx shrq $32, %rcx addl %edx, %ecx leal (%rcx,%rcx,2), %ecx subl %ecx, %eax decb %al movb %al, (%r14,%r15) incq %r15 cmpq %r15, %rbx jne .LBB2_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB2_4: # %._crit_edge retq .Lfunc_end2: .size _Z10fillMatrixPci, .Lfunc_end2-_Z10fillMatrixPci .cfi_endproc # -- End function .globl _Z15fillMatrixZerosPii # -- Begin function _Z15fillMatrixZerosPii .p2align 4, 0x90 .type _Z15fillMatrixZerosPii,@function _Z15fillMatrixZerosPii: # @_Z15fillMatrixZerosPii .cfi_startproc # %bb.0: testl %esi, %esi je .LBB3_1 # %bb.2: # %.lr.ph.preheader movl %esi, %edx imull %edx, %edx cmpl $1, %edx adcl $0, %edx shlq $2, %rdx xorl %esi, %esi jmp memset@PLT # TAILCALL .LBB3_1: # %._crit_edge retq .Lfunc_end3: .size _Z15fillMatrixZerosPii, .Lfunc_end3-_Z15fillMatrixZerosPii .cfi_endproc # -- End function .globl _Z10hostMatMuliPcS_Pi # -- Begin function _Z10hostMatMuliPcS_Pi .p2align 4, 0x90 .type _Z10hostMatMuliPcS_Pi,@function _Z10hostMatMuliPcS_Pi: # @_Z10hostMatMuliPcS_Pi .cfi_startproc # %bb.0: pushq 
%rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rcx, -8(%rsp) # 8-byte Spill testl %edi, %edi jle .LBB4_7 # %bb.1: # %.preheader.lr.ph movl %edi, %eax xorl %ecx, %ecx xorl %r9d, %r9d .p2align 4, 0x90 .LBB4_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB4_3 Depth 2 # Child Loop BB4_4 Depth 3 movl %ecx, %r10d addq %rsi, %r10 movq %r9, %r11 imulq %rax, %r11 movq -8(%rsp), %r8 # 8-byte Reload leaq (%r8,%r11,4), %r11 movq %rdx, %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_3: # %.lr.ph # Parent Loop BB4_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_4 Depth 3 movl (%r11,%r14,4), %ebp movq %rbx, %r15 xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_4: # Parent Loop BB4_2 Depth=1 # Parent Loop BB4_3 Depth=2 # => This Inner Loop Header: Depth=3 movsbl (%r10,%r12), %r13d movsbl (%r15), %r8d imull %r13d, %r8d addl %r8d, %ebp incq %r12 addq %rax, %r15 cmpq %r12, %rax jne .LBB4_4 # %bb.5: # %._crit_edge # in Loop: Header=BB4_3 Depth=2 movl %ebp, (%r11,%r14,4) incq %r14 incq %rbx cmpq %rax, %r14 jne .LBB4_3 # %bb.6: # %._crit_edge34 # in Loop: Header=BB4_2 Depth=1 incq %r9 addl %edi, %ecx cmpq %rax, %r9 jne .LBB4_2 .LBB4_7: # %._crit_edge36 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z10hostMatMuliPcS_Pi, .Lfunc_end4-_Z10hostMatMuliPcS_Pi .cfi_endproc # -- End function .globl _Z6verifyiPiS_ # -- Begin function _Z6verifyiPiS_ .p2align 4, 0x90 .type _Z6verifyiPiS_,@function _Z6verifyiPiS_: # @_Z6verifyiPiS_ .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq 
%r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, 32(%rsp) # 8-byte Spill movq %rsi, 24(%rsp) # 8-byte Spill movl %edi, 8(%rsp) # 4-byte Spill testl %edi, %edi jle .LBB5_7 # %bb.1: # %.preheader.lr.ph movl 8(%rsp), %eax # 4-byte Reload movq %rax, 16(%rsp) # 8-byte Spill leaq (,%rax,4), %r13 xorl %eax, %eax xorl %r14d, %r14d xorl %r15d, %r15d jmp .LBB5_2 .p2align 4, 0x90 .LBB5_6: # %._crit_edge # in Loop: Header=BB5_2 Depth=1 incq %r14 movl 12(%rsp), %eax # 4-byte Reload addl 8(%rsp), %eax # 4-byte Folded Reload cmpq 16(%rsp), %r14 # 8-byte Folded Reload je .LBB5_7 .LBB5_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB5_3 Depth 2 movl %eax, 12(%rsp) # 4-byte Spill movl %eax, %eax movq 32(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r12 movq 24(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rbp xorl %ebx, %ebx jmp .LBB5_3 .p2align 4, 0x90 .LBB5_5: # in Loop: Header=BB5_3 Depth=2 addq $4, %rbx cmpq %rbx, %r13 je .LBB5_6 .LBB5_3: # Parent Loop BB5_2 Depth=1 # => This Inner Loop Header: Depth=2 incl %r15d movl (%rbp,%rbx), %edx movl (%r12,%rbx), %ecx cmpl %ecx, %edx je .LBB5_5 # %bb.4: # in Loop: Header=BB5_3 Depth=2 movl $.L.str, %edi movl %r15d, %esi xorl %eax, %eax callq printf jmp .LBB5_5 .LBB5_7: # %._crit_edge32 xorl %eax, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z6verifyiPiS_, .Lfunc_end5-_Z6verifyiPiS_ .cfi_endproc # -- End function .globl _Z8printmatPiiPKc # -- Begin function _Z8printmatPiiPKc .p2align 4, 0x90 .type _Z8printmatPiiPKc,@function _Z8printmatPiiPKc: # 
@_Z8printmatPiiPKc .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movq %rdi, (%rsp) # 8-byte Spill movl $.L.str.1, %edi movq %rdx, %rsi xorl %eax, %eax callq printf testl %ebx, %ebx jle .LBB6_5 # %bb.1: # %.preheader.lr.ph movl %ebx, %r15d xorl %ebp, %ebp xorl %r12d, %r12d .p2align 4, 0x90 .LBB6_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB6_3 Depth 2 movl %ebp, %eax movq (%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r13 xorl %r14d, %r14d .p2align 4, 0x90 .LBB6_3: # Parent Loop BB6_2 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r13,%r14,4), %esi movl $.L.str.2, %edi xorl %eax, %eax callq printf incq %r14 cmpq %r14, %r15 jne .LBB6_3 # %bb.4: # %._crit_edge # in Loop: Header=BB6_2 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 addl %ebx, %ebp cmpq %r15, %r12 jne .LBB6_2 .LBB6_5: # %._crit_edge15 movl $10, %edi addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp putchar@PLT # TAILCALL .Lfunc_end6: .size _Z8printmatPiiPKc, .Lfunc_end6-_Z8printmatPiiPKc .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 
.cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $3, %edi jne .LBB7_1 # %bb.2: movq 8(%rsi), %rdi movq %rsi, %rbx xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movl %eax, %edi callq srand leaq 32(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movl $0, 12(%rsp) movslq %r14d, %rbx movq %rbx, %r15 imulq %rbx, %r15 movq %r15, %rdi callq malloc movq %rax, %r12 movq %r15, %rdi callq malloc movq %rax, %rbp leaq (,%r15,4), %r13 movq %r13, %rdi callq malloc movq %rax, 72(%rsp) # 8-byte Spill movq %r13, 88(%rsp) # 8-byte Spill movq %r13, %rdi callq malloc movq %rax, 80(%rsp) # 8-byte Spill testl %ebx, %ebx je .LBB7_8 # %bb.3: # %.lr.ph.preheader.i movq %r14, 64(%rsp) # 8-byte Spill movl %r14d, %r13d imull %r13d, %r13d cmpl $1, %r13d movl %r13d, %ebx adcl $0, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB7_4: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1431655766, %rax, %rcx # imm = 0x55555556 movq %rcx, %rdx shrq $63, %rdx shrq $32, %rcx addl %edx, %ecx leal (%rcx,%rcx,2), %ecx subl %ecx, %eax decb %al movb %al, (%r12,%r14) incq %r14 cmpq %r14, %rbx jne .LBB7_4 # %bb.5: # %_Z10fillMatrixPci.exit movq 64(%rsp), %r14 # 8-byte Reload testl %r14d, %r14d je .LBB7_8 # %bb.6: # %.lr.ph.preheader.i50 cmpl $1, %r13d adcl $0, %r13d xorl %ebx, %ebx .p2align 4, 0x90 .LBB7_7: # %.lr.ph.i53 # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1431655766, %rax, %rcx # imm = 0x55555556 movq %rcx, %rdx shrq $63, %rdx shrq $32, %rcx addl %edx, %ecx leal (%rcx,%rcx,2), %ecx subl %ecx, %eax decb %al movb %al, (%rbp,%rbx) incq %rbx cmpq %rbx, %r13 jne .LBB7_7 .LBB7_8: # %_Z10fillMatrixPci.exit57 leaq 56(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 48(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 40(%rsp), %rdi movq 88(%rsp), %rbx # 8-byte Reload movq %rbx, %rsi callq hipMalloc leaq 96(%rsp), %rdi movq %rbx, %rsi 
callq hipMalloc movq 56(%rsp), %rdi movq %r12, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 48(%rsp), %rdi movq %rbp, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq 72(%rsp), %rsi # 8-byte Reload movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 96(%rsp), %rdi movq 80(%rsp), %rsi # 8-byte Reload movq %rbx, %rdx movl $1, %ecx callq hipMemcpy leal 31(%r14), %eax testl %r14d, %r14d cmovnsl %r14d, %eax sarl $5, %eax movq %rax, %r15 shlq $32, %r15 orq %rax, %r15 movq 32(%rsp), %rdi movq %r14, %rbx xorl %r14d, %r14d xorl %esi, %esi callq hipEventRecord movabsq $137438953504, %rdx # imm = 0x2000000020 movq %r15, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB7_10 # %bb.9: movq 56(%rsp), %rax movq 48(%rsp), %rcx movq 40(%rsp), %rdx movl %ebx, 28(%rsp) movq %rax, 168(%rsp) movq %rcx, 160(%rsp) movq %rdx, 152(%rsp) leaq 28(%rsp), %rax movq %rax, 176(%rsp) leaq 168(%rsp), %rax movq %rax, 184(%rsp) leaq 160(%rsp), %rax movq %rax, 192(%rsp) leaq 152(%rsp), %rax movq %rax, 200(%rsp) leaq 136(%rsp), %rdi leaq 120(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 136(%rsp), %rsi movl 144(%rsp), %edx movq 120(%rsp), %rcx movl 128(%rsp), %r8d leaq 176(%rsp), %r9 movl $_Z6MatMuliPcS_Pi, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB7_10: movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord callq hipDeviceSynchronize movq 16(%rsp), %rdi callq hipEventSynchronize movq 32(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.5, %edi movb $1, %al callq printf jmp .LBB7_11 .LBB7_1: movl $.Lstr, %edi callq puts@PLT movl $1, %r14d .LBB7_11: movl %r14d, %eax addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx 
.cfi_def_cfa_offset 48
	popq	%r12
	.cfi_def_cfa_offset 40
	popq	%r13
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end7:
	.size	main, .Lfunc_end7-main
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __hip_module_ctor -- clang-emitted HIP module constructor.
# Runs from .init_array (registered below in this file). Registers the
# embedded device fat binary exactly once (handle cached in
# __hip_gpubin_handle), then registers the two kernel host stubs with
# the HIP runtime and arranges __hip_module_dtor to run via atexit.
# ABI: SysV AMD64; rbx is the only callee-saved register used.
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp                       # 32-byte scratch for the null
	.cfi_def_cfa_offset 48                  # trailing args of __hipRegisterFunction
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB8_2
# %bb.1:                                # first call: register fat binary
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache the module handle
.LBB8_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	xorps	%xmm0, %xmm0                    # zero the 4 stack-passed pointer args
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z6MatMuliPcS_Pi, %esi         # host stub address for MatMul
	movl	$.L__unnamed_1, %edx            # device-side name string (x2)
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d                       # thread_limit = -1 (none)
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0                    # re-zero scratch for second call
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z12SharedMatMuliPcS_Pi, %esi  # host stub address for SharedMatMul
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi        # schedule teardown at process exit
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end8:
	.size	__hip_module_ctor, .Lfunc_end8-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __hip_module_dtor -- HIP module destructor (atexit handler).
# Unregisters the fat binary if it was registered and clears the cached
# handle so the teardown is idempotent.
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB9_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # clear handle: safe to run twice
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB9_2:
	retq
.Lfunc_end9:
	.size	__hip_module_dtor, .Lfunc_end9-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# Kernel handle object: the address of the host-side stub, used as the
# key when launching _Z6MatMuliPcS_Pi through the HIP runtime.
	.type	_Z6MatMuliPcS_Pi,@object        # @_Z6MatMuliPcS_Pi
	.section	.rodata,"a",@progbits
	.globl	_Z6MatMuliPcS_Pi
	.p2align	3, 0x0
_Z6MatMuliPcS_Pi:
.quad _Z21__device_stub__MatMuliPcS_Pi .size _Z6MatMuliPcS_Pi, 8 .type _Z12SharedMatMuliPcS_Pi,@object # @_Z12SharedMatMuliPcS_Pi .globl _Z12SharedMatMuliPcS_Pi .p2align 3, 0x0 _Z12SharedMatMuliPcS_Pi: .quad _Z27__device_stub__SharedMatMuliPcS_Pi .size _Z12SharedMatMuliPcS_Pi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "FALLO %d\t%d!=%d\n" .size .L.str, 17 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "mat %s:\n" .size .L.str.1, 9 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%i " .size .L.str.2, 4 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "%f\n" .size .L.str.5, 4 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6MatMuliPcS_Pi" .size .L__unnamed_1, 17 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z12SharedMatMuliPcS_Pi" .size .L__unnamed_2, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Ejecute como ./prog N" .size .Lstr, 22 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__MatMuliPcS_Pi .addrsig_sym _Z27__device_stub__SharedMatMuliPcS_Pi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6MatMuliPcS_Pi .addrsig_sym _Z12SharedMatMuliPcS_Pi .addrsig_sym 
__hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
# NOTE(review): extraction artifact (stray instruction text) removed here.
# Boundary between the clang/HIP host-assembly translation unit above and
# the GCC/CUDA host-assembly translation unit that begins below.
.file	"tmpxft_0009ca31_00000000-6_matmul_simple.cudafe1.cpp"
	.text
#APP
#NO_APP
#-----------------------------------------------------------------------
# _ZL26__cudaUnregisterBinaryUtilv -- static atexit helper.
# Unregisters the CUDA fat binary cached in _ZL20__cudaFatCubinHandle.
#-----------------------------------------------------------------------
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2065:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# void _Z10fillMatrixPci(char *m, int n)  [fillMatrix(char*, int)]
# Fills an n*n char matrix with rand() % 3 - 1, i.e. values in {-1,0,1}.
# In:  rdi = matrix base, esi = n.  No-op when n*n <= 0.
#-----------------------------------------------------------------------
	.globl	_Z10fillMatrixPci
	.type	_Z10fillMatrixPci, @function
_Z10fillMatrixPci:
.LFB2057:
	.cfi_startproc
	endbr64
	imull	%esi, %esi                      # esi = n*n (element count)
	testl	%esi, %esi
	jle	.L8                             # empty matrix: nothing to fill
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	subq	$8, %rsp                        # keep rsp 16-aligned across rand()
	.cfi_def_cfa_offset 32
	movq	%rdi, %rbx                      # rbx = write cursor
	movslq	%esi, %rsi
	leaq	(%rdi,%rsi), %rbp               # rbp = one-past-end pointer
.L5:
	call	rand@PLT
	movslq	%eax, %rdx                      # signed divide by 3 via the
	imulq	$1431655766, %rdx, %rdx         # magic multiplier 0x55555556
	shrq	$32, %rdx
	movl	%eax, %ecx
	sarl	$31, %ecx                       # sign correction for idiv-by-3
	subl	%ecx, %edx                      # edx = rand()/3
	leal	(%rdx,%rdx,2), %edx             # edx = 3*(rand()/3)
	subl	%edx, %eax                      # eax = rand() % 3
	subl	$1, %eax                        # shift into {-1, 0, 1}
	movb	%al, (%rbx)
	addq	$1, %rbx
	cmpq	%rbp, %rbx
	jne	.L5
	addq	$8, %rsp
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L8:
	.cfi_restore 3
	.cfi_restore 6
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z10fillMatrixPci, .-_Z10fillMatrixPci
#-----------------------------------------------------------------------
# void _Z15fillMatrixZerosPii(int *m, int n)  [fillMatrixZeros(int*, int)]
# Zeroes an n*n int matrix with a simple store loop.  No-op if n*n <= 0.
#-----------------------------------------------------------------------
	.globl	_Z15fillMatrixZerosPii
	.type	_Z15fillMatrixZerosPii, @function
_Z15fillMatrixZerosPii:
.LFB2058:
	.cfi_startproc
	endbr64
	imull	%esi, %esi                      # esi = n*n
	testl	%esi, %esi
	jle	.L11
	movq	%rdi, %rax                      # rax = write cursor
	movslq	%esi, %rsi
	leaq	(%rdi,%rsi,4), %rdx             # rdx = one-past-end (4-byte ints)
.L13:
	movl	$0, (%rax)
	addq	$4, %rax
	cmpq	%rdx, %rax
	jne	.L13
.L11:
	ret
	.cfi_endproc
.LFE2058:
	.size	_Z15fillMatrixZerosPii, .-_Z15fillMatrixZerosPii
#-----------------------------------------------------------------------
# _Z10hostMatMuliPcS_Pi  [hostMatMul(int, char*, char*, int*)]
# CPU reference matrix multiply (body continues past this chunk).
# Prologue: early-out when n <= 0, then save callee-saved regs.
#-----------------------------------------------------------------------
	.globl	_Z10hostMatMuliPcS_Pi
	.type	_Z10hostMatMuliPcS_Pi, @function
_Z10hostMatMuliPcS_Pi:
.LFB2059:
	.cfi_startproc
	endbr64
	testl	%edi, %edi
	jle	.L23                            # n <= 0: nothing to compute
	pushq	%r15
	.cfi_def_cfa_offset 16
	.cfi_offset 15, -16
	pushq	%r14
	.cfi_def_cfa_offset 24
	.cfi_offset 14, -24
	pushq	%r13
	.cfi_def_cfa_offset 32
	.cfi_offset 13, -32
	pushq	%r12
	.cfi_def_cfa_offset 40
	.cfi_offset 12, -40
pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 movl %edi, %ebp movq %rsi, %r8 movq %rdx, %r12 movq %rcx, %r10 movslq %edi, %rdi leaq 0(,%rdi,4), %r14 movq %rsi, %rbx addq %rdi, %r8 movl $0, %r13d .L17: movl $0, %r9d .L19: movq %r10, %r11 movl (%r10,%r9,4), %esi leaq (%r12,%r9), %rcx movq %rbx, %rax .L18: movsbl (%rax), %edx movsbl (%rcx), %r15d imull %r15d, %edx addl %edx, %esi addq $1, %rax addq %rdi, %rcx cmpq %r8, %rax jne .L18 movl %esi, (%r11,%r9,4) addq $1, %r9 cmpq %rdi, %r9 jne .L19 addl $1, %r13d addq %r14, %r10 addq %rdi, %rbx addq %rdi, %r8 cmpl %r13d, %ebp jne .L17 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L23: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 .cfi_restore 15 ret .cfi_endproc .LFE2059: .size _Z10hostMatMuliPcS_Pi, .-_Z10hostMatMuliPcS_Pi .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "FALLO %d\t%d!=%d\n" .text .globl _Z6verifyiPiS_ .type _Z6verifyiPiS_, @function _Z6verifyiPiS_: .LFB2060: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $56, %rsp .cfi_def_cfa_offset 112 movq %rsi, 24(%rsp) movq %rdx, 32(%rsp) testl %edi, %edi jle .L27 movslq %edi, %rax salq $2, %rax movq %rax, 40(%rsp) movl %edi, %r13d movl $0, %eax movl $0, %r15d movl $0, %ebx leaq .LC0(%rip), %r14 jmp .L28 .L29: addq $4, %r12 addq $4, %rbp cmpl %r13d, %ebx je .L33 .L30: addl $1, %ebx movl (%r12), %ecx movl 0(%rbp), %r8d cmpl %r8d, %ecx je .L29 movl %ebx, %edx movq %r14, %rsi movl $2, %edi movl $0, %eax call 
__printf_chk@PLT jmp .L29 .L33: movq 16(%rsp), %rax movl %r15d, %edi movl 12(%rsp), %r15d movl 8(%rsp), %ebx addl %edi, %ebx addl $1, %r15d addl %edi, %r13d movq 40(%rsp), %rdx addq %rdx, %rax cmpl %r15d, %edi je .L27 .L28: movl %ebx, 8(%rsp) movq 24(%rsp), %rsi leaq (%rsi,%rax), %r12 movq 32(%rsp), %rdx leaq (%rdx,%rax), %rbp movl %r15d, 12(%rsp) movq %rax, 16(%rsp) movl %edi, %r15d jmp .L30 .L27: movl $0, %eax addq $56, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z6verifyiPiS_, .-_Z6verifyiPiS_ .section .rodata.str1.1 .LC1: .string "mat %s:\n" .LC2: .string "%i " .LC3: .string "\n" .text .globl _Z8printmatPiiPKc .type _Z8printmatPiiPKc, @function _Z8printmatPiiPKc: .LFB2061: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movq %rdi, %rbp movl %esi, %ebx movl %esi, 12(%rsp) leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT testl %ebx, %ebx jle .L35 movslq %ebx, %r14 leaq 0(,%r14,4), %r15 addq %r15, %rbp negq %r14 salq $2, %r14 movl $0, %r13d leaq .LC2(%rip), %r12 .L36: leaq 0(%rbp,%r14), %rbx .L37: movl (%rbx), %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L37 leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r13d addq %r15, %rbp cmpl %r13d, 12(%rsp) jne .L36 .L35: leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq 
%rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _Z8printmatPiiPKc, .-_Z8printmatPiiPKc .globl _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi .type _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi, @function _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi: .LFB2087: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L44 .L40: movq 136(%rsp), %rax subq %fs:40, %rax jne .L45 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z6MatMuliPcS_Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L40 .L45: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi, .-_Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi .globl _Z6MatMuliPcS_Pi .type _Z6MatMuliPcS_Pi, @function _Z6MatMuliPcS_Pi: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z6MatMuliPcS_Pi, .-_Z6MatMuliPcS_Pi .section .rodata.str1.1 .LC4: .string "Ejecute como ./prog N\n" .LC6: .string "%f\n" .text .globl main .type main, @function main: 
.LFB2062: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $120, %rsp .cfi_def_cfa_offset 176 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax cmpl $3, %edi je .L49 leaq .LC4(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $1, %eax .L48: movq 104(%rsp), %rdx subq %fs:40, %rdx jne .L54 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L49: .cfi_restore_state movq %rsi, %rbp movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbx movl %eax, %r12d movq 16(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %edi call srand@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT leaq 72(%rsp), %rdi call cudaEventCreate@PLT movl $0x00000000, 28(%rsp) movslq %ebx, %rbx imulq %rbx, %rbx movq %rbx, %rdi call malloc@PLT movq %rax, %r14 movq %rbx, %rdi call malloc@PLT movq %rax, %r13 leaq 0(,%rbx,4), %rbp movq %rbp, %rdi call malloc@PLT movq %rax, %r15 movq %rbp, %rdi call malloc@PLT movq %rax, 8(%rsp) movl %r12d, %esi movq %r14, %rdi call _Z10fillMatrixPci movl %r12d, %esi movq %r13, %rdi call _Z10fillMatrixPci leaq 32(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 40(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 48(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT leaq 56(%rsp), %rdi movq %rbp, %rsi call cudaMalloc@PLT movl $1, %ecx movq %rbx, %rdx movq %r14, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %r13, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT movl $1, 
%ecx movq %rbp, %rdx movq %r15, %rsi movq 48(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbp, %rdx movq 8(%rsp), %rsi movq 56(%rsp), %rdi call cudaMemcpy@PLT movl $32, 80(%rsp) movl $32, 84(%rsp) movl $1, 88(%rsp) movl $32, %ecx movl %r12d, %eax cltd idivl %ecx movl %eax, 92(%rsp) movl %eax, 96(%rsp) movl $1, 100(%rsp) movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movl 88(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 80(%rsp), %rdx movq 92(%rsp), %rdi movl 100(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L55 .L51: movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT call cudaDeviceSynchronize@PLT movq 72(%rsp), %rdi call cudaEventSynchronize@PLT leaq 28(%rsp), %rdi movq 72(%rsp), %rdx movq 64(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 28(%rsp), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $0, %eax jmp .L48 .L55: movq 48(%rsp), %rcx movq 40(%rsp), %rdx movq 32(%rsp), %rsi movl %r12d, %edi call _Z30__device_stub__Z6MatMuliPcS_PiiPcS_Pi jmp .L51 .L54: call __stack_chk_fail@PLT .cfi_endproc .LFE2062: .size main, .-main .globl _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi .type _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi, @function _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi: .LFB2089: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 28(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 28(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L60 .L56: movq 136(%rsp), %rax subq %fs:40, %rax jne .L61 addq $152, %rsp 
.cfi_remember_state .cfi_def_cfa_offset 8 ret .L60: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12SharedMatMuliPcS_Pi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L56 .L61: call __stack_chk_fail@PLT .cfi_endproc .LFE2089: .size _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi, .-_Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi .globl _Z12SharedMatMuliPcS_Pi .type _Z12SharedMatMuliPcS_Pi, @function _Z12SharedMatMuliPcS_Pi: .LFB2090: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z12SharedMatMuliPcS_PiiPcS_Pi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _Z12SharedMatMuliPcS_Pi, .-_Z12SharedMatMuliPcS_Pi .section .rodata.str1.1 .LC7: .string "_Z12SharedMatMuliPcS_Pi" .LC8: .string "_Z6MatMuliPcS_Pi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2092: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z12SharedMatMuliPcS_Pi(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z6MatMuliPcS_Pi(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq 
_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "matmul_simple.hip" .globl _Z21__device_stub__MatMuliPcS_Pi # -- Begin function _Z21__device_stub__MatMuliPcS_Pi .p2align 4, 0x90 .type _Z21__device_stub__MatMuliPcS_Pi,@function _Z21__device_stub__MatMuliPcS_Pi: # @_Z21__device_stub__MatMuliPcS_Pi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movq %rcx, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6MatMuliPcS_Pi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z21__device_stub__MatMuliPcS_Pi, .Lfunc_end0-_Z21__device_stub__MatMuliPcS_Pi .cfi_endproc # -- End function .globl _Z27__device_stub__SharedMatMuliPcS_Pi # -- Begin function _Z27__device_stub__SharedMatMuliPcS_Pi .p2align 4, 0x90 .type _Z27__device_stub__SharedMatMuliPcS_Pi,@function _Z27__device_stub__SharedMatMuliPcS_Pi: # @_Z27__device_stub__SharedMatMuliPcS_Pi .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movl %edi, 4(%rsp) movq %rsi, 72(%rsp) movq %rdx, 64(%rsp) movq %rcx, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12SharedMatMuliPcS_Pi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq 
$136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z27__device_stub__SharedMatMuliPcS_Pi, .Lfunc_end1-_Z27__device_stub__SharedMatMuliPcS_Pi .cfi_endproc # -- End function .globl _Z10fillMatrixPci # -- Begin function _Z10fillMatrixPci .p2align 4, 0x90 .type _Z10fillMatrixPci,@function _Z10fillMatrixPci: # @_Z10fillMatrixPci .cfi_startproc # %bb.0: testl %esi, %esi je .LBB2_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl %esi, %ebx movq %rdi, %r14 imull %ebx, %ebx cmpl $1, %ebx adcl $0, %ebx xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1431655766, %rax, %rcx # imm = 0x55555556 movq %rcx, %rdx shrq $63, %rdx shrq $32, %rcx addl %edx, %ecx leal (%rcx,%rcx,2), %ecx subl %ecx, %eax decb %al movb %al, (%r14,%r15) incq %r15 cmpq %r15, %rbx jne .LBB2_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB2_4: # %._crit_edge retq .Lfunc_end2: .size _Z10fillMatrixPci, .Lfunc_end2-_Z10fillMatrixPci .cfi_endproc # -- End function .globl _Z15fillMatrixZerosPii # -- Begin function _Z15fillMatrixZerosPii .p2align 4, 0x90 .type _Z15fillMatrixZerosPii,@function _Z15fillMatrixZerosPii: # @_Z15fillMatrixZerosPii .cfi_startproc # %bb.0: testl %esi, %esi je .LBB3_1 # %bb.2: # %.lr.ph.preheader movl %esi, %edx imull %edx, %edx cmpl $1, %edx adcl $0, %edx shlq $2, %rdx xorl %esi, %esi jmp memset@PLT # TAILCALL .LBB3_1: # %._crit_edge retq .Lfunc_end3: .size _Z15fillMatrixZerosPii, .Lfunc_end3-_Z15fillMatrixZerosPii .cfi_endproc # -- End function .globl _Z10hostMatMuliPcS_Pi # -- Begin function _Z10hostMatMuliPcS_Pi .p2align 4, 0x90 .type _Z10hostMatMuliPcS_Pi,@function _Z10hostMatMuliPcS_Pi: # @_Z10hostMatMuliPcS_Pi .cfi_startproc # %bb.0: pushq 
%rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rcx, -8(%rsp) # 8-byte Spill testl %edi, %edi jle .LBB4_7 # %bb.1: # %.preheader.lr.ph movl %edi, %eax xorl %ecx, %ecx xorl %r9d, %r9d .p2align 4, 0x90 .LBB4_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB4_3 Depth 2 # Child Loop BB4_4 Depth 3 movl %ecx, %r10d addq %rsi, %r10 movq %r9, %r11 imulq %rax, %r11 movq -8(%rsp), %r8 # 8-byte Reload leaq (%r8,%r11,4), %r11 movq %rdx, %rbx xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_3: # %.lr.ph # Parent Loop BB4_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB4_4 Depth 3 movl (%r11,%r14,4), %ebp movq %rbx, %r15 xorl %r12d, %r12d .p2align 4, 0x90 .LBB4_4: # Parent Loop BB4_2 Depth=1 # Parent Loop BB4_3 Depth=2 # => This Inner Loop Header: Depth=3 movsbl (%r10,%r12), %r13d movsbl (%r15), %r8d imull %r13d, %r8d addl %r8d, %ebp incq %r12 addq %rax, %r15 cmpq %r12, %rax jne .LBB4_4 # %bb.5: # %._crit_edge # in Loop: Header=BB4_3 Depth=2 movl %ebp, (%r11,%r14,4) incq %r14 incq %rbx cmpq %rax, %r14 jne .LBB4_3 # %bb.6: # %._crit_edge34 # in Loop: Header=BB4_2 Depth=1 incq %r9 addl %edi, %ecx cmpq %rax, %r9 jne .LBB4_2 .LBB4_7: # %._crit_edge36 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z10hostMatMuliPcS_Pi, .Lfunc_end4-_Z10hostMatMuliPcS_Pi .cfi_endproc # -- End function .globl _Z6verifyiPiS_ # -- Begin function _Z6verifyiPiS_ .p2align 4, 0x90 .type _Z6verifyiPiS_,@function _Z6verifyiPiS_: # @_Z6verifyiPiS_ .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq 
%r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $40, %rsp .cfi_def_cfa_offset 96 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rdx, 32(%rsp) # 8-byte Spill movq %rsi, 24(%rsp) # 8-byte Spill movl %edi, 8(%rsp) # 4-byte Spill testl %edi, %edi jle .LBB5_7 # %bb.1: # %.preheader.lr.ph movl 8(%rsp), %eax # 4-byte Reload movq %rax, 16(%rsp) # 8-byte Spill leaq (,%rax,4), %r13 xorl %eax, %eax xorl %r14d, %r14d xorl %r15d, %r15d jmp .LBB5_2 .p2align 4, 0x90 .LBB5_6: # %._crit_edge # in Loop: Header=BB5_2 Depth=1 incq %r14 movl 12(%rsp), %eax # 4-byte Reload addl 8(%rsp), %eax # 4-byte Folded Reload cmpq 16(%rsp), %r14 # 8-byte Folded Reload je .LBB5_7 .LBB5_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB5_3 Depth 2 movl %eax, 12(%rsp) # 4-byte Spill movl %eax, %eax movq 32(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r12 movq 24(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rbp xorl %ebx, %ebx jmp .LBB5_3 .p2align 4, 0x90 .LBB5_5: # in Loop: Header=BB5_3 Depth=2 addq $4, %rbx cmpq %rbx, %r13 je .LBB5_6 .LBB5_3: # Parent Loop BB5_2 Depth=1 # => This Inner Loop Header: Depth=2 incl %r15d movl (%rbp,%rbx), %edx movl (%r12,%rbx), %ecx cmpl %ecx, %edx je .LBB5_5 # %bb.4: # in Loop: Header=BB5_3 Depth=2 movl $.L.str, %edi movl %r15d, %esi xorl %eax, %eax callq printf jmp .LBB5_5 .LBB5_7: # %._crit_edge32 xorl %eax, %eax addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _Z6verifyiPiS_, .Lfunc_end5-_Z6verifyiPiS_ .cfi_endproc # -- End function .globl _Z8printmatPiiPKc # -- Begin function _Z8printmatPiiPKc .p2align 4, 0x90 .type _Z8printmatPiiPKc,@function _Z8printmatPiiPKc: # 
@_Z8printmatPiiPKc .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %esi, %ebx movq %rdi, (%rsp) # 8-byte Spill movl $.L.str.1, %edi movq %rdx, %rsi xorl %eax, %eax callq printf testl %ebx, %ebx jle .LBB6_5 # %bb.1: # %.preheader.lr.ph movl %ebx, %r15d xorl %ebp, %ebp xorl %r12d, %r12d .p2align 4, 0x90 .LBB6_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB6_3 Depth 2 movl %ebp, %eax movq (%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r13 xorl %r14d, %r14d .p2align 4, 0x90 .LBB6_3: # Parent Loop BB6_2 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r13,%r14,4), %esi movl $.L.str.2, %edi xorl %eax, %eax callq printf incq %r14 cmpq %r14, %r15 jne .LBB6_3 # %bb.4: # %._crit_edge # in Loop: Header=BB6_2 Depth=1 movl $10, %edi callq putchar@PLT incq %r12 addl %ebx, %ebp cmpq %r15, %r12 jne .LBB6_2 .LBB6_5: # %._crit_edge15 movl $10, %edi addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp putchar@PLT # TAILCALL .Lfunc_end6: .size _Z8printmatPiiPKc, .Lfunc_end6-_Z8printmatPiiPKc .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $216, %rsp .cfi_def_cfa_offset 272 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 
.cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $3, %edi jne .LBB7_1 # %bb.2: movq 8(%rsi), %rdi movq %rsi, %rbx xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r14 movq 16(%rbx), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movl %eax, %edi callq srand leaq 32(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movl $0, 12(%rsp) movslq %r14d, %rbx movq %rbx, %r15 imulq %rbx, %r15 movq %r15, %rdi callq malloc movq %rax, %r12 movq %r15, %rdi callq malloc movq %rax, %rbp leaq (,%r15,4), %r13 movq %r13, %rdi callq malloc movq %rax, 72(%rsp) # 8-byte Spill movq %r13, 88(%rsp) # 8-byte Spill movq %r13, %rdi callq malloc movq %rax, 80(%rsp) # 8-byte Spill testl %ebx, %ebx je .LBB7_8 # %bb.3: # %.lr.ph.preheader.i movq %r14, 64(%rsp) # 8-byte Spill movl %r14d, %r13d imull %r13d, %r13d cmpl $1, %r13d movl %r13d, %ebx adcl $0, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB7_4: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1431655766, %rax, %rcx # imm = 0x55555556 movq %rcx, %rdx shrq $63, %rdx shrq $32, %rcx addl %edx, %ecx leal (%rcx,%rcx,2), %ecx subl %ecx, %eax decb %al movb %al, (%r12,%r14) incq %r14 cmpq %r14, %rbx jne .LBB7_4 # %bb.5: # %_Z10fillMatrixPci.exit movq 64(%rsp), %r14 # 8-byte Reload testl %r14d, %r14d je .LBB7_8 # %bb.6: # %.lr.ph.preheader.i50 cmpl $1, %r13d adcl $0, %r13d xorl %ebx, %ebx .p2align 4, 0x90 .LBB7_7: # %.lr.ph.i53 # =>This Inner Loop Header: Depth=1 callq rand cltq imulq $1431655766, %rax, %rcx # imm = 0x55555556 movq %rcx, %rdx shrq $63, %rdx shrq $32, %rcx addl %edx, %ecx leal (%rcx,%rcx,2), %ecx subl %ecx, %eax decb %al movb %al, (%rbp,%rbx) incq %rbx cmpq %rbx, %r13 jne .LBB7_7 .LBB7_8: # %_Z10fillMatrixPci.exit57 leaq 56(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 48(%rsp), %rdi movq %r15, %rsi callq hipMalloc leaq 40(%rsp), %rdi movq 88(%rsp), %rbx # 8-byte Reload movq %rbx, %rsi callq hipMalloc leaq 96(%rsp), %rdi movq %rbx, %rsi 
callq hipMalloc movq 56(%rsp), %rdi movq %r12, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 48(%rsp), %rdi movq %rbp, %rsi movq %r15, %rdx movl $1, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq 72(%rsp), %rsi # 8-byte Reload movq %rbx, %rdx movl $1, %ecx callq hipMemcpy movq 96(%rsp), %rdi movq 80(%rsp), %rsi # 8-byte Reload movq %rbx, %rdx movl $1, %ecx callq hipMemcpy leal 31(%r14), %eax testl %r14d, %r14d cmovnsl %r14d, %eax sarl $5, %eax movq %rax, %r15 shlq $32, %r15 orq %rax, %r15 movq 32(%rsp), %rdi movq %r14, %rbx xorl %r14d, %r14d xorl %esi, %esi callq hipEventRecord movabsq $137438953504, %rdx # imm = 0x2000000020 movq %r15, %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB7_10 # %bb.9: movq 56(%rsp), %rax movq 48(%rsp), %rcx movq 40(%rsp), %rdx movl %ebx, 28(%rsp) movq %rax, 168(%rsp) movq %rcx, 160(%rsp) movq %rdx, 152(%rsp) leaq 28(%rsp), %rax movq %rax, 176(%rsp) leaq 168(%rsp), %rax movq %rax, 184(%rsp) leaq 160(%rsp), %rax movq %rax, 192(%rsp) leaq 152(%rsp), %rax movq %rax, 200(%rsp) leaq 136(%rsp), %rdi leaq 120(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 136(%rsp), %rsi movl 144(%rsp), %edx movq 120(%rsp), %rcx movl 128(%rsp), %r8d leaq 176(%rsp), %r9 movl $_Z6MatMuliPcS_Pi, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB7_10: movq 16(%rsp), %rdi xorl %esi, %esi callq hipEventRecord callq hipDeviceSynchronize movq 16(%rsp), %rdi callq hipEventSynchronize movq 32(%rsp), %rsi movq 16(%rsp), %rdx leaq 12(%rsp), %rdi callq hipEventElapsedTime movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.5, %edi movb $1, %al callq printf jmp .LBB7_11 .LBB7_1: movl $.Lstr, %edi callq puts@PLT movl $1, %r14d .LBB7_11: movl %r14d, %eax addq $216, %rsp .cfi_def_cfa_offset 56 popq %rbx 
.cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end7: .size main, .Lfunc_end7-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB8_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB8_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6MatMuliPcS_Pi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12SharedMatMuliPcS_Pi, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end8: .size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB9_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB9_2: retq .Lfunc_end9: .size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor .cfi_endproc # -- End function .type _Z6MatMuliPcS_Pi,@object # @_Z6MatMuliPcS_Pi .section .rodata,"a",@progbits .globl _Z6MatMuliPcS_Pi .p2align 3, 0x0 _Z6MatMuliPcS_Pi: 
.quad _Z21__device_stub__MatMuliPcS_Pi .size _Z6MatMuliPcS_Pi, 8 .type _Z12SharedMatMuliPcS_Pi,@object # @_Z12SharedMatMuliPcS_Pi .globl _Z12SharedMatMuliPcS_Pi .p2align 3, 0x0 _Z12SharedMatMuliPcS_Pi: .quad _Z27__device_stub__SharedMatMuliPcS_Pi .size _Z12SharedMatMuliPcS_Pi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "FALLO %d\t%d!=%d\n" .size .L.str, 17 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "mat %s:\n" .size .L.str.1, 9 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%i " .size .L.str.2, 4 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "%f\n" .size .L.str.5, 4 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6MatMuliPcS_Pi" .size .L__unnamed_1, 17 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z12SharedMatMuliPcS_Pi" .size .L__unnamed_2, 24 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Ejecute como ./prog N" .size .Lstr, 22 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__MatMuliPcS_Pi .addrsig_sym _Z27__device_stub__SharedMatMuliPcS_Pi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6MatMuliPcS_Pi .addrsig_sym _Z12SharedMatMuliPcS_Pi .addrsig_sym 
__hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>   /* rand, malloc, atoi, exit */
#include <math.h>     /* fabs — float comparison in checkResult */

/* Naive and tiled kernels for transposing a rectangular host array. */

const int TILE_DIM = 32;

#define CHECK(call)                                                            \
{                                                                              \
    const cudaError_t error = call;                                            \
    if (error != cudaSuccess)                                                  \
    {                                                                          \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);                 \
        fprintf(stderr, "code: %d, reason: %s\n", error,                       \
                cudaGetErrorString(error));                                    \
        exit(1);                                                               \
    }                                                                          \
}

/* Fill `in` with `size` pseudo-random floats in [0.0, 25.5]. */
void initialData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        in[i] = (float)(rand() & 0xFF) / 10.0f;
    }
    return;
}

/* Print `size` elements of `in` on one line. */
void printData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        printf("%3.0f ", in[i]);
    }
    printf("\n");
    return;
}

/* Compare host and GPU results element-wise; report the first mismatch
 * per row and a summary if any element differs by more than epsilon. */
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            int index = i * cols + j;

            /* BUGFIX: was integer abs(), which truncated the float
             * difference toward zero and silently accepted any
             * mismatch smaller than 1.0. fabs() keeps full precision. */
            if (fabs(hostRef[index] - gpuRef[index]) > epsilon)
            {
                match = 0;
                printf("different on (%d, %d) (offset=%d) element in "
                       "transposed matrix: host %f gpu %f\n",
                       i, j, index, hostRef[index], gpuRef[index]);
                break;
            }
        }

        if (!match) break;
    }

    if (!match)  printf("Arrays do not match.\n\n");
}

/* Reference CPU transpose: out[ix * ncols + iy] = in[iy * nrows + ix]. */
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
    for (int iy = 0; iy < ncols; ++iy)
    {
        for (int ix = 0; ix < nrows; ++ix)
        {
            out[ix * ncols + iy] = in[iy * nrows + ix];
        }
    }
}

/* Copy kernel (no transposition) — gives the bandwidth upper bound. */
__global__ void justcopy(float *out, float *in, const int nrows,
                         const int ncols)
{
    // get matrix coordinate (ix,iy)
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    // copy data as-is with boundary test
    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[ix * ncols + iy];
    }
}

/* Naive transpose — strided (uncoalesced) reads, no optimisations. */
__global__ void naivetranspose(float *out, float *in, const int nrows,
                               const int ncols)
{
    // get matrix coordinate (ix,iy)
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    // transpose with boundary test
    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[iy * nrows + ix];
    }
}

/* Transpose where each thread handles blockDim.x/blockDim.y elements.
 * NOTE(review): assumes a square matrix whose side is a multiple of
 * gridDim.x * blockDim.x — no boundary test is performed. */
__global__ void lessnaivetranspose(float *out, float *in, const int nrows,
                                   const int ncols)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.x + threadIdx.y;
    unsigned int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
    {
        out[x * width + (y + j)] = in[(y + j) * width + x];
    }
}

/* Tiled transpose through shared memory so that both the global reads
 * and the global writes are coalesced. Same square-matrix assumption
 * as lessnaivetranspose; launch with blockDim.x == TILE_DIM. */
__global__ void opttranspose(float *out, float *in, const int nrows,
                             const int ncols)
{
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.x + threadIdx.y;
    int width = gridDim.x * blockDim.x;

    // stage a tile of the input in shared memory (coalesced reads)
    for (int j = 0; j < blockDim.x; j += blockDim.y)
        tile[threadIdx.y + j][threadIdx.x] = in[(y + j) * width + x];

    __syncthreads();

    x = blockIdx.y * blockDim.x + threadIdx.x;  // transpose block offset
    y = blockIdx.x * blockDim.x + threadIdx.y;

    // write the transposed tile back out (coalesced writes)
    for (int j = 0; j < blockDim.x; j += blockDim.y)
        out[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // initialise CUDA timing
    float milli;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    bool iprint = 0;

    // set up array size 1024
    int nrows = 1 << 10;
    int ncols = 1 << 10;
    int blockx = 16;
    int blocky = 16;

    // interpret command line arguments if present
    if (argc > 1) iprint = atoi(argv[1]);
    if (argc > 2) blockx = atoi(argv[2]);
    if (argc > 3) blocky = atoi(argv[3]);
    if (argc > 4) nrows  = atoi(argv[4]);
    if (argc > 5) ncols  = atoi(argv[5]);

    printf(" with matrix nrows %d ncols %d\n", nrows, ncols);

    size_t ncells = nrows * ncols;
    size_t nBytes = ncells * sizeof(float);

    // execution configuration
    dim3 block (blockx, blocky);
    dim3 grid  ((nrows + block.x - 1) / block.x,
                (ncols + block.y - 1) / block.y);
    dim3 grid_opt (nrows / block.x, ncols / block.x);

    // allocate host memory
    float *h_A     = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef  = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nrows * ncols);

    // transpose at host side
    transposeHost(hostRef, h_A, nrows, ncols);

    // allocate device memory
    float *d_A, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));

    // execute justcopy kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    cudaEventRecord(start);  // start timing

    justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);  // stop timing actual kernel execution

    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    if (iprint) printData(gpuRef, nrows * ncols);

    // convert bytes and millisec to GB/sec
    float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0)
                 / (milli / 1000);
    // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000;
    printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block "
           "(%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid.x, grid.y, block.x, block.y, ibnd);

    // execute optimised transpose kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    cudaEventRecord(start);  // start timing

    // naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols);
    // lessnaivetranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    opttranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);  // stop timing actual kernel execution

    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    if (iprint) printData(gpuRef, ncells);

    ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0)
           / (milli / 1000);
    printf("optimised transpose elapsed %f msec <<< grid (%d,%d) block "
           "(%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid_opt.x, grid_opt.y, block.x, block.y, ibnd);

    checkResult(hostRef, gpuRef, ncols, nrows);

    // free host and device memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(cudaDeviceReset());

    return EXIT_SUCCESS;
}
code for sm_80 Function : _Z12opttransposePfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */ /* 0x000e220000002500 */ /*0020*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fe20003f25270 */ /*0030*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe40000000800 */ /*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e620000002100 */ /*0050*/ ULDC UR5, c[0x0][0xc] ; /* 0x0000030000057ab9 */ /* 0x000fe40000000800 */ /*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0070*/ S2R R8, SR_CTAID.Y ; /* 0x0000000000087919 */ /* 0x000ea20000002600 */ /*0080*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x000fc6000f8e023f */ /*0090*/ S2R R11, SR_TID.Y ; /* 0x00000000000b7919 */ /* 0x000ee40000002200 */ /*00a0*/ @!P1 BRA 0x190 ; /* 0x000000e000009947 */ /* 0x000fea0003800000 */ /*00b0*/ HFMA2.MMA R5, -RZ, RZ, 0, 0 ; /* 0x00000000ff057435 */ /* 0x000fe200000001ff */ /*00c0*/ IMAD R0, R8, c[0x0][0x0], R11 ; /* 0x0000000008007a24 */ /* 0x00cfe400078e020b */ /*00d0*/ IMAD R4, R7, c[0x0][0x0], R9 ; /* 0x0000000007047a24 */ /* 0x003fce00078e0209 */ /*00e0*/ IMAD.IADD R3, R0, 0x1, R5 ; /* 0x0000000100037824 */ /* 0x000fe200078e0205 */ /*00f0*/ MOV R2, 0x4 ; /* 0x0000000400027802 */ /* 0x001fc60000000f00 */ /*0100*/ IMAD R3, R3, UR4, R4 ; /* 0x0000000403037c24 */ /* 0x000fc8000f8e0204 */ /*0110*/ IMAD.WIDE R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */ /* 0x000fcc00078e0202 */ /*0120*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */ /* 0x000ea2000c1e1900 */ /*0130*/ IMAD.IADD R6, R11, 0x1, R5 ; /* 0x000000010b067824 */ /* 0x000fe200078e0205 */ /*0140*/ IADD3 R5, R5, c[0x0][0x4], RZ ; /* 0x0000010005057a10 */ /* 0x000fc80007ffe0ff */ /*0150*/ LEA R13, R6, R9, 0x5 ; /* 0x00000009060d7211 */ 
/* 0x000fe400078e28ff */ /*0160*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x0], PT ; /* 0x0000000005007a0c */ /* 0x000fc60003f06070 */ /*0170*/ STS [R13.X4], R2 ; /* 0x000000020d007388 */ /* 0x0041f40000004800 */ /*0180*/ @!P0 BRA 0xe0 ; /* 0xffffff5000008947 */ /* 0x000fea000383ffff */ /*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*01a0*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*01b0*/ IMAD R0, R8, c[0x0][0x0], R9 ; /* 0x0000000008007a24 */ /* 0x006fe200078e0209 */ /*01c0*/ MOV R4, RZ ; /* 0x000000ff00047202 */ /* 0x000fe20000000f00 */ /*01d0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff067424 */ /* 0x000fc600078e00ff */ /*01e0*/ IADD3 R2, R11, R4, RZ ; /* 0x000000040b027210 */ /* 0x009fe40007ffe0ff */ /*01f0*/ IADD3 R4, R4, c[0x0][0x4], RZ ; /* 0x0000010004047a10 */ /* 0x000fc40007ffe0ff */ /*0200*/ LEA R5, R9, R2, 0x5 ; /* 0x0000000209057211 */ /* 0x000fe200078e28ff */ /*0210*/ IMAD R3, R7, c[0x0][0x0], R2 ; /* 0x0000000007037a24 */ /* 0x000fe200078e0202 */ /*0220*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x0], PT ; /* 0x0000000004007a0c */ /* 0x000fc60003f06070 */ /*0230*/ IMAD R3, R3, UR4, R0 ; /* 0x0000000403037c24 */ /* 0x000fe2000f8e0200 */ /*0240*/ LDS R5, [R5.X4] ; /* 0x0000000005057984 */ /* 0x000e260000004800 */ /*0250*/ IMAD.WIDE R2, R3, R6, c[0x0][0x160] ; /* 0x0000580003027625 */ /* 0x000fca00078e0206 */ /*0260*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x0011e2000c101906 */ /*0270*/ @!P0 BRA 0x1e0 ; /* 0xffffff6000008947 */ /* 0x000fea000383ffff */ /*0280*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0290*/ BRA 0x290; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z18lessnaivetransposePfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fc60003f05270 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e740000002100 */ /*0040*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */ /* 0x000ea20000002200 */ /*0060*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x003fe200078e0203 */ /*0080*/ ULDC UR5, c[0x0][0xc] ; /* 0x0000030000057ab9 */ /* 0x000fe20000000800 */ /*0090*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000ea20000002600 */ /*00a0*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x000fe2000f8e023f */ /*00b0*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */ /* 0x000fe200000001ff */ /*00c0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fc80000000a00 */ /*00d0*/ IMAD R6, R0, UR4, RZ ; /* 0x0000000400067c24 */ /* 0x000fe4000f8e02ff */ /*00e0*/ IMAD R7, R2, c[0x0][0x0], R7 ; /* 0x0000000002077a24 */ /* 
0x004fca00078e0207 */ /*00f0*/ IADD3 R5, R7, R8, RZ ; /* 0x0000000807057210 */ /* 0x001fe40007ffe0ff */ /*0100*/ MOV R4, 0x4 ; /* 0x0000000400047802 */ /* 0x000fc60000000f00 */ /*0110*/ IMAD R2, R5, UR4, R0 ; /* 0x0000000405027c24 */ /* 0x000fc8000f8e0200 */ /*0120*/ IMAD.WIDE.U32 R2, R2, R4, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fcc00078e0004 */ /*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */ /* 0x000ea2000c1e1900 */ /*0140*/ IADD3 R8, R8, c[0x0][0x4], RZ ; /* 0x0000010008087a10 */ /* 0x000fe40007ffe0ff */ /*0150*/ IADD3 R5, R6, R5, RZ ; /* 0x0000000506057210 */ /* 0x000fe40007ffe0ff */ /*0160*/ ISETP.GE.U32.AND P0, PT, R8, c[0x0][0x0], PT ; /* 0x0000000008007a0c */ /* 0x000fc60003f06070 */ /*0170*/ IMAD.WIDE.U32 R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fca00078e0004 */ /*0180*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x0041ea000c101906 */ /*0190*/ @!P0 BRA 0xf0 ; /* 0xffffff5000008947 */ /* 0x000fea000383ffff */ /*01a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z14naivetransposePfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x174], PT ; /* 0x00005d0005007a0c */ /* 0x000fe20003f06070 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */ /* 0x000fda0000706470 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R2, R5, c[0x0][0x170], R0 ; /* 0x00005c0005027a24 */ /* 0x000fe200078e0200 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE.U32 R2, R2, R4, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fcc00078e0004 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD R5, R0, c[0x0][0x174], R5 ; /* 0x00005d0000057a24 */ /* 0x000fc800078e0205 */ /*0100*/ IMAD.WIDE.U32 R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fca00078e0004 */ /*0110*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z8justcopyPfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GE.U32.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */ /* 0x000fe20003f06070 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */ /* 0x000fda0000706470 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R0, R0, c[0x0][0x174], R3 ; /* 0x00005d0000007a24 */ /* 0x000fe200078e0203 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE.U32 R2, R0, R5, 
c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fcc00078e0005 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */ /* 0x000fca00078e0005 */ /*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>   // rand, malloc, atoi, exit (previously pulled in transitively)
#include <string.h>   // memset
#include <math.h>     // fabs — see checkResult fix below

/* Naive kernel for transposing a rectangular host array. */

const int TILE_DIM = 32;

// Abort with file/line and the CUDA error string when a runtime call fails.
#define CHECK(call)                                                        \
{                                                                          \
    const cudaError_t error = call;                                        \
    if (error != cudaSuccess)                                              \
    {                                                                      \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);             \
        fprintf(stderr, "code: %d, reason: %s\n", error,                   \
                cudaGetErrorString(error));                                \
        exit(1);                                                           \
    }                                                                      \
}

// Fill `in` with `size` pseudo-random floats in [0.0, 25.5].
void initialData(float *in, const int size)
{
    // initialise matrix
    for (int i = 0; i < size; i++)
    {
        in[i] = (float)(rand() & 0xFF) / 10.0f;
    }

    return;
}

// Print `size` elements of `in` on one line (debug aid, enabled via argv[1]).
void printData(float *in, const int size)
{
    // print matrix
    for (int i = 0; i < size; i++)
    {
        printf("%3.0f ", in[i]);
    }

    printf("\n");
    return;
}

// Compare host and GPU results element-wise; report the first mismatch.
// rows/cols describe the layout of the *transposed* matrices being compared.
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
    // check that transposed matrix is correct
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            int index = i*cols + j;

            // FIX: was abs(...), which can resolve to the integer overload and
            // truncate the difference to 0 for mismatches smaller than 1.0,
            // silently passing wrong results. fabs keeps the comparison in
            // floating point.
            if (fabs(hostRef[index] - gpuRef[index]) > epsilon)
            {
                match = 0;
                printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n",
                       i, j, index, hostRef[index], gpuRef[index]);
                break;
            }
        }

        if (!match) break;
    }

    if (!match)  printf("Arrays do not match.\n\n");
}

// CPU reference transpose: out is ncols x nrows, in is nrows x ncols
// (column-major walk of `in`, row-major writes to `out`).
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
    // transpose using CPU
    for (int iy = 0; iy < ncols; ++iy)
    {
        for (int ix = 0; ix < nrows; ++ix)
        {
            out[ix * ncols + iy] = in[iy * nrows + ix];
        }
    }
}

__global__ void justcopy(float *out, float *in, const int nrows, const int ncols)
{
    // routine to copy data from one matrix to another -- no transposition done
    // get matrix coordinate (ix,iy)
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    // copy data as is with boundary test
    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[ix * ncols + iy];
    }
}

__global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    // naive routine to transpose a matrix -- no optimisations considered
    // get matrix coordinate (ix,iy)
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    // transpose with boundary test
    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[iy * nrows + ix];
    }
}

// Tiled transpose without shared memory: each thread block covers a
// blockDim.x x blockDim.x tile, looping over rows in steps of blockDim.y.
// NOTE(review): assumes a square grid where gridDim.x*blockDim.x equals the
// matrix width and blockDim.x is a multiple of blockDim.y — no bounds checks.
__global__ void lessnaivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.x + threadIdx.y;   // blockDim.x on purpose: square tile
    unsigned int width = gridDim.x * blockDim.x;

    for (int j=0; j<blockDim.x; j+=blockDim.y)
    {
        out[x*width + (y+j)] = in[(y+j)*width + x];
    }
}

// Shared-memory tiled transpose: stage a tile in shared memory so both the
// global read and the global write are coalesced.
// NOTE(review): tile[TILE_DIM][TILE_DIM] has 32-way shared-memory bank
// conflicts on the tile[threadIdx.x][...] reads; padding to [TILE_DIM][TILE_DIM+1]
// would remove them without changing results (left unchanged here).
__global__ void opttranspose(float *out, float *in, const int nrows, const int ncols)
{
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.x + threadIdx.y;
    int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        tile[threadIdx.y+j][threadIdx.x] = in[(y+j)*width + x];

    __syncthreads();

    x = blockIdx.y * blockDim.x + threadIdx.x;  // transpose block offset
    y = blockIdx.x * blockDim.x + threadIdx.y;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        out[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}

// Usage: prog [iprint] [blockx] [blocky] [nrows] [ncols]
// Times a straight copy and the shared-memory transpose, reporting effective
// bandwidth for each, and verifies the transpose against the CPU reference.
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // initialise CUDA timing
    float milli;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    bool iprint = 0;

    // set up array size 1024
    int nrows = 1 << 10;
    int ncols = 1 << 10;

    int blockx = 16;
    int blocky = 16;

    // interpret command line arguments if present
    if (argc > 1) iprint = atoi(argv[1]);
    if (argc > 2) blockx = atoi(argv[2]);
    if (argc > 3) blocky = atoi(argv[3]);
    if (argc > 4) nrows  = atoi(argv[4]);
    if (argc > 5) ncols  = atoi(argv[5]);

    printf(" with matrix nrows %d ncols %d\n", nrows, ncols);

    // FIX: widen before multiplying — nrows * ncols was evaluated in int and
    // could overflow before the size_t conversion for large matrices.
    size_t ncells = (size_t)nrows * ncols;
    size_t nBytes = ncells * sizeof(float);

    // execution configuration
    dim3 block (blockx, blocky);
    dim3 grid  ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y);
    dim3 grid_opt (nrows/block.x, ncols/block.x);   // square tiles for opttranspose

    // allocate host memory
    float *h_A     = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef  = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nrows * ncols);

    // transpose at host side
    transposeHost(hostRef, h_A, nrows, ncols);

    // allocate device memory
    float *d_A, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));

    // execute justcopy kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    cudaEventRecord(start);   // start timing

    justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, nrows * ncols);

    // convert bytes and millisec to GB/sec (read + write => factor 2)
    float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000;
    printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid.x, grid.y, block.x, block.y, ibnd);

    // execute naive transpose kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    cudaEventRecord(start);   // start timing

    // naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols);
    // lessnaivetranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    opttranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, ncells);

    ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    printf("optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid_opt.x, grid_opt.y, block.x, block.y, ibnd);

    checkResult(hostRef, gpuRef, ncols, nrows);

    // free host and device memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(cudaDeviceReset());

    return EXIT_SUCCESS;
}
.file "tmpxft_0010989e_00000000-6_junetranspose-opt.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2064: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2064: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11initialDataPfi .type _Z11initialDataPfi, @function _Z11initialDataPfi: .LFB2057: .cfi_startproc endbr64 testl %esi, %esi jle .L8 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %rbp .L5: call rand@PLT movzbl %al, %eax pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 divss .LC0(%rip), %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L5 addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 ret .cfi_endproc .LFE2057: .size _Z11initialDataPfi, .-_Z11initialDataPfi .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "%3.0f " .LC2: .string "\n" .text .globl _Z9printDataPfi .type _Z9printDataPfi, @function _Z9printDataPfi: .LFB2058: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 testl %esi, %esi jle .L12 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %r12 leaq .LC1(%rip), %rbp .L13: pxor %xmm0, %xmm0 cvtss2sd (%rbx), %xmm0 movq %rbp, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $4, %rbx cmpq %r12, %rbx jne .L13 .L12: leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size 
_Z9printDataPfi, .-_Z9printDataPfi .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC5: .string "different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n" .section .rodata.str1.1 .LC6: .string "Arrays do not match.\n\n" .text .globl _Z11checkResultPfS_ii .type _Z11checkResultPfS_ii, @function _Z11checkResultPfS_ii: .LFB2059: .cfi_startproc endbr64 testl %edx, %edx jle .L28 movq %rdi, %r8 movq %rsi, %r9 movl %edx, %r10d movl %ecx, %r11d movl $0, %esi movl $0, %edi movslq %ecx, %rdx movss .LC3(%rip), %xmm4 movsd .LC4(%rip), %xmm3 .L29: testl %r11d, %r11d jle .L33 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 jmp .L22 .L34: cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 leal (%rsi,%rcx), %r8d movl %edi, %edx leaq .LC5(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L16: popq %rbx .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state addl $1, %edi addl %r11d, %esi cmpl %edi, %r10d je .L16 testl %r11d, %r11d jle .L23 .L22: movslq %esi, %rax salq $2, %rax leaq (%r8,%rax), %rbx addq %r9, %rax movl $0, %ecx .L21: movss (%rbx,%rcx,4), %xmm0 movss (%rax,%rcx,4), %xmm1 movaps %xmm0, %xmm2 subss %xmm1, %xmm2 andps %xmm4, %xmm2 cvtss2sd %xmm2, %xmm2 comisd %xmm3, %xmm2 ja .L34 addq $1, %rcx cmpq %rdx, %rcx jne .L21 jmp .L23 .L33: .cfi_def_cfa_offset 8 .cfi_restore 3 addl $1, %edi addl %r11d, %esi cmpl %edi, %r10d jne .L29 .L28: ret .cfi_endproc .LFE2059: .size _Z11checkResultPfS_ii, .-_Z11checkResultPfS_ii .globl _Z13transposeHostPfS_ii .type _Z13transposeHostPfS_ii, @function _Z13transposeHostPfS_ii: .LFB2060: .cfi_startproc endbr64 testl %ecx, %ecx jle .L43 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 movq %rsi, %rbx movl %edx, %r11d movslq %ecx, %r8 salq $2, %r8 movl $0, %r10d movl $0, %r9d movslq %edx, %rbp jmp .L37 .L39: movslq %r10d, %rdx leaq (%rbx,%rdx,4), %rax addq %rbp, %rdx 
leaq (%rbx,%rdx,4), %rsi movq %rdi, %rdx .L38: movss (%rax), %xmm0 movss %xmm0, (%rdx) addq $4, %rax addq %r8, %rdx cmpq %rsi, %rax jne .L38 .L40: addl $1, %r9d addq $4, %rdi addl %r11d, %r10d cmpl %r9d, %ecx je .L35 .L37: testl %r11d, %r11d jg .L39 jmp .L40 .L35: popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L43: .cfi_restore 3 .cfi_restore 6 ret .cfi_endproc .LFE2060: .size _Z13transposeHostPfS_ii, .-_Z13transposeHostPfS_ii .globl _Z31__device_stub__Z8justcopyPfS_iiPfS_ii .type _Z31__device_stub__Z8justcopyPfS_iiPfS_ii, @function _Z31__device_stub__Z8justcopyPfS_iiPfS_ii: .LFB2086: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L50 .L46: movq 136(%rsp), %rax subq %fs:40, %rax jne .L51 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L50: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8justcopyPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L46 .L51: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z31__device_stub__Z8justcopyPfS_iiPfS_ii, .-_Z31__device_stub__Z8justcopyPfS_iiPfS_ii .globl _Z8justcopyPfS_ii .type _Z8justcopyPfS_ii, @function _Z8justcopyPfS_ii: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z8justcopyPfS_iiPfS_ii addq 
$8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z8justcopyPfS_ii, .-_Z8justcopyPfS_ii .globl _Z38__device_stub__Z14naivetransposePfS_iiPfS_ii .type _Z38__device_stub__Z14naivetransposePfS_iiPfS_ii, @function _Z38__device_stub__Z14naivetransposePfS_iiPfS_ii: .LFB2088: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L58 .L54: movq 136(%rsp), %rax subq %fs:40, %rax jne .L59 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L58: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14naivetransposePfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L54 .L59: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z38__device_stub__Z14naivetransposePfS_iiPfS_ii, .-_Z38__device_stub__Z14naivetransposePfS_iiPfS_ii .globl _Z14naivetransposePfS_ii .type _Z14naivetransposePfS_ii, @function _Z14naivetransposePfS_ii: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z38__device_stub__Z14naivetransposePfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z14naivetransposePfS_ii, .-_Z14naivetransposePfS_ii .globl _Z42__device_stub__Z18lessnaivetransposePfS_iiPfS_ii .type _Z42__device_stub__Z18lessnaivetransposePfS_iiPfS_ii, @function 
_Z42__device_stub__Z18lessnaivetransposePfS_iiPfS_ii: .LFB2090: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L66 .L62: movq 136(%rsp), %rax subq %fs:40, %rax jne .L67 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L66: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18lessnaivetransposePfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L62 .L67: call __stack_chk_fail@PLT .cfi_endproc .LFE2090: .size _Z42__device_stub__Z18lessnaivetransposePfS_iiPfS_ii, .-_Z42__device_stub__Z18lessnaivetransposePfS_iiPfS_ii .globl _Z18lessnaivetransposePfS_ii .type _Z18lessnaivetransposePfS_ii, @function _Z18lessnaivetransposePfS_ii: .LFB2091: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18lessnaivetransposePfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _Z18lessnaivetransposePfS_ii, .-_Z18lessnaivetransposePfS_ii .globl _Z36__device_stub__Z12opttransposePfS_iiPfS_ii .type _Z36__device_stub__Z12opttransposePfS_iiPfS_ii, @function _Z36__device_stub__Z12opttransposePfS_iiPfS_ii: .LFB2092: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 
136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L74 .L70: movq 136(%rsp), %rax subq %fs:40, %rax jne .L75 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L74: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12opttransposePfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L70 .L75: call __stack_chk_fail@PLT .cfi_endproc .LFE2092: .size _Z36__device_stub__Z12opttransposePfS_iiPfS_ii, .-_Z36__device_stub__Z12opttransposePfS_iiPfS_ii .globl _Z12opttransposePfS_ii .type _Z12opttransposePfS_ii, @function _Z12opttransposePfS_ii: .LFB2093: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z12opttransposePfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2093: .size _Z12opttransposePfS_ii, .-_Z12opttransposePfS_ii .section .rodata.str1.8 .align 8 .LC7: .string "/home/ubuntu/Datasets/stackv2/train-structured/RhodesCS2016/gpgpu/master/glinklater/exam/junetranspose-opt.cu" .section .rodata.str1.1 .LC8: .string "Error: %s:%d, " .LC9: .string "code: %d, reason: %s\n" .LC10: .string "%s starting transpose at " .LC11: .string "device %d: %s " .section .rodata.str1.8 .align 8 .LC12: .string " with matrix nrows %d ncols %d\n" .align 8 .LC15: .string "justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n" .align 8 .LC16: .string "optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective 
bandwidth %f GB/s\n" .text .globl main .type main, @function main: .LFB2061: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $1192, %rsp .cfi_def_cfa_offset 1248 movl %edi, %ebp movq %rsi, %rbx movq %fs:40, %rax movq %rax, 1176(%rsp) xorl %eax, %eax leaq 144(%rsp), %rdi movl $0, %esi call cudaGetDeviceProperties_v2@PLT testl %eax, %eax jne .L107 movq (%rbx), %rdx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq 144(%rsp), %rcx movl $0, %edx leaq .LC11(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $0, %edi call cudaSetDevice@PLT movl %eax, %r12d testl %eax, %eax jne .L108 leaq 72(%rsp), %rdi call cudaEventCreate@PLT leaq 80(%rsp), %rdi call cudaEventCreate@PLT cmpl $1, %ebp jg .L109 movl $16, %r12d movl $16, %r15d movl $1024, %r13d movb $0, 51(%rsp) movl $1024, %r14d .L81: movl %r14d, %ecx movl %r13d, %edx leaq .LC12(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl %r14d, %eax imull %r13d, %eax movl %eax, 52(%rsp) movslq %eax, %rcx movq %rcx, 56(%rsp) leaq 0(,%rcx,4), %rbx movl %r12d, 12(%rsp) movl %r15d, %ebp movl $1, 116(%rsp) leal -1(%r14,%r12), %eax movl $0, %edx divl %r12d movl %eax, 24(%rsp) leal -1(%r13,%r15), %eax movl $0, %edx divl %r15d movl %eax, 28(%rsp) movl $1, 128(%rsp) movl %r14d, %eax movl $0, %edx divl %r15d movl %eax, 40(%rsp) movl %r13d, %eax movl $0, %edx divl %r15d movl %eax, 44(%rsp) movl $1, 140(%rsp) movq %rbx, %rdi call malloc@PLT movq %rax, %r15 movq %rax, 32(%rsp) movq %rbx, %rdi call malloc@PLT movq %rax, 16(%rsp) movq %rbx, %rdi call malloc@PLT movq %rax, %r12 movl 52(%rsp), %esi movq %r15, %rdi call _Z11initialDataPfi movl %r14d, %ecx movl %r13d, %edx movq %r15, %rsi movq 
16(%rsp), %rdi call _Z13transposeHostPfS_ii leaq 88(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl %eax, %r15d testl %eax, %eax jne .L110 leaq 96(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT movl %eax, %r15d testl %eax, %eax jne .L111 movl $1, %ecx movq %rbx, %rdx movq 32(%rsp), %rsi movq 88(%rsp), %rdi call cudaMemcpy@PLT movl %eax, %r15d testl %eax, %eax jne .L112 movq %rbx, %rdx movl $0, %esi movq 96(%rsp), %rdi call cudaMemset@PLT movl %eax, %r15d testl %eax, %eax jne .L113 movq %rbx, %rcx movq %rbx, %rdx movl $0, %esi movq %r12, %rdi call __memset_chk@PLT movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT movl 28(%rsp), %eax movl %eax, 120(%rsp) movl 24(%rsp), %eax movl %eax, 124(%rsp) movl %ebp, 108(%rsp) movl 12(%rsp), %eax movl %eax, 112(%rsp) movl 116(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 108(%rsp), %rdx movq 120(%rsp), %rdi movl 128(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L114 .L86: call cudaDeviceSynchronize@PLT movl %eax, %r15d testl %eax, %eax jne .L115 movl $0, %esi movq 80(%rsp), %rdi call cudaEventRecord@PLT movq 80(%rsp), %rdi call cudaEventSynchronize@PLT leaq 68(%rsp), %rdi movq 80(%rsp), %rdx movq 72(%rsp), %rsi call cudaEventElapsedTime@PLT movl $2, %ecx movq %rbx, %rdx movq 96(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl %eax, %r15d testl %eax, %eax jne .L116 cmpb $0, 51(%rsp) jne .L117 .L89: movq 56(%rsp), %rax salq $3, %rax js .L90 pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 .L91: movapd %xmm0, %xmm3 mulsd .LC13(%rip), %xmm3 movsd %xmm3, 56(%rsp) movss 68(%rsp), %xmm0 movaps %xmm0, %xmm2 divss .LC14(%rip), %xmm2 cvtss2sd %xmm2, %xmm2 divsd %xmm2, %xmm3 pxor %xmm1, %xmm1 cvtsd2ss %xmm3, %xmm1 cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 movl 12(%rsp), %r9d movl %ebp, %r8d movl 24(%rsp), %ecx movl 28(%rsp), %edx leaq .LC15(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT movq %rbx, %rdx movl $0, %esi movq 96(%rsp), %rdi call cudaMemset@PLT movl %eax, %r15d testl %eax, %eax 
jne .L118 movq %rbx, %rcx movq %rbx, %rdx movl $0, %esi movq %r12, %rdi call __memset_chk@PLT movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT movl 44(%rsp), %eax movl %eax, 132(%rsp) movl 40(%rsp), %eax movl %eax, 136(%rsp) movl 116(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 108(%rsp), %rdx movq 132(%rsp), %rdi movl 140(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L119 .L93: call cudaDeviceSynchronize@PLT movl %eax, %r15d testl %eax, %eax jne .L120 movl $0, %esi movq 80(%rsp), %rdi call cudaEventRecord@PLT movq 80(%rsp), %rdi call cudaEventSynchronize@PLT leaq 68(%rsp), %rdi movq 80(%rsp), %rdx movq 72(%rsp), %rsi call cudaEventElapsedTime@PLT movl $2, %ecx movq %rbx, %rdx movq 96(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl %eax, %ebx testl %eax, %eax jne .L121 cmpb $0, 51(%rsp) jne .L122 .L96: movss 68(%rsp), %xmm0 movaps %xmm0, %xmm2 divss .LC14(%rip), %xmm2 cvtss2sd %xmm2, %xmm2 movsd 56(%rsp), %xmm1 divsd %xmm2, %xmm1 cvtsd2ss %xmm1, %xmm1 cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 movl 12(%rsp), %r9d movl %ebp, %r8d movl 40(%rsp), %ecx movl 44(%rsp), %edx leaq .LC16(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT movl %r13d, %ecx movl %r14d, %edx movq %r12, %rsi movq 16(%rsp), %rdi call _Z11checkResultPfS_ii movq 88(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L123 movq 96(%rsp), %rdi call cudaFree@PLT movl %eax, %ebx testl %eax, %eax jne .L124 movq 32(%rsp), %rdi call free@PLT movq 16(%rsp), %rdi call free@PLT movq %r12, %rdi call free@PLT call cudaDeviceReset@PLT movl %eax, %ebx testl %eax, %eax jne .L125 movq 1176(%rsp), %rax subq %fs:40, %rax jne .L126 movl $0, %eax addq $1192, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L107: .cfi_restore_state movl %eax, %r12d movl 
$126, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r12d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r12d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L108: movl $129, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r12d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r12d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L109: movq 8(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT testl %eax, %eax setne 51(%rsp) cmpl $2, %ebp jg .L127 movl $16, %r12d movl $16, %r15d movl $1024, %r13d movl $1024, %r14d jmp .L81 .L127: movq 16(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %r15d cmpl $3, %ebp jle .L103 movq 24(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %r12d cmpl $4, %ebp jg .L128 movl $1024, %r13d movl $1024, %r14d jmp .L81 .L128: movq 32(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %r13d movl $1024, %r14d cmpl $5, %ebp jle .L81 movq 40(%rbx), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %r14d jmp .L81 .L103: movl $16, %r12d movl $1024, %r13d movl $1024, %r14d jmp .L81 .L110: movl $179, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L111: movl $180, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call 
cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L112: movl $183, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L113: movl $186, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L114: movl %r14d, %ecx movl %r13d, %edx movq 88(%rsp), %rsi movq 96(%rsp), %rdi call _Z31__device_stub__Z8justcopyPfS_iiPfS_ii jmp .L86 .L115: movl $191, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L116: movl $196, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L117: movl 52(%rsp), %esi movq %r12, %rdi call _Z9printDataPfi jmp .L89 .L90: shrq %rax pxor %xmm0, %xmm0 cvtsi2sdq %rax, %xmm0 addsd %xmm0, %xmm0 jmp .L91 .L118: movl $205, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl 
%r15d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L119: movl %r14d, %ecx movl %r13d, %edx movq 88(%rsp), %rsi movq 96(%rsp), %rdi call _Z36__device_stub__Z12opttransposePfS_iiPfS_ii jmp .L93 .L120: movl $212, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %r15d, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %r15d, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L121: movl $217, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %ebx, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L122: movl 52(%rsp), %esi movq %r12, %rdi call _Z9printDataPfi jmp .L96 .L123: movl $226, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %ebx, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L124: movl $227, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %ebx, %ecx leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L125: movl $233, %r8d leaq .LC7(%rip), %rcx leaq .LC8(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl %ebx, %ecx 
leaq .LC9(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L126: call __stack_chk_fail@PLT .cfi_endproc .LFE2061: .size main, .-main .section .rodata.str1.1 .LC17: .string "_Z12opttransposePfS_ii" .LC18: .string "_Z18lessnaivetransposePfS_ii" .LC19: .string "_Z14naivetransposePfS_ii" .LC20: .string "_Z8justcopyPfS_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2095: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC17(%rip), %rdx movq %rdx, %rcx leaq _Z12opttransposePfS_ii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC18(%rip), %rdx movq %rdx, %rcx leaq _Z18lessnaivetransposePfS_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC19(%rip), %rdx movq %rdx, %rcx leaq _Z14naivetransposePfS_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC20(%rip), %rdx movq %rdx, %rcx leaq _Z8justcopyPfS_ii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq 
_ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2095: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1092616192 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC3: .long 2147483647 .long 0 .long 0 .long 0 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC4: .long -500134854 .long 1044740494 .align 8 .LC13: .long 0 .long 1041235968 .section .rodata.cst4 .align 4 .LC14: .long 1148846080 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>  /* rand, atoi, malloc/free, exit, EXIT_SUCCESS */
#include <math.h>    /* fabs */

/* Naive kernel for transposing a rectangular host array. */

const int TILE_DIM = 32;

/* Abort with file/line context if a CUDA runtime call fails. */
#define CHECK(call)                                                   \
{                                                                     \
    const cudaError_t error = call;                                   \
    if (error != cudaSuccess)                                         \
    {                                                                 \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);        \
        fprintf(stderr, "code: %d, reason: %s\n", error,              \
                cudaGetErrorString(error));                           \
        exit(1);                                                      \
    }                                                                 \
}

/* Fill `in` with `size` pseudo-random floats in [0, 25.5]. */
void initialData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        in[i] = (float)(rand() & 0xFF) / 10.0f;
    }
    return;
}

/* Print `size` elements of `in` on one line (3-wide, no decimals). */
void printData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        printf("%3.0f ", in[i]);
    }
    printf("\n");
    return;
}

/* Compare hostRef and gpuRef element-wise; report the first mismatch.
 * rows/cols describe the layout of the transposed result. */
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            int index = i * cols + j;

            /* BUG FIX: the original called integer abs() on a float
             * difference, truncating toward zero so any mismatch with
             * |diff| < 1.0 was never detected. fabs() keeps the
             * fractional part for the epsilon comparison. */
            if (fabs(hostRef[index] - gpuRef[index]) > epsilon)
            {
                match = 0;
                printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n",
                       i, j, index, hostRef[index], gpuRef[index]);
                break;
            }
        }
        if (!match) break;
    }

    if (!match)  printf("Arrays do not match.\n\n");
}

/* Reference CPU transpose: out[ix][iy] = in[iy][ix]. */
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
    for (int iy = 0; iy < ncols; ++iy)
    {
        for (int ix = 0; ix < nrows; ++ix)
        {
            out[ix * ncols + iy] = in[iy * nrows + ix];
        }
    }
}

__global__ void justcopy(float *out, float *in, const int nrows, const int ncols)
{
    /* Copy data from one matrix to another -- no transposition done.
     * Used to measure an upper bound on achievable bandwidth. */
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[ix * ncols + iy];
    }
}

__global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Naive transpose -- coalesced writes, strided (uncoalesced) reads. */
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[iy * nrows + ix];
    }
}

__global__ void lessnaivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Each thread handles blockDim.x/blockDim.y elements of a column.
     * NOTE(review): assumes a square grid of square tiles (width =
     * gridDim.x * blockDim.x) with no bounds checks -- matrix dims must
     * be exact multiples of the tile size. */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.x + threadIdx.y;
    unsigned int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
    {
        out[x * width + (y + j)] = in[(y + j) * width + x];
    }
}

__global__ void opttranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Tiled transpose staged through shared memory so both the global
     * reads and writes are coalesced. Same exact-multiple assumption as
     * lessnaivetranspose. */
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.x + threadIdx.y;
    int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        tile[threadIdx.y + j][threadIdx.x] = in[(y + j) * width + x];

    __syncthreads();

    x = blockIdx.y * blockDim.x + threadIdx.x;  // transpose block offset
    y = blockIdx.x * blockDim.x + threadIdx.y;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        out[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // initialise CUDA timing
    float milli;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    bool iprint = 0;

    // set up array size 1024
    int nrows = 1 << 10;
    int ncols = 1 << 10;
    int blockx = 16;
    int blocky = 16;

    // interpret command line arguments if present
    if (argc > 1) iprint = atoi(argv[1]);
    if (argc > 2) blockx = atoi(argv[2]);
    if (argc > 3) blocky = atoi(argv[3]);
    if (argc > 4) nrows  = atoi(argv[4]);
    if (argc > 5) ncols  = atoi(argv[5]);

    printf(" with matrix nrows %d ncols %d\n", nrows, ncols);
    size_t ncells = nrows * ncols;
    size_t nBytes = ncells * sizeof(float);

    // execution configuration
    dim3 block (blockx, blocky);
    dim3 grid ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y);
    dim3 grid_opt (nrows / block.x, ncols / block.x);

    // allocate host memory
    float *h_A     = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef  = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nrows * ncols);

    // transpose at host side
    transposeHost(hostRef, h_A, nrows, ncols);

    // allocate device memory
    float *d_A, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));

    // execute justcopy kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    cudaEventRecord(start);   // start timing

    justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, nrows * ncols);

    // convert bytes and millisec to GB/sec
    float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000;
    printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid.x, grid.y, block.x, block.y, ibnd);

    // execute naive transpose kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    cudaEventRecord(start);   // start timing

    // naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols);
    // lessnaivetranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    opttranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, ncells);

    ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    printf("optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid_opt.x, grid_opt.y, block.x, block.y, ibnd);

    checkResult(hostRef, gpuRef, ncols, nrows);

    // free host and device memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>  /* rand, atoi, malloc/free, exit, EXIT_SUCCESS */
#include <math.h>    /* fabs */

/* Naive kernel for transposing a rectangular host array. */

const int TILE_DIM = 32;

/* Abort with file/line context if a HIP runtime call fails. */
#define CHECK(call)                                                   \
{                                                                     \
    const hipError_t error = call;                                    \
    if (error != hipSuccess)                                          \
    {                                                                 \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);        \
        fprintf(stderr, "code: %d, reason: %s\n", error,              \
                hipGetErrorString(error));                            \
        exit(1);                                                      \
    }                                                                 \
}

/* Fill `in` with `size` pseudo-random floats in [0, 25.5]. */
void initialData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        in[i] = (float)(rand() & 0xFF) / 10.0f;
    }
    return;
}

/* Print `size` elements of `in` on one line (3-wide, no decimals). */
void printData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        printf("%3.0f ", in[i]);
    }
    printf("\n");
    return;
}

/* Compare hostRef and gpuRef element-wise; report the first mismatch.
 * rows/cols describe the layout of the transposed result. */
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            int index = i * cols + j;

            /* BUG FIX: the original called integer abs() on a float
             * difference, truncating toward zero so any mismatch with
             * |diff| < 1.0 was never detected. fabs() keeps the
             * fractional part for the epsilon comparison. */
            if (fabs(hostRef[index] - gpuRef[index]) > epsilon)
            {
                match = 0;
                printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n",
                       i, j, index, hostRef[index], gpuRef[index]);
                break;
            }
        }
        if (!match) break;
    }

    if (!match)  printf("Arrays do not match.\n\n");
}

/* Reference CPU transpose: out[ix][iy] = in[iy][ix]. */
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
    for (int iy = 0; iy < ncols; ++iy)
    {
        for (int ix = 0; ix < nrows; ++ix)
        {
            out[ix * ncols + iy] = in[iy * nrows + ix];
        }
    }
}

__global__ void justcopy(float *out, float *in, const int nrows, const int ncols)
{
    /* Copy data from one matrix to another -- no transposition done.
     * Used to measure an upper bound on achievable bandwidth. */
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[ix * ncols + iy];
    }
}

__global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Naive transpose -- coalesced writes, strided (uncoalesced) reads. */
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[iy * nrows + ix];
    }
}

__global__ void lessnaivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Each thread handles blockDim.x/blockDim.y elements of a column.
     * NOTE(review): assumes a square grid of square tiles (width =
     * gridDim.x * blockDim.x) with no bounds checks -- matrix dims must
     * be exact multiples of the tile size. */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.x + threadIdx.y;
    unsigned int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
    {
        out[x * width + (y + j)] = in[(y + j) * width + x];
    }
}

__global__ void opttranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Tiled transpose staged through shared (LDS) memory so both the
     * global reads and writes are coalesced. Same exact-multiple
     * assumption as lessnaivetranspose. */
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.x + threadIdx.y;
    int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        tile[threadIdx.y + j][threadIdx.x] = in[(y + j) * width + x];

    __syncthreads();

    x = blockIdx.y * blockDim.x + threadIdx.x;  // transpose block offset
    y = blockIdx.x * blockDim.x + threadIdx.y;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        out[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    // initialise timing
    float milli;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    bool iprint = 0;

    // set up array size 1024
    int nrows = 1 << 10;
    int ncols = 1 << 10;
    int blockx = 16;
    int blocky = 16;

    // interpret command line arguments if present
    if (argc > 1) iprint = atoi(argv[1]);
    if (argc > 2) blockx = atoi(argv[2]);
    if (argc > 3) blocky = atoi(argv[3]);
    if (argc > 4) nrows  = atoi(argv[4]);
    if (argc > 5) ncols  = atoi(argv[5]);

    printf(" with matrix nrows %d ncols %d\n", nrows, ncols);
    size_t ncells = nrows * ncols;
    size_t nBytes = ncells * sizeof(float);

    // execution configuration
    dim3 block (blockx, blocky);
    dim3 grid ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y);
    dim3 grid_opt (nrows / block.x, ncols / block.x);

    // allocate host memory
    float *h_A     = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef  = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nrows * ncols);

    // transpose at host side
    transposeHost(hostRef, h_A, nrows, ncols);

    // allocate device memory
    float *d_A, *d_C;
    CHECK(hipMalloc((float**)&d_A, nBytes));
    CHECK(hipMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));

    // execute justcopy kernel
    CHECK(hipMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    hipEventRecord(start);   // start timing

    justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols);
    CHECK(hipDeviceSynchronize());

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, nrows * ncols);

    // convert bytes and millisec to GB/sec
    float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000;
    printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid.x, grid.y, block.x, block.y, ibnd);

    // execute naive transpose kernel
    CHECK(hipMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    hipEventRecord(start);   // start timing

    // naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols);
    // lessnaivetranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    opttranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    CHECK(hipDeviceSynchronize());

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, ncells);

    ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    printf("optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid_opt.x, grid_opt.y, block.x, block.y, ibnd);

    checkResult(hostRef, gpuRef, ncols, nrows);

    // free host and device memory
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(hipDeviceReset());
    return EXIT_SUCCESS;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>  /* rand, atoi, malloc/free, exit, EXIT_SUCCESS */
#include <math.h>    /* fabs */

/* Naive kernel for transposing a rectangular host array. */

const int TILE_DIM = 32;

/* Abort with file/line context if a HIP runtime call fails. */
#define CHECK(call)                                                   \
{                                                                     \
    const hipError_t error = call;                                    \
    if (error != hipSuccess)                                          \
    {                                                                 \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);        \
        fprintf(stderr, "code: %d, reason: %s\n", error,              \
                hipGetErrorString(error));                            \
        exit(1);                                                      \
    }                                                                 \
}

/* Fill `in` with `size` pseudo-random floats in [0, 25.5]. */
void initialData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        in[i] = (float)(rand() & 0xFF) / 10.0f;
    }
    return;
}

/* Print `size` elements of `in` on one line (3-wide, no decimals). */
void printData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        printf("%3.0f ", in[i]);
    }
    printf("\n");
    return;
}

/* Compare hostRef and gpuRef element-wise; report the first mismatch.
 * rows/cols describe the layout of the transposed result. */
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            int index = i * cols + j;

            /* BUG FIX: the original called integer abs() on a float
             * difference, truncating toward zero so any mismatch with
             * |diff| < 1.0 was never detected. fabs() keeps the
             * fractional part for the epsilon comparison. */
            if (fabs(hostRef[index] - gpuRef[index]) > epsilon)
            {
                match = 0;
                printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n",
                       i, j, index, hostRef[index], gpuRef[index]);
                break;
            }
        }
        if (!match) break;
    }

    if (!match)  printf("Arrays do not match.\n\n");
}

/* Reference CPU transpose: out[ix][iy] = in[iy][ix]. */
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
    for (int iy = 0; iy < ncols; ++iy)
    {
        for (int ix = 0; ix < nrows; ++ix)
        {
            out[ix * ncols + iy] = in[iy * nrows + ix];
        }
    }
}

__global__ void justcopy(float *out, float *in, const int nrows, const int ncols)
{
    /* Copy data from one matrix to another -- no transposition done.
     * Used to measure an upper bound on achievable bandwidth. */
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[ix * ncols + iy];
    }
}

__global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Naive transpose -- coalesced writes, strided (uncoalesced) reads. */
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[iy * nrows + ix];
    }
}

__global__ void lessnaivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Each thread handles blockDim.x/blockDim.y elements of a column.
     * NOTE(review): assumes a square grid of square tiles (width =
     * gridDim.x * blockDim.x) with no bounds checks -- matrix dims must
     * be exact multiples of the tile size. */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.x + threadIdx.y;
    unsigned int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
    {
        out[x * width + (y + j)] = in[(y + j) * width + x];
    }
}

__global__ void opttranspose(float *out, float *in, const int nrows, const int ncols)
{
    /* Tiled transpose staged through shared (LDS) memory so both the
     * global reads and writes are coalesced. Same exact-multiple
     * assumption as lessnaivetranspose. */
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.x + threadIdx.y;
    int width = gridDim.x * blockDim.x;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        tile[threadIdx.y + j][threadIdx.x] = in[(y + j) * width + x];

    __syncthreads();

    x = blockIdx.y * blockDim.x + threadIdx.x;  // transpose block offset
    y = blockIdx.x * blockDim.x + threadIdx.y;

    for (int j = 0; j < blockDim.x; j += blockDim.y)
        out[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    // initialise timing
    float milli;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    bool iprint = 0;

    // set up array size 1024
    int nrows = 1 << 10;
    int ncols = 1 << 10;
    int blockx = 16;
    int blocky = 16;

    // interpret command line arguments if present
    if (argc > 1) iprint = atoi(argv[1]);
    if (argc > 2) blockx = atoi(argv[2]);
    if (argc > 3) blocky = atoi(argv[3]);
    if (argc > 4) nrows  = atoi(argv[4]);
    if (argc > 5) ncols  = atoi(argv[5]);

    printf(" with matrix nrows %d ncols %d\n", nrows, ncols);
    size_t ncells = nrows * ncols;
    size_t nBytes = ncells * sizeof(float);

    // execution configuration
    dim3 block (blockx, blocky);
    dim3 grid ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y);
    dim3 grid_opt (nrows / block.x, ncols / block.x);

    // allocate host memory
    float *h_A     = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef  = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nrows * ncols);

    // transpose at host side
    transposeHost(hostRef, h_A, nrows, ncols);

    // allocate device memory
    float *d_A, *d_C;
    CHECK(hipMalloc((float**)&d_A, nBytes));
    CHECK(hipMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));

    // execute justcopy kernel
    CHECK(hipMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    hipEventRecord(start);   // start timing

    justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols);
    CHECK(hipDeviceSynchronize());

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, nrows * ncols);

    // convert bytes and millisec to GB/sec
    float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000;
    printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid.x, grid.y, block.x, block.y, ibnd);

    // execute naive transpose kernel
    CHECK(hipMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    hipEventRecord(start);   // start timing

    // naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols);
    // lessnaivetranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    opttranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    CHECK(hipDeviceSynchronize());

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milli, start, stop);   // stop timing actual kernel execution

    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, ncells);

    ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    printf("optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid_opt.x, grid_opt.y, block.x, block.y, ibnd);

    checkResult(hostRef, gpuRef, ncols, nrows);

    // free host and device memory
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(hipDeviceReset());
    return EXIT_SUCCESS;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z8justcopyPfS_ii .globl _Z8justcopyPfS_ii .p2align 8 .type _Z8justcopyPfS_ii,@function _Z8justcopyPfS_ii: s_clause 0x1 s_load_b32 s4, s[0:1], 0x24 s_load_b64 s[2:3], s[0:1], 0x10 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s5, s4, 0xffff s_lshr_b32 s4, s4, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4] v_cmp_gt_u32_e32 vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_u32_e64 s2, s3, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s4, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v0, s3, v[1:2] v_mov_b32_e32 v3, 0 v_lshlrev_b64 v[0:1], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8justcopyPfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 
.amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z8justcopyPfS_ii, .Lfunc_end0-_Z8justcopyPfS_ii .section .AMDGPU.csdata,"",@progbits .text .protected _Z14naivetransposePfS_ii .globl _Z14naivetransposePfS_ii .p2align 8 .type _Z14naivetransposePfS_ii,@function _Z14naivetransposePfS_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b64 s[4:5], s[0:1], 0x10 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_u32_e64 s2, s5, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB1_2 s_load_b128 s[0:3], s[0:1], 0x0 v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1] v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s2, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo global_load_b32 v2, v[4:5], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[4:5], null, v0, s5, v[1:2] 
v_mov_b32_e32 v5, v3 v_lshlrev_b64 v[0:1], 2, v[4:5] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v2, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14naivetransposePfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z14naivetransposePfS_ii, .Lfunc_end1-_Z14naivetransposePfS_ii .section .AMDGPU.csdata,"",@progbits .text .protected _Z18lessnaivetransposePfS_ii .globl _Z18lessnaivetransposePfS_ii .p2align 8 .type _Z18lessnaivetransposePfS_ii,@function _Z18lessnaivetransposePfS_ii: s_load_b32 s6, s[0:1], 0x24 s_add_u32 s2, s0, 24 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) v_cmp_eq_u16_e64 s5, s6, 0 
s_delay_alu instid0(VALU_DEP_1) s_and_b32 vcc_lo, exec_lo, s5 s_mov_b32 s5, 0 s_cbranch_vccnz .LBB2_3 s_clause 0x1 s_load_b32 s7, s[2:3], 0x0 s_load_b32 s8, s[2:3], 0xc v_and_b32_e32 v3, 0x3ff, v0 v_bfe_u32 v2, v0, 10, 10 s_and_b32 s6, 0xffff, s6 s_mov_b32 s4, s15 s_load_b128 s[0:3], s[0:1], 0x0 v_mad_u64_u32 v[0:1], null, s14, s6, v[3:4] v_mad_u64_u32 v[4:5], null, s4, s6, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mad_u64_u32 v[5:6], null, s7, v0, s[4:5] v_mad_u64_u32 v[6:7], null, s7, v4, s[14:15] s_lshr_b32 s4, s8, 16 s_mul_i32 s7, s7, s6 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) s_mul_i32 s7, s7, s4 v_mad_u64_u32 v[0:1], null, v5, s6, v[2:3] s_delay_alu instid0(VALU_DEP_2) v_mad_u64_u32 v[1:2], null, v6, s6, v[3:4] v_mov_b32_e32 v2, 0 .p2align 6 .LBB2_2: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[3:4], 2, v[1:2] v_add_nc_u32_e32 v1, s7, v1 v_add_co_u32 v3, vcc_lo, s2, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo global_load_b32 v5, v[3:4], off v_dual_mov_b32 v4, v2 :: v_dual_add_nc_u32 v3, s5, v0 s_add_i32 s5, s5, s4 s_cmp_ge_u32 s5, s6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[3:4] v_add_co_u32 v3, vcc_lo, s0, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[3:4], v5, off s_cbranch_scc0 .LBB2_2 .LBB2_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18lessnaivetransposePfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 
.amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z18lessnaivetransposePfS_ii, .Lfunc_end2-_Z18lessnaivetransposePfS_ii .section .AMDGPU.csdata,"",@progbits .text .protected _Z12opttransposePfS_ii .globl _Z12opttransposePfS_ii .p2align 8 .type _Z12opttransposePfS_ii,@function _Z12opttransposePfS_ii: s_clause 0x1 s_load_b32 s3, s[0:1], 0x24 s_load_b32 s8, s[0:1], 0x18 s_add_u32 s4, s0, 24 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_addc_u32 s5, s1, 0 s_mov_b32 s2, s15 s_mov_b32 s10, 0 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s3, 0 s_mul_i32 s9, s8, s3 s_cbranch_scc1 .LBB3_3 s_load_b32 s11, s[4:5], 0xc v_mad_u64_u32 v[2:3], null, s2, s3, v[0:1] s_load_b64 s[6:7], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[4:5], null, s8, v2, s[14:15] v_lshlrev_b32_e32 v5, 2, v1 v_mad_u64_u32 v[2:3], null, v4, s3, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_lshl_add_u32 v4, v0, 7, v5 s_waitcnt lgkmcnt(0) s_lshr_b32 s11, s11, 
16 s_lshl_b32 s12, s11, 7 s_mul_i32 s13, s9, s11 .LBB3_2: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_ashrrev_i32_e32 v3, 31, v2 s_add_i32 s10, s10, s11 s_cmp_ge_u32 s10, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[5:6], 2, v[2:3] v_add_nc_u32_e32 v2, s13, v2 v_add_co_u32 v5, vcc_lo, s6, v5 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo global_load_b32 v3, v[5:6], off s_waitcnt vmcnt(0) ds_store_b32 v4, v3 v_add_nc_u32_e32 v4, s12, v4 s_cbranch_scc0 .LBB3_2 .LBB3_3: s_cmp_eq_u32 s3, 0 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB3_6 s_load_b32 s4, s[4:5], 0xc v_mad_u64_u32 v[2:3], null, s14, s3, v[0:1] s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_mov_b32 s5, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshl_add_u32 v0, v1, 7, v0 v_mad_u64_u32 v[4:5], null, s8, v2, s[2:3] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[2:3], null, v4, s3, v[1:2] s_waitcnt lgkmcnt(0) s_lshr_b32 s2, s4, 16 s_mul_i32 s9, s9, s2 s_lshl_b32 s4, s2, 2 .LBB3_5: ds_load_b32 v1, v0 v_ashrrev_i32_e32 v3, 31, v2 v_add_nc_u32_e32 v0, s4, v0 s_add_i32 s5, s5, s2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) s_cmp_ge_u32 s5, s3 v_lshlrev_b64 v[3:4], 2, v[2:3] v_add_nc_u32_e32 v2, s9, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_waitcnt lgkmcnt(0) global_store_b32 v[3:4], v1, off s_cbranch_scc0 .LBB3_5 .LBB3_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12opttransposePfS_ii .amdhsa_group_segment_fixed_size 4096 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 
.amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end3: .size _Z12opttransposePfS_ii, .Lfunc_end3-_Z12opttransposePfS_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 
.value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8justcopyPfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z8justcopyPfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 
.kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14naivetransposePfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14naivetransposePfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18lessnaivetransposePfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18lessnaivetransposePfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: 
by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 4096 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12opttransposePfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12opttransposePfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <stdio.h>

/* Naive kernel for transposing a rectangular host array. */
// TILE_DIM sizes the shared-memory tile used by opttranspose (32x32 floats = 4 KiB LDS).
const int TILE_DIM = 32;

// Abort with file/line context if a HIP runtime call does not return hipSuccess.
#define CHECK(call)                                                        \
{                                                                          \
    const hipError_t error = call;                                         \
    if (error != hipSuccess)                                               \
    {                                                                      \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);             \
        fprintf(stderr, "code: %d, reason: %s\n", error,                   \
                hipGetErrorString(error));                                 \
        exit(1);                                                           \
    }                                                                      \
}

// Fill `in` (length `size`) with pseudo-random floats in [0, 25.5]
// (rand() & 0xFF gives 0..255, then divided by 10).  Not seeded here,
// so the sequence is identical on every run.
void initialData(float *in, const int size)
{
    // initialise matrix
    for (int i = 0; i < size; i++)
    {
        in[i] = (float)(rand() & 0xFF) / 10.0f;
    }

    return;
}

// Print `size` elements of `in` on one line, "%3.0f " each, then a newline.
void printData(float *in, const int size)
{
    // print matrix
    for (int i = 0; i < size; i++)
    {
        printf("%3.0f ", in[i]);
    }

    printf("\n");
    return;
}

// Compare hostRef and gpuRef element-wise over a rows x cols matrix;
// report the first mismatch and stop.  NOTE(review): `abs` here may bind to
// the integer overload for float arguments (only <stdio.h> is included),
// which would truncate the difference before the epsilon test -- confirm
// that fabsf/std::abs was intended.
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
    // check that transposed matrix is correct
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            int index = i*cols + j;
            if (abs(hostRef[index] - gpuRef[index]) > epsilon)
            {
                match = 0;
                printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n",
                       i, j, index, hostRef[index], gpuRef[index]);
                break;
            }
        }

        if (!match) break;
    }

    if (!match)  printf("Arrays do not match.\n\n");
}

// Reference CPU transpose: out[ix * ncols + iy] = in[iy * nrows + ix].
// Serves as the ground truth that checkResult compares the GPU result against.
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
    // transpose using CPU
    for (int iy = 0; iy < ncols; ++iy)
    {
        for (int ix = 0; ix < nrows; ++ix)
        {
            out[ix * ncols + iy] = in[iy * nrows + ix];
        }
    }
}

// Baseline kernel: element-for-element copy (no transpose).  Used to measure
// the best-case effective bandwidth that the transpose kernels are compared to.
__global__ void justcopy(float *out, float *in, const int nrows, const int ncols)
{
    // routine to copy data from one matrix to another -- no transposition done
    // get matrix coordinate (ix,iy)
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    // copy data as is with boundary test
    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[ix * ncols + iy];
    }
}

// Naive transpose: coalesced-write / strided-read layout, one element per thread.
__global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    // naive routine to transpose a matrix -- no optimisations considered
    // get matrix coordinate (ix,iy)
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;

    // transpose with boundary test
    if (ix < nrows && iy < ncols)
    {
        out[ix * ncols + iy] = in[iy * nrows + ix];
    }
}

// Transpose where each thread handles blockDim.x/blockDim.y elements along y.
// NOTE(review): no bounds test -- assumes the grid tiles the matrix exactly
// (launched with grid_opt, which is computed by integer division in main).
__global__ void lessnaivetranspose(float *out, float *in, const int nrows, const int ncols)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.x + threadIdx.y;
    unsigned int width = gridDim.x * blockDim.x;

    for (int j=0; j<blockDim.x; j+=blockDim.y)
    {
        out[x*width + (y+j)] = in[(y+j)*width + x];
    }
}

// Optimised transpose: stage a TILE_DIM x TILE_DIM tile through shared memory
// so both the global read and the global write are along x.
// NOTE(review): assumes blockDim.x == TILE_DIM and a square matrix
// (width is used for both input and output pitch) -- confirm against launch.
__global__ void opttranspose(float *out, float *in, const int nrows, const int ncols)
{
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.x + threadIdx.y;
    int width = gridDim.x * blockDim.x;

    // Each thread loads blockDim.x/blockDim.y elements of the tile.
    for (int j = 0; j < blockDim.x; j += blockDim.y)
        tile[threadIdx.y+j][threadIdx.x] = in[(y+j)*width + x];

    __syncthreads();   // tile fully populated before any thread reads it back

    x = blockIdx.y * blockDim.x + threadIdx.x;  // transpose block offset
    y = blockIdx.x * blockDim.x + threadIdx.y;

    // Write out with x/y block indices swapped; the tile is read transposed.
    for (int j = 0; j < blockDim.x; j += blockDim.y)
        out[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}

// Driver: times justcopy and opttranspose with HIP events and reports
// effective bandwidth in GB/s.  Optional argv: iprint blockx blocky nrows ncols.
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    // initialise CUDA timing
    float milli;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    bool iprint = 0;

    // set up array size 1024
    int nrows = 1 << 10;
    int ncols = 1 << 10;

    int blockx = 16;
    int blocky = 16;

    // interpret command line arguments if present
    if (argc > 1) iprint = atoi(argv[1]);
    if (argc > 2) blockx = atoi(argv[2]);
    if (argc > 3) blocky = atoi(argv[3]);
    if (argc > 4) nrows  = atoi(argv[4]);
    if (argc > 5) ncols  = atoi(argv[5]);

    printf(" with matrix nrows %d ncols %d\n", nrows, ncols);
    size_t ncells = nrows * ncols;
    size_t nBytes = ncells * sizeof(float);

    // execution configuration
    dim3 block (blockx, blocky);
    dim3 grid  ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y);
    // grid_opt uses block.x for BOTH dimensions (opttranspose tiles are square).
    dim3 grid_opt (nrows/block.x, ncols/block.x);

    // allocate host memory
    float *h_A = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nrows * ncols);

    // transpose at host side
    transposeHost(hostRef, h_A, nrows, ncols);

    // allocate device memory
    float *d_A, *d_C;
    CHECK(hipMalloc((float**)&d_A, nBytes));
    CHECK(hipMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));

    // execute justcopy kernel
    CHECK(hipMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    hipEventRecord(start);  // start timing

    justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols);
    CHECK(hipDeviceSynchronize());

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milli, start, stop);  // stop timing actual kernel execution

    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, nrows * ncols);

    // 2x: every cell is read once and written once.
    float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    // convert bytes and millisec to GB/sec
    // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000;
    printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid.x, grid.y, block.x, block.y, ibnd);

    // execute naive transpose kernel
    CHECK(hipMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);

    hipEventRecord(start);  // start timing

    // naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols);
    // lessnaivetranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    opttranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    CHECK(hipDeviceSynchronize());

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milli, start, stop);  // stop timing actual kernel execution

    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));

    if(iprint) printData(gpuRef, ncells);

    ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    printf("optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n",
           milli, grid_opt.x, grid_opt.y, block.x, block.y, ibnd);
    checkResult(hostRef, gpuRef, ncols, nrows);

    // free host and device memory
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(hipDeviceReset());
    return EXIT_SUCCESS;
}
# ---------------------------------------------------------------------------
# x86-64 host-side assembly (SysV AMD64 ABI, AT&T syntax) emitted by clang for
# junetranspose-opt.hip.  Compiler-generated: comments added for review only,
# instructions unchanged.  Covered here: initialData, printData, checkResult,
# transposeHost, and the justcopy kernel-launch stub.
# ---------------------------------------------------------------------------
	.text
	.file	"junetranspose-opt.hip"
	.section	.rodata.cst4,"aM",@progbits,4
	.p2align	2, 0x0                          # -- Begin function _Z11initialDataPfi
.LCPI0_0:
	.long	0x41200000                      # float 10 (divisor for rand() & 0xFF)
	.text
	.globl	_Z11initialDataPfi
	.p2align	4, 0x90
	.type	_Z11initialDataPfi,@function
# void initialData(float *in /*rdi*/, int size /*esi*/)
# Fills in[0..size) with (float)(rand() & 0xFF) / 10.0f.
_Z11initialDataPfi:                     # @_Z11initialDataPfi
	.cfi_startproc
# %bb.0:
	testl	%esi, %esi              # size <= 0 -> nothing to do
	jle	.LBB0_4
# %bb.1:                                # %.lr.ph.preheader
	pushq	%r15                    # callee-saved regs hold loop state across rand()
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movq	%rdi, %rbx              # rbx = in
	movl	%esi, %r14d             # r14 = size (zero-extended trip count)
	xorl	%r15d, %r15d            # r15 = i = 0
	.p2align	4, 0x90
.LBB0_2:                                # %.lr.ph
	callq	rand
	movzbl	%al, %eax               # rand() & 0xFF
	xorps	%xmm0, %xmm0            # break false dependency before cvtsi2ss
	cvtsi2ss	%eax, %xmm0
	divss	.LCPI0_0(%rip), %xmm0   # / 10.0f
	movss	%xmm0, (%rbx,%r15,4)    # in[i] = value
	incq	%r15
	cmpq	%r15, %r14
	jne	.LBB0_2
# %bb.3:
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	.cfi_restore %rbx
	.cfi_restore %r14
	.cfi_restore %r15
.LBB0_4:                                # %._crit_edge
	retq
.Lfunc_end0:
	.size	_Z11initialDataPfi, .Lfunc_end0-_Z11initialDataPfi
	.cfi_endproc
                                        # -- End function
	.globl	_Z9printDataPfi                 # -- Begin function _Z9printDataPfi
	.p2align	4, 0x90
	.type	_Z9printDataPfi,@function
# void printData(float *in /*rdi*/, int size /*esi*/)
# printf("%3.0f ", in[i]) for each element, then a trailing newline.
_Z9printDataPfi:                        # @_Z9printDataPfi
	.cfi_startproc
# %bb.0:
	testl	%esi, %esi
	jle	.LBB1_4
# %bb.1:                                # %.lr.ph.preheader
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movq	%rdi, %rbx              # rbx = in
	movl	%esi, %r14d             # r14 = size
	xorl	%r15d, %r15d            # r15 = i
	.p2align	4, 0x90
.LBB1_2:                                # %.lr.ph
	movss	(%rbx,%r15,4), %xmm0    # xmm0 = in[i]
	cvtss2sd	%xmm0, %xmm0    # float promoted to double for variadic printf
	movl	$.L.str, %edi
	movb	$1, %al                 # SysV: AL = #vector regs used by variadic call
	callq	printf
	incq	%r15
	cmpq	%r15, %r14
	jne	.LBB1_2
# %bb.3:
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	.cfi_restore %rbx
	.cfi_restore %r14
	.cfi_restore %r15
.LBB1_4:                                # %._crit_edge
	movl	$10, %edi               # printf("\n") lowered to putchar('\n')
	jmp	putchar@PLT             # TAILCALL
.Lfunc_end1:
	.size	_Z9printDataPfi, .Lfunc_end1-_Z9printDataPfi
	.cfi_endproc
                                        # -- End function
	.section	.rodata.cst16,"aM",@progbits,16
	.p2align	4, 0x0                          # -- Begin function _Z11checkResultPfS_ii
.LCPI2_0:
	.long	0x7fffffff              # sign-bit mask (x4) used by andps for fabsf
	.long	0x7fffffff
	.long	0x7fffffff
	.long	0x7fffffff
	.section	.rodata.cst8,"aM",@progbits,8
	.p2align	3, 0x0
.LCPI2_1:
	.quad	0x3e45798ee2308c3a      # double 1.0E-8 (epsilon)
	.text
	.globl	_Z11checkResultPfS_ii
	.p2align	4, 0x90
	.type	_Z11checkResultPfS_ii,@function
# void checkResult(float *hostRef /*rdi*/, float *gpuRef /*rsi*/,
#                  int rows /*edx*/, int cols /*ecx*/)
# Element-wise |host - gpu| > 1e-8 scan; prints first mismatch, then
# "Arrays do not match." if any difference was found.
_Z11checkResultPfS_ii:                  # @_Z11checkResultPfS_ii
	.cfi_startproc
# %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	pushq	%r15
	.cfi_def_cfa_offset 24
	pushq	%r14
	.cfi_def_cfa_offset 32
	pushq	%r13
	.cfi_def_cfa_offset 40
	pushq	%r12
	.cfi_def_cfa_offset 48
	pushq	%rbx
	.cfi_def_cfa_offset 56
	subq	$40, %rsp               # spill slots; keeps rsp 16-aligned at calls
	.cfi_def_cfa_offset 96
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	testl	%edx, %edx              # rows <= 0 -> nothing to compare
	jle	.LBB2_11
# %bb.1:                                # %.preheader.lr.ph
	movq	%rsi, %r14              # r14 = gpuRef row pointer
	movq	%rdi, %r15              # r15 = hostRef row pointer
	movslq	%ecx, %rbp              # rbp = (int64)cols
	movl	%edx, %eax
	movq	%rax, 24(%rsp)          # spill: row trip count
	leaq	(,%rbp,4), %rax
	movq	%rax, 16(%rsp)          # spill: row stride in bytes
	movl	%ebp, %eax
	movq	%rax, 8(%rsp)           # spill: cols (linear-index increment)
	movb	$1, %al                 # al = match flag (1 = matched so far)
	xorl	%ebx, %ebx              # rbx = running linear offset of row start
	movaps	.LCPI2_0(%rip), %xmm3   # xmm3 = abs mask
	movsd	.LCPI2_1(%rip), %xmm4   # xmm4 = epsilon
	xorl	%r12d, %r12d            # r12 = row index i
	movl	%ecx, 4(%rsp)           # spill: cols (printf clobbers ecx)
	.p2align	4, 0x90
.LBB2_2:                                # %.preheader
	testl	%ecx, %ecx
	jle	.LBB2_8
# %bb.3:                                # %.lr.ph
	xorl	%r13d, %r13d            # r13 = column index j
	.p2align	4, 0x90
.LBB2_4:                                # inner loop over columns
	movss	(%r15,%r13,4), %xmm0    # host value
	movss	(%r14,%r13,4), %xmm1    # gpu value
	movaps	%xmm0, %xmm2
	subss	%xmm1, %xmm2
	andps	%xmm3, %xmm2            # |host - gpu|
	cvtss2sd	%xmm2, %xmm2
	ucomisd	%xmm4, %xmm2            # diff > epsilon ?
	jbe	.LBB2_6
# %bb.5:                                # mismatch: report and clear match flag
	leal	(%rbx,%r13), %ecx       # linear offset = row_base + j
	cvtss2sd	%xmm0, %xmm0
	cvtss2sd	%xmm1, %xmm1
	movl	$.L.str.2, %edi
	movl	%r12d, %esi
	movl	%r13d, %edx
	movb	$2, %al                 # two xmm args to variadic printf
	movsd	%xmm2, 32(%rsp)         # preserve diff across the call
	callq	printf
	movsd	32(%rsp), %xmm2
	movsd	.LCPI2_1(%rip), %xmm4   # reload clobbered constants
	movaps	.LCPI2_0(%rip), %xmm3
	xorl	%eax, %eax              # match = 0
.LBB2_6:
	ucomisd	%xmm4, %xmm2            # re-test diff: break inner loop on mismatch
	ja	.LBB2_8
# %bb.7:
	incq	%r13
	cmpq	%rbp, %r13
	jl	.LBB2_4
.LBB2_8:                                # %._crit_edge
	testb	$1, %al                 # stop scanning rows once a mismatch was seen
	movl	4(%rsp), %ecx
	je	.LBB2_10
# %bb.9:                                # advance both row pointers one row
	incq	%r12
	movq	16(%rsp), %rdx
	addq	%rdx, %r15
	addq	%rdx, %r14
	addq	8(%rsp), %rbx
	cmpq	24(%rsp), %r12
	jne	.LBB2_2
.LBB2_10:                               # %._crit_edge34.loopexit
	testb	$1, %al
	jne	.LBB2_11
# %bb.12:                               # mismatch path: puts("Arrays do not match.")
	movl	$.Lstr, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 56
	popq	%rbx
	.cfi_def_cfa_offset 48
	popq	%r12
	.cfi_def_cfa_offset 40
	popq	%r13
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	jmp	puts@PLT                # TAILCALL
.LBB2_11:                               # %.critedge -- clean exit
	.cfi_def_cfa_offset 96
	addq	$40, %rsp
	.cfi_def_cfa_offset 56
	popq	%rbx
	.cfi_def_cfa_offset 48
	popq	%r12
	.cfi_def_cfa_offset 40
	popq	%r13
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end2:
	.size	_Z11checkResultPfS_ii, .Lfunc_end2-_Z11checkResultPfS_ii
	.cfi_endproc
                                        # -- End function
	.globl	_Z13transposeHostPfS_ii         # -- Begin function _Z13transposeHostPfS_ii
	.p2align	4, 0x90
	.type	_Z13transposeHostPfS_ii,@function
# void transposeHost(float *out /*rdi*/, float *in /*rsi*/,
#                    int nrows /*edx*/, int ncols /*ecx*/)
# CPU reference transpose: out[ix*ncols + iy] = in[iy*nrows + ix].
_Z13transposeHostPfS_ii:                # @_Z13transposeHostPfS_ii
	.cfi_startproc
# %bb.0:
	testl	%ecx, %ecx              # ncols <= 0 -> nothing to do
	jle	.LBB3_7
# %bb.1:                                # %.preheader.lr.ph
	pushq	%r14
	.cfi_def_cfa_offset 16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset %rbx, -24
	.cfi_offset %r14, -16
	movl	%ecx, %eax              # rax = ncols (outer trip count)
	movl	%edx, %ecx              # rcx = nrows (inner trip count)
	leaq	(,%rax,4), %r8          # r8 = ncols*4 = output column stride in bytes
	xorl	%r9d, %r9d              # r9 = iy*nrows (input row base, elements)
	xorl	%r10d, %r10d            # r10 = iy
	jmp	.LBB3_2
	.p2align	4, 0x90
.LBB3_5:                                # %._crit_edge -- advance to next column
	incq	%r10
	addl	%edx, %r9d              # input base += nrows
	addq	$4, %rdi                # out pointer moves one float within the row
	cmpq	%rax, %r10
	je	.LBB3_6
.LBB3_2:                                # %.preheader
	testl	%edx, %edx
	jle	.LBB3_5
# %bb.3:                                # %.lr.ph
	movl	%r9d, %r11d
	leaq	(%rsi,%r11,4), %r11     # r11 = &in[iy*nrows]
	movq	%rdi, %rbx              # rbx = &out[iy] (walks down a column)
	xorl	%r14d, %r14d            # r14 = ix
	.p2align	4, 0x90
.LBB3_4:                                # inner loop over ix
	movss	(%r11,%r14,4), %xmm0    # in[iy*nrows + ix]
	movss	%xmm0, (%rbx)           # out[ix*ncols + iy]
	incq	%r14
	addq	%r8, %rbx               # next output row, same column
	cmpq	%r14, %rcx
	jne	.LBB3_4
	jmp	.LBB3_5
.LBB3_6:
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%r14
	.cfi_def_cfa_offset 8
	.cfi_restore %rbx
	.cfi_restore %r14
.LBB3_7:                                # %._crit_edge18
	retq
.Lfunc_end3:
	.size	_Z13transposeHostPfS_ii, .Lfunc_end3-_Z13transposeHostPfS_ii
	.cfi_endproc
                                        # -- End function
	.globl	_Z23__device_stub__justcopyPfS_ii # -- Begin function _Z23__device_stub__justcopyPfS_ii
	.p2align	4, 0x90
	.type	_Z23__device_stub__justcopyPfS_ii,@function
# HIP kernel-launch stub for justcopy(float*, float*, int, int):
# spills the four kernel args, builds the arg-pointer array, pops the launch
# configuration pushed by the <<<>>> caller, then calls hipLaunchKernel.
_Z23__device_stub__justcopyPfS_ii:      # @_Z23__device_stub__justcopyPfS_ii
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 72(%rsp)          # arg0: out
	movq	%rsi, 64(%rsp)          # arg1: in
	movl	%edx, 12(%rsp)          # arg2: nrows
	movl	%ecx, 8(%rsp)           # arg3: ncols
	leaq	72(%rsp), %rax          # kernel-arg pointer array at 80(%rsp)
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	48(%rsp), %rdi          # out: grid dim
	leaq	32(%rsp), %rsi          # out: block dim
	leaq	24(%rsp), %rdx          # out: shared-mem size
	leaq	16(%rsp), %rcx          # out: stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi          # grid (x,y packed)
	movl	56(%rsp), %edx          # grid z
	movq	32(%rsp), %rcx          # block (x,y packed)
	movl	40(%rsp), %r8d          # block z
	leaq	80(%rsp), %r9           # kernel-arg array
	movl	$_Z8justcopyPfS_ii, %edi
	pushq	16(%rsp)                # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                # shared-mem size
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end4:
	.size	_Z23__device_stub__justcopyPfS_ii, .Lfunc_end4-_Z23__device_stub__justcopyPfS_ii
	.cfi_endproc
                                        # -- End
function .globl _Z29__device_stub__naivetransposePfS_ii # -- Begin function _Z29__device_stub__naivetransposePfS_ii .p2align 4, 0x90 .type _Z29__device_stub__naivetransposePfS_ii,@function _Z29__device_stub__naivetransposePfS_ii: # @_Z29__device_stub__naivetransposePfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14naivetransposePfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end5: .size _Z29__device_stub__naivetransposePfS_ii, .Lfunc_end5-_Z29__device_stub__naivetransposePfS_ii .cfi_endproc # -- End function .globl _Z33__device_stub__lessnaivetransposePfS_ii # -- Begin function _Z33__device_stub__lessnaivetransposePfS_ii .p2align 4, 0x90 .type _Z33__device_stub__lessnaivetransposePfS_ii,@function _Z33__device_stub__lessnaivetransposePfS_ii: # @_Z33__device_stub__lessnaivetransposePfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18lessnaivetransposePfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 
pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end6: .size _Z33__device_stub__lessnaivetransposePfS_ii, .Lfunc_end6-_Z33__device_stub__lessnaivetransposePfS_ii .cfi_endproc # -- End function .globl _Z27__device_stub__opttransposePfS_ii # -- Begin function _Z27__device_stub__opttransposePfS_ii .p2align 4, 0x90 .type _Z27__device_stub__opttransposePfS_ii,@function _Z27__device_stub__opttransposePfS_ii: # @_Z27__device_stub__opttransposePfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12opttransposePfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end7: .size _Z27__device_stub__opttransposePfS_ii, .Lfunc_end7-_Z27__device_stub__opttransposePfS_ii .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI8_0: .long 0x41200000 # float 10 .LCPI8_4: .long 0x447a0000 # float 1000 .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 .LCPI8_1: .long 1127219200 # 0x43300000 .long 1160773632 # 0x45300000 .long 0 # 0x0 .long 0 # 0x0 .LCPI8_2: .quad 0x4330000000000000 # double 4503599627370496 .quad 0x4530000000000000 # double 1.9342813113834067E+25 .LCPI8_5: .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI8_3: .quad 
0x3e10000000000000 # double 9.3132257461547852E-10 .LCPI8_6: .quad 0x3e45798ee2308c3a # double 1.0E-8 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $1752, %rsp # imm = 0x6D8 .cfi_def_cfa_offset 1808 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, %r14 movl %edi, %ebp leaq 280(%rsp), %rdi xorl %esi, %esi callq hipGetDevicePropertiesR0600 testl %eax, %eax jne .LBB8_1 # %bb.4: movq (%r14), %rsi movl $.L.str.7, %edi xorl %eax, %eax callq printf leaq 280(%rsp), %rdx movl $.L.str.8, %edi xorl %esi, %esi xorl %eax, %eax callq printf xorl %edi, %edi callq hipSetDevice testl %eax, %eax jne .LBB8_5 # %bb.6: leaq 96(%rsp), %rdi callq hipEventCreate leaq 64(%rsp), %rdi callq hipEventCreate cmpl $2, %ebp jl .LBB8_7 # %bb.8: movq 8(%r14), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol testl %eax, %eax setne %al movl %eax, 40(%rsp) # 4-byte Spill jmp .LBB8_9 .LBB8_7: movl $0, 40(%rsp) # 4-byte Folded Spill .LBB8_9: movl $16, %ebx movl $16, %eax movq %rax, 24(%rsp) # 8-byte Spill cmpl $3, %ebp jl .LBB8_11 # %bb.10: movq 16(%r14), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 24(%rsp) # 8-byte Spill .LBB8_11: cmpl $4, %ebp jl .LBB8_13 # %bb.12: movq 24(%r14), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %rbx .LBB8_13: movl $1024, %eax # imm = 0x400 movq %rax, 16(%rsp) # 8-byte Spill movl $1024, %r12d # imm = 0x400 cmpl $5, %ebp jl .LBB8_15 # %bb.14: movq 32(%r14), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r12 .LBB8_15: cmpl $6, %ebp jl .LBB8_17 # %bb.16: movq 40(%r14), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, 16(%rsp) # 
8-byte Spill .LBB8_17: movl $.L.str.9, %edi movl %r12d, %esi movq 16(%rsp), %r14 # 8-byte Reload movl %r14d, %edx xorl %eax, %eax callq printf movl %r14d, %eax imull %r12d, %eax movl %eax, 12(%rsp) # 4-byte Spill movslq %eax, %r15 leaq (,%r15,4), %r13 movq 24(%rsp), %rcx # 8-byte Reload leal (%rcx,%r12), %eax decl %eax xorl %edx, %edx divl %ecx # kill: def $eax killed $eax def $rax movq %rax, 200(%rsp) # 8-byte Spill leal (%rbx,%r14), %eax decl %eax xorl %edx, %edx movq %rbx, 72(%rsp) # 8-byte Spill divl %ebx # kill: def $eax killed $eax def $rax movq %rax, 192(%rsp) # 8-byte Spill movl %r12d, %eax xorl %edx, %edx divl %ecx # kill: def $eax killed $eax def $rax movq %rax, 184(%rsp) # 8-byte Spill movl %r14d, %eax xorl %edx, %edx divl %ecx # kill: def $eax killed $eax def $rax movq %rax, 176(%rsp) # 8-byte Spill movq %r13, %rdi callq malloc movq %rax, %rbp movq %r13, %rdi callq malloc movq %rax, 104(%rsp) # 8-byte Spill movq %r13, 80(%rsp) # 8-byte Spill movq %r13, %rdi callq malloc movq %rax, 56(%rsp) # 8-byte Spill movq %r15, 240(%rsp) # 8-byte Spill testl %r15d, %r15d jle .LBB8_20 # %bb.18: # %.lr.ph.preheader.i movl 12(%rsp), %ebx # 4-byte Reload xorl %r14d, %r14d .p2align 4, 0x90 .LBB8_19: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand movzbl %al, %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 divss .LCPI8_0(%rip), %xmm0 movss %xmm0, (%rbp,%r14,4) incq %r14 cmpq %r14, %rbx jne .LBB8_19 .LBB8_20: # %_Z11initialDataPfi.exit movq 16(%rsp), %rax # 8-byte Reload movl %eax, %ebx movl %r12d, %r14d testl %eax, %eax movq 104(%rsp), %rdx # 8-byte Reload jle .LBB8_26 # %bb.21: # %.preheader.lr.ph.i leaq (,%rbx,4), %rax xorl %ecx, %ecx xorl %esi, %esi jmp .LBB8_22 .p2align 4, 0x90 .LBB8_25: # %._crit_edge.i # in Loop: Header=BB8_22 Depth=1 incq %rsi addl %r12d, %ecx addq $4, %rdx cmpq %rbx, %rsi je .LBB8_26 .LBB8_22: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB8_24 Depth 2 testl %r12d, %r12d jle .LBB8_25 # %bb.23: # %.lr.ph.i159 # in Loop: 
Header=BB8_22 Depth=1 movl %ecx, %edi leaq (,%rdi,4), %rdi addq %rbp, %rdi movq %rdx, %r8 xorl %r9d, %r9d .p2align 4, 0x90 .LBB8_24: # Parent Loop BB8_22 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rdi,%r9,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movss %xmm0, (%r8) incq %r9 addq %rax, %r8 cmpq %r9, %r14 jne .LBB8_24 jmp .LBB8_25 .LBB8_26: # %_Z13transposeHostPfS_ii.exit leaq 88(%rsp), %rdi movq 80(%rsp), %r13 # 8-byte Reload movq %r13, %rsi callq hipMalloc testl %eax, %eax jne .LBB8_27 # %bb.28: movq %rbx, 272(%rsp) # 8-byte Spill leaq 32(%rsp), %rdi movq %r13, %rsi callq hipMalloc testl %eax, %eax jne .LBB8_29 # %bb.30: movq 88(%rsp), %rdi movq %rbp, %rsi movq %r13, %rdx movl $1, %ecx callq hipMemcpy testl %eax, %eax movq 72(%rsp), %rbx # 8-byte Reload jne .LBB8_31 # %bb.32: movq 32(%rsp), %rdi xorl %esi, %esi movq %r13, %rdx callq hipMemset testl %eax, %eax jne .LBB8_33 # %bb.34: movl 24(%rsp), %eax # 4-byte Reload movq %rbx, %rcx shlq $32, %rcx orq %rax, %rcx movq 192(%rsp), %r15 # 8-byte Reload shlq $32, %r15 addq 200(%rsp), %r15 # 8-byte Folded Reload movq 56(%rsp), %rdi # 8-byte Reload xorl %esi, %esi movq %r13, %rdx movq %rcx, %r13 callq memset@PLT movq 96(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq %r15, %rdi movl $1, %esi movq %r13, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB8_36 # %bb.35: movq 32(%rsp), %rax movq 88(%rsp), %rcx movq %rax, 168(%rsp) movq %rcx, 160(%rsp) movl %r12d, 52(%rsp) movq 16(%rsp), %rax # 8-byte Reload movl %eax, 48(%rsp) leaq 168(%rsp), %rax movq %rax, 208(%rsp) leaq 160(%rsp), %rax movq %rax, 216(%rsp) leaq 52(%rsp), %rax movq %rax, 224(%rsp) leaq 48(%rsp), %rax movq %rax, 232(%rsp) leaq 144(%rsp), %rdi leaq 128(%rsp), %rsi leaq 120(%rsp), %rdx leaq 112(%rsp), %rcx callq __hipPopCallConfiguration movq 144(%rsp), %rsi movl 152(%rsp), %edx movq 128(%rsp), %rcx movl 136(%rsp), %r8d leaq 208(%rsp), %r9 movl $_Z8justcopyPfS_ii, %edi pushq 112(%rsp) 
.cfi_adjust_cfa_offset 8 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB8_36: callq hipDeviceSynchronize testl %eax, %eax jne .LBB8_37 # %bb.38: movq 64(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 64(%rsp), %rdi callq hipEventSynchronize movq 96(%rsp), %rsi movq 64(%rsp), %rdx leaq 44(%rsp), %rdi callq hipEventElapsedTime movq 32(%rsp), %rsi movq 56(%rsp), %rdi # 8-byte Reload movq 80(%rsp), %rdx # 8-byte Reload movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB8_39 # %bb.40: cmpb $0, 40(%rsp) # 1-byte Folded Reload movq %r14, 264(%rsp) # 8-byte Spill je .LBB8_45 # %bb.41: cmpl $0, 12(%rsp) # 4-byte Folded Reload movq 56(%rsp), %r15 # 8-byte Reload jle .LBB8_44 # %bb.42: # %.lr.ph.preheader.i165 movl 12(%rsp), %ebx # 4-byte Reload xorl %r14d, %r14d .p2align 4, 0x90 .LBB8_43: # %.lr.ph.i167 # =>This Inner Loop Header: Depth=1 movss (%r15,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r14 cmpq %r14, %rbx jne .LBB8_43 .LBB8_44: # %_Z9printDataPfi.exit movl $10, %edi callq putchar@PLT movq 72(%rsp), %rbx # 8-byte Reload .LBB8_45: movq 240(%rsp), %rax # 8-byte Reload shlq $3, %rax movq %rax, %xmm0 punpckldq .LCPI8_1(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] subpd .LCPI8_2(%rip), %xmm0 movapd %xmm0, %xmm2 unpckhpd %xmm0, %xmm2 # xmm2 = xmm2[1],xmm0[1] addsd %xmm0, %xmm2 mulsd .LCPI8_3(%rip), %xmm2 movss 44(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero xorps %xmm0, %xmm0 cvtss2sd %xmm1, %xmm0 divss .LCPI8_4(%rip), %xmm1 cvtss2sd %xmm1, %xmm1 movapd %xmm2, 240(%rsp) # 16-byte Spill divsd %xmm1, %xmm2 xorps %xmm1, %xmm1 cvtsd2ss %xmm2, %xmm1 cvtss2sd %xmm1, %xmm1 movl $.L.str.10, %edi movq 200(%rsp), %rsi # 8-byte Reload # kill: def $esi killed $esi killed $rsi movq 192(%rsp), %rdx # 8-byte Reload # kill: def $edx killed $edx killed $rdx movq 24(%rsp), %rcx # 8-byte Reload # kill: def $ecx killed $ecx killed $rcx movl 
%ebx, %r8d movb $2, %al callq printf movq 32(%rsp), %rdi xorl %esi, %esi movq 80(%rsp), %r14 # 8-byte Reload movq %r14, %rdx callq hipMemset testl %eax, %eax jne .LBB8_46 # %bb.47: movq 176(%rsp), %r15 # 8-byte Reload shlq $32, %r15 addq 184(%rsp), %r15 # 8-byte Folded Reload movq 56(%rsp), %rdi # 8-byte Reload xorl %esi, %esi movq %r14, %rdx callq memset@PLT movq 96(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq %r15, %rdi movl $1, %esi movq %r13, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB8_49 # %bb.48: movq 32(%rsp), %rax movq 88(%rsp), %rcx movq %rax, 168(%rsp) movq %rcx, 160(%rsp) movl %r12d, 52(%rsp) movq 16(%rsp), %rax # 8-byte Reload movl %eax, 48(%rsp) leaq 168(%rsp), %rax movq %rax, 208(%rsp) leaq 160(%rsp), %rax movq %rax, 216(%rsp) leaq 52(%rsp), %rax movq %rax, 224(%rsp) leaq 48(%rsp), %rax movq %rax, 232(%rsp) leaq 144(%rsp), %rdi leaq 128(%rsp), %rsi leaq 120(%rsp), %rdx leaq 112(%rsp), %rcx callq __hipPopCallConfiguration movq 144(%rsp), %rsi movl 152(%rsp), %edx movq 128(%rsp), %rcx movl 136(%rsp), %r8d leaq 208(%rsp), %r9 movl $_Z12opttransposePfS_ii, %edi pushq 112(%rsp) .cfi_adjust_cfa_offset 8 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB8_49: callq hipDeviceSynchronize testl %eax, %eax movq 104(%rsp), %r15 # 8-byte Reload jne .LBB8_50 # %bb.51: movq 64(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 64(%rsp), %rdi callq hipEventSynchronize movq 96(%rsp), %rsi movq 64(%rsp), %rdx leaq 44(%rsp), %rdi callq hipEventElapsedTime movq 32(%rsp), %rsi movq 56(%rsp), %r13 # 8-byte Reload movq %r13, %rdi movq %r14, %rdx movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB8_52 # %bb.53: cmpb $0, 40(%rsp) # 1-byte Folded Reload je .LBB8_58 # %bb.54: movl 12(%rsp), %eax # 4-byte Reload testl %eax, %eax jle .LBB8_57 # %bb.55: # %.lr.ph.preheader.i179 movl %eax, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB8_56: # 
%.lr.ph.i181 # =>This Inner Loop Header: Depth=1 movss (%r13,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf incq %r14 cmpq %r14, %rbx jne .LBB8_56 .LBB8_57: # %_Z9printDataPfi.exit185 movl $10, %edi callq putchar@PLT movq 72(%rsp), %rbx # 8-byte Reload .LBB8_58: movss 44(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero xorps %xmm0, %xmm0 cvtss2sd %xmm1, %xmm0 divss .LCPI8_4(%rip), %xmm1 cvtss2sd %xmm1, %xmm1 movapd 240(%rsp), %xmm2 # 16-byte Reload divsd %xmm1, %xmm2 xorps %xmm1, %xmm1 cvtsd2ss %xmm2, %xmm1 cvtss2sd %xmm1, %xmm1 movl $.L.str.11, %edi movq 184(%rsp), %rsi # 8-byte Reload # kill: def $esi killed $esi killed $rsi movq 176(%rsp), %rdx # 8-byte Reload # kill: def $edx killed $edx killed $rdx movq 24(%rsp), %rcx # 8-byte Reload # kill: def $ecx killed $ecx killed $rcx movl %ebx, %r8d movb $2, %al callq printf cmpl $0, 16(%rsp) # 4-byte Folded Reload jle .LBB8_70 # %bb.59: # %.preheader.lr.ph.i186 movq %rbp, 72(%rsp) # 8-byte Spill movq %r12, 16(%rsp) # 8-byte Spill movslq %r12d, %r14 leaq (,%r14,4), %rax movq %rax, 80(%rsp) # 8-byte Spill movb $1, %al xorl %ebx, %ebx movaps .LCPI8_5(%rip), %xmm3 # xmm3 = [NaN,NaN,NaN,NaN] movsd .LCPI8_6(%rip), %xmm4 # xmm4 = mem[0],zero movq %r15, %rbp movq %r13, %r12 xorl %r15d, %r15d .p2align 4, 0x90 .LBB8_60: # %.preheader.i188 # =>This Loop Header: Depth=1 # Child Loop BB8_62 Depth 2 cmpl $0, 16(%rsp) # 4-byte Folded Reload jle .LBB8_66 # %bb.61: # %.lr.ph.i191 # in Loop: Header=BB8_60 Depth=1 xorl %r13d, %r13d .p2align 4, 0x90 .LBB8_62: # Parent Loop BB8_60 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rbp,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero movss (%r12,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero movaps %xmm0, %xmm2 subss %xmm1, %xmm2 andps %xmm3, %xmm2 cvtss2sd %xmm2, %xmm2 ucomisd %xmm4, %xmm2 jbe .LBB8_64 # %bb.63: # in Loop: Header=BB8_62 Depth=2 leal (%rbx,%r13), %ecx cvtss2sd %xmm0, %xmm0 cvtss2sd %xmm1, %xmm1 movl $.L.str.2, %edi 
movl %r15d, %esi movl %r13d, %edx movb $2, %al movsd %xmm2, 24(%rsp) # 8-byte Spill callq printf movsd 24(%rsp), %xmm2 # 8-byte Reload # xmm2 = mem[0],zero movsd .LCPI8_6(%rip), %xmm4 # xmm4 = mem[0],zero movaps .LCPI8_5(%rip), %xmm3 # xmm3 = [NaN,NaN,NaN,NaN] xorl %eax, %eax .LBB8_64: # in Loop: Header=BB8_62 Depth=2 ucomisd %xmm4, %xmm2 ja .LBB8_66 # %bb.65: # in Loop: Header=BB8_62 Depth=2 incq %r13 cmpq %r14, %r13 jl .LBB8_62 .LBB8_66: # %._crit_edge.i189 # in Loop: Header=BB8_60 Depth=1 testb $1, %al je .LBB8_68 # %bb.67: # %._crit_edge.i189 # in Loop: Header=BB8_60 Depth=1 incq %r15 movq 80(%rsp), %rcx # 8-byte Reload addq %rcx, %r12 addq %rcx, %rbp addq 264(%rsp), %rbx # 8-byte Folded Reload cmpq 272(%rsp), %r15 # 8-byte Folded Reload jne .LBB8_60 .LBB8_68: # %._crit_edge34.loopexit.i testb $1, %al movq 72(%rsp), %rbp # 8-byte Reload movq 104(%rsp), %r15 # 8-byte Reload movq 56(%rsp), %r13 # 8-byte Reload jne .LBB8_70 # %bb.69: movl $.Lstr, %edi callq puts@PLT .LBB8_70: # %_Z11checkResultPfS_ii.exit movq 88(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB8_71 # %bb.72: movq 32(%rsp), %rdi callq hipFree testl %eax, %eax jne .LBB8_73 # %bb.74: movq %rbp, %rdi callq free movq %r15, %rdi callq free movq %r13, %rdi callq free callq hipDeviceReset testl %eax, %eax jne .LBB8_75 # %bb.76: xorl %eax, %eax addq $1752, %rsp # imm = 0x6D8 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB8_1: .cfi_def_cfa_offset 1808 movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $126, %ecx jmp .LBB8_2 .LBB8_5: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $129, %ecx jmp .LBB8_2 .LBB8_27: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $179, %ecx jmp .LBB8_2 .LBB8_29: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl 
$180, %ecx jmp .LBB8_2 .LBB8_31: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $183, %ecx jmp .LBB8_2 .LBB8_33: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $186, %ecx jmp .LBB8_2 .LBB8_37: movl %eax, %r15d movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $191, %ecx xorl %eax, %eax callq fprintf movq stderr(%rip), %rbx movl %r15d, %edi callq hipGetErrorString movl $.L.str.6, %esi movq %rbx, %rdi movl %r15d, %edx jmp .LBB8_3 .LBB8_39: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $196, %ecx jmp .LBB8_2 .LBB8_46: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $205, %ecx jmp .LBB8_2 .LBB8_50: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $212, %ecx jmp .LBB8_2 .LBB8_52: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $217, %ecx jmp .LBB8_2 .LBB8_71: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $226, %ecx jmp .LBB8_2 .LBB8_73: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $227, %ecx jmp .LBB8_2 .LBB8_75: movq stderr(%rip), %rdi movl $.L.str.4, %esi movl $.L.str.5, %edx movl $233, %ecx .LBB8_2: movl %eax, %ebx xorl %eax, %eax callq fprintf movq stderr(%rip), %r14 movl %ebx, %edi callq hipGetErrorString movl $.L.str.6, %esi movq %r14, %rdi movl %ebx, %edx .LBB8_3: movq %rax, %rcx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end8: .size main, .Lfunc_end8-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB9_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB9_2: movq __hip_gpubin_handle(%rip), %rbx xorps 
%xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8justcopyPfS_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14naivetransposePfS_ii, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18lessnaivetransposePfS_ii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12opttransposePfS_ii, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end9: .size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB10_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB10_2: retq .Lfunc_end10: .size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%3.0f " .size .L.str, 7 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n" .size .L.str.2, 80 .type _Z8justcopyPfS_ii,@object # @_Z8justcopyPfS_ii .section .rodata,"a",@progbits .globl _Z8justcopyPfS_ii .p2align 3, 0x0 
_Z8justcopyPfS_ii: .quad _Z23__device_stub__justcopyPfS_ii .size _Z8justcopyPfS_ii, 8 .type _Z14naivetransposePfS_ii,@object # @_Z14naivetransposePfS_ii .globl _Z14naivetransposePfS_ii .p2align 3, 0x0 _Z14naivetransposePfS_ii: .quad _Z29__device_stub__naivetransposePfS_ii .size _Z14naivetransposePfS_ii, 8 .type _Z18lessnaivetransposePfS_ii,@object # @_Z18lessnaivetransposePfS_ii .globl _Z18lessnaivetransposePfS_ii .p2align 3, 0x0 _Z18lessnaivetransposePfS_ii: .quad _Z33__device_stub__lessnaivetransposePfS_ii .size _Z18lessnaivetransposePfS_ii, 8 .type _Z12opttransposePfS_ii,@object # @_Z12opttransposePfS_ii .globl _Z12opttransposePfS_ii .p2align 3, 0x0 _Z12opttransposePfS_ii: .quad _Z27__device_stub__opttransposePfS_ii .size _Z12opttransposePfS_ii, 8 .type .L.str.4,@object # @.str.4 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.4: .asciz "Error: %s:%d, " .size .L.str.4, 15 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/RhodesCS2016/gpgpu/master/glinklater/exam/junetranspose-opt.hip" .size .L.str.5, 121 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "code: %d, reason: %s\n" .size .L.str.6, 22 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "%s starting transpose at " .size .L.str.7, 26 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "device %d: %s " .size .L.str.8, 15 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz " with matrix nrows %d ncols %d\n" .size .L.str.9, 32 .type .L.str.10,@object # @.str.10 .L.str.10: .asciz "justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n" .size .L.str.10, 95 .type .L.str.11,@object # @.str.11 .L.str.11: .asciz "optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n" .size .L.str.11, 99 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z8justcopyPfS_ii" .size .L__unnamed_1, 18 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z14naivetransposePfS_ii" .size 
.L__unnamed_2, 25 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z18lessnaivetransposePfS_ii" .size .L__unnamed_3, 29 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z12opttransposePfS_ii" .size .L__unnamed_4, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Arrays do not match.\n" .size .Lstr, 22 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z23__device_stub__justcopyPfS_ii .addrsig_sym _Z29__device_stub__naivetransposePfS_ii .addrsig_sym _Z33__device_stub__lessnaivetransposePfS_ii .addrsig_sym _Z27__device_stub__opttransposePfS_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z8justcopyPfS_ii .addrsig_sym _Z14naivetransposePfS_ii .addrsig_sym _Z18lessnaivetransposePfS_ii .addrsig_sym _Z12opttransposePfS_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
# NOTE(review): stray non-assembly text (dataset/prompt artifact), commented out so it cannot be mistaken for code: You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z12opttransposePfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */ /* 0x000e220000002500 */ /*0020*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fe20003f25270 */ /*0030*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe40000000800 */ /*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e620000002100 */ /*0050*/ ULDC UR5, c[0x0][0xc] ; /* 0x0000030000057ab9 */ /* 0x000fe40000000800 */ /*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0070*/ S2R R8, SR_CTAID.Y ; /* 0x0000000000087919 */ /* 0x000ea20000002600 */ /*0080*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x000fc6000f8e023f */ /*0090*/ S2R R11, SR_TID.Y ; /* 0x00000000000b7919 */ /* 0x000ee40000002200 */ /*00a0*/ @!P1 BRA 0x190 ; /* 0x000000e000009947 */ /* 0x000fea0003800000 */ /*00b0*/ HFMA2.MMA R5, -RZ, RZ, 0, 0 ; /* 0x00000000ff057435 */ /* 0x000fe200000001ff */ /*00c0*/ IMAD R0, R8, c[0x0][0x0], R11 ; /* 0x0000000008007a24 */ /* 0x00cfe400078e020b */ /*00d0*/ IMAD R4, R7, c[0x0][0x0], R9 ; /* 0x0000000007047a24 */ /* 0x003fce00078e0209 */ /*00e0*/ IMAD.IADD R3, R0, 0x1, R5 ; /* 0x0000000100037824 */ /* 0x000fe200078e0205 */ /*00f0*/ MOV R2, 0x4 ; /* 0x0000000400027802 */ /* 0x001fc60000000f00 */ /*0100*/ IMAD R3, R3, UR4, R4 ; /* 0x0000000403037c24 */ /* 0x000fc8000f8e0204 */ /*0110*/ IMAD.WIDE R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */ /* 0x000fcc00078e0202 */ /*0120*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */ /* 0x000ea2000c1e1900 */ /*0130*/ IMAD.IADD R6, R11, 0x1, R5 ; /* 0x000000010b067824 */ /* 0x000fe200078e0205 */ /*0140*/ IADD3 R5, R5, c[0x0][0x4], RZ ; /* 0x0000010005057a10 */ /* 0x000fc80007ffe0ff */ /*0150*/ LEA R13, R6, R9, 0x5 ; /* 0x00000009060d7211 */ 
/* 0x000fe400078e28ff */ /*0160*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x0], PT ; /* 0x0000000005007a0c */ /* 0x000fc60003f06070 */ /*0170*/ STS [R13.X4], R2 ; /* 0x000000020d007388 */ /* 0x0041f40000004800 */ /*0180*/ @!P0 BRA 0xe0 ; /* 0xffffff5000008947 */ /* 0x000fea000383ffff */ /*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*01a0*/ @!P1 EXIT ; /* 0x000000000000994d */ /* 0x000fea0003800000 */ /*01b0*/ IMAD R0, R8, c[0x0][0x0], R9 ; /* 0x0000000008007a24 */ /* 0x006fe200078e0209 */ /*01c0*/ MOV R4, RZ ; /* 0x000000ff00047202 */ /* 0x000fe20000000f00 */ /*01d0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff067424 */ /* 0x000fc600078e00ff */ /*01e0*/ IADD3 R2, R11, R4, RZ ; /* 0x000000040b027210 */ /* 0x009fe40007ffe0ff */ /*01f0*/ IADD3 R4, R4, c[0x0][0x4], RZ ; /* 0x0000010004047a10 */ /* 0x000fc40007ffe0ff */ /*0200*/ LEA R5, R9, R2, 0x5 ; /* 0x0000000209057211 */ /* 0x000fe200078e28ff */ /*0210*/ IMAD R3, R7, c[0x0][0x0], R2 ; /* 0x0000000007037a24 */ /* 0x000fe200078e0202 */ /*0220*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x0], PT ; /* 0x0000000004007a0c */ /* 0x000fc60003f06070 */ /*0230*/ IMAD R3, R3, UR4, R0 ; /* 0x0000000403037c24 */ /* 0x000fe2000f8e0200 */ /*0240*/ LDS R5, [R5.X4] ; /* 0x0000000005057984 */ /* 0x000e260000004800 */ /*0250*/ IMAD.WIDE R2, R3, R6, c[0x0][0x160] ; /* 0x0000580003027625 */ /* 0x000fca00078e0206 */ /*0260*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x0011e2000c101906 */ /*0270*/ @!P0 BRA 0x1e0 ; /* 0xffffff6000008947 */ /* 0x000fea000383ffff */ /*0280*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0290*/ BRA 0x290; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z18lessnaivetransposePfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */ /* 0x000fc60003f05270 */ /*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e740000002100 */ /*0040*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */ /* 0x000ea20000002200 */ /*0060*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x003fe200078e0203 */ /*0080*/ ULDC UR5, c[0x0][0xc] ; /* 0x0000030000057ab9 */ /* 0x000fe20000000800 */ /*0090*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000ea20000002600 */ /*00a0*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */ /* 0x000fe2000f8e023f */ /*00b0*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */ /* 0x000fe200000001ff */ /*00c0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fc80000000a00 */ /*00d0*/ IMAD R6, R0, UR4, RZ ; /* 0x0000000400067c24 */ /* 0x000fe4000f8e02ff */ /*00e0*/ IMAD R7, R2, c[0x0][0x0], R7 ; /* 0x0000000002077a24 */ /* 
0x004fca00078e0207 */ /*00f0*/ IADD3 R5, R7, R8, RZ ; /* 0x0000000807057210 */ /* 0x001fe40007ffe0ff */ /*0100*/ MOV R4, 0x4 ; /* 0x0000000400047802 */ /* 0x000fc60000000f00 */ /*0110*/ IMAD R2, R5, UR4, R0 ; /* 0x0000000405027c24 */ /* 0x000fc8000f8e0200 */ /*0120*/ IMAD.WIDE.U32 R2, R2, R4, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fcc00078e0004 */ /*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */ /* 0x000ea2000c1e1900 */ /*0140*/ IADD3 R8, R8, c[0x0][0x4], RZ ; /* 0x0000010008087a10 */ /* 0x000fe40007ffe0ff */ /*0150*/ IADD3 R5, R6, R5, RZ ; /* 0x0000000506057210 */ /* 0x000fe40007ffe0ff */ /*0160*/ ISETP.GE.U32.AND P0, PT, R8, c[0x0][0x0], PT ; /* 0x0000000008007a0c */ /* 0x000fc60003f06070 */ /*0170*/ IMAD.WIDE.U32 R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fca00078e0004 */ /*0180*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x0041ea000c101906 */ /*0190*/ @!P0 BRA 0xf0 ; /* 0xffffff5000008947 */ /* 0x000fea000383ffff */ /*01a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z14naivetransposePfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x174], PT ; /* 0x00005d0005007a0c */ /* 0x000fe20003f06070 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */ /* 0x000fda0000706470 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R2, R5, c[0x0][0x170], R0 ; /* 0x00005c0005027a24 */ /* 0x000fe200078e0200 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE.U32 R2, R2, R4, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fcc00078e0004 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD R5, R0, c[0x0][0x174], R5 ; /* 0x00005d0000057a24 */ /* 0x000fc800078e0205 */ /*0100*/ IMAD.WIDE.U32 R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fca00078e0004 */ /*0110*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z8justcopyPfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GE.U32.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */ /* 0x000fe20003f06070 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */ /* 0x000fda0000706470 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R0, R0, c[0x0][0x174], R3 ; /* 0x00005d0000007a24 */ /* 0x000fe200078e0203 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE.U32 R2, R0, R5, 
c[0x0][0x168] ; /* 0x00005a0000027625 */ /* 0x000fcc00078e0005 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */ /* 0x000fca00078e0005 */ /*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z8justcopyPfS_ii .globl _Z8justcopyPfS_ii .p2align 8 .type _Z8justcopyPfS_ii,@function _Z8justcopyPfS_ii: s_clause 0x1 s_load_b32 s4, s[0:1], 0x24 s_load_b64 s[2:3], s[0:1], 0x10 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s5, s4, 0xffff s_lshr_b32 s4, s4, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4] v_cmp_gt_u32_e32 vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_u32_e64 s2, s3, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s4, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[2:3], null, v0, s3, v[1:2] v_mov_b32_e32 v3, 0 v_lshlrev_b64 v[0:1], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8justcopyPfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 
.amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z8justcopyPfS_ii, .Lfunc_end0-_Z8justcopyPfS_ii .section .AMDGPU.csdata,"",@progbits .text .protected _Z14naivetransposePfS_ii .globl _Z14naivetransposePfS_ii .p2align 8 .type _Z14naivetransposePfS_ii,@function _Z14naivetransposePfS_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b64 s[4:5], s[0:1], 0x10 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_u32_e64 s2, s5, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB1_2 s_load_b128 s[0:3], s[0:1], 0x0 v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1] v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s2, v4 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo global_load_b32 v2, v[4:5], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[4:5], null, v0, s5, v[1:2] 
v_mov_b32_e32 v5, v3 v_lshlrev_b64 v[0:1], 2, v[4:5] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v2, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14naivetransposePfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z14naivetransposePfS_ii, .Lfunc_end1-_Z14naivetransposePfS_ii .section .AMDGPU.csdata,"",@progbits .text .protected _Z18lessnaivetransposePfS_ii .globl _Z18lessnaivetransposePfS_ii .p2align 8 .type _Z18lessnaivetransposePfS_ii,@function _Z18lessnaivetransposePfS_ii: s_load_b32 s6, s[0:1], 0x24 s_add_u32 s2, s0, 24 s_addc_u32 s3, s1, 0 s_waitcnt lgkmcnt(0) v_cmp_eq_u16_e64 s5, s6, 0 
s_delay_alu instid0(VALU_DEP_1) s_and_b32 vcc_lo, exec_lo, s5 s_mov_b32 s5, 0 s_cbranch_vccnz .LBB2_3 s_clause 0x1 s_load_b32 s7, s[2:3], 0x0 s_load_b32 s8, s[2:3], 0xc v_and_b32_e32 v3, 0x3ff, v0 v_bfe_u32 v2, v0, 10, 10 s_and_b32 s6, 0xffff, s6 s_mov_b32 s4, s15 s_load_b128 s[0:3], s[0:1], 0x0 v_mad_u64_u32 v[0:1], null, s14, s6, v[3:4] v_mad_u64_u32 v[4:5], null, s4, s6, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mad_u64_u32 v[5:6], null, s7, v0, s[4:5] v_mad_u64_u32 v[6:7], null, s7, v4, s[14:15] s_lshr_b32 s4, s8, 16 s_mul_i32 s7, s7, s6 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) s_mul_i32 s7, s7, s4 v_mad_u64_u32 v[0:1], null, v5, s6, v[2:3] s_delay_alu instid0(VALU_DEP_2) v_mad_u64_u32 v[1:2], null, v6, s6, v[3:4] v_mov_b32_e32 v2, 0 .p2align 6 .LBB2_2: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[3:4], 2, v[1:2] v_add_nc_u32_e32 v1, s7, v1 v_add_co_u32 v3, vcc_lo, s2, v3 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo global_load_b32 v5, v[3:4], off v_dual_mov_b32 v4, v2 :: v_dual_add_nc_u32 v3, s5, v0 s_add_i32 s5, s5, s4 s_cmp_ge_u32 s5, s6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[3:4] v_add_co_u32 v3, vcc_lo, s0, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[3:4], v5, off s_cbranch_scc0 .LBB2_2 .LBB2_3: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18lessnaivetransposePfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 
.amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z18lessnaivetransposePfS_ii, .Lfunc_end2-_Z18lessnaivetransposePfS_ii .section .AMDGPU.csdata,"",@progbits .text .protected _Z12opttransposePfS_ii .globl _Z12opttransposePfS_ii .p2align 8 .type _Z12opttransposePfS_ii,@function _Z12opttransposePfS_ii: s_clause 0x1 s_load_b32 s3, s[0:1], 0x24 s_load_b32 s8, s[0:1], 0x18 s_add_u32 s4, s0, 24 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_addc_u32 s5, s1, 0 s_mov_b32 s2, s15 s_mov_b32 s10, 0 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) s_cmp_eq_u32 s3, 0 s_mul_i32 s9, s8, s3 s_cbranch_scc1 .LBB3_3 s_load_b32 s11, s[4:5], 0xc v_mad_u64_u32 v[2:3], null, s2, s3, v[0:1] s_load_b64 s[6:7], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[4:5], null, s8, v2, s[14:15] v_lshlrev_b32_e32 v5, 2, v1 v_mad_u64_u32 v[2:3], null, v4, s3, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_lshl_add_u32 v4, v0, 7, v5 s_waitcnt lgkmcnt(0) s_lshr_b32 s11, s11, 
16 s_lshl_b32 s12, s11, 7 s_mul_i32 s13, s9, s11 .LBB3_2: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_ashrrev_i32_e32 v3, 31, v2 s_add_i32 s10, s10, s11 s_cmp_ge_u32 s10, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[5:6], 2, v[2:3] v_add_nc_u32_e32 v2, s13, v2 v_add_co_u32 v5, vcc_lo, s6, v5 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo global_load_b32 v3, v[5:6], off s_waitcnt vmcnt(0) ds_store_b32 v4, v3 v_add_nc_u32_e32 v4, s12, v4 s_cbranch_scc0 .LBB3_2 .LBB3_3: s_cmp_eq_u32 s3, 0 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB3_6 s_load_b32 s4, s[4:5], 0xc v_mad_u64_u32 v[2:3], null, s14, s3, v[0:1] s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_mov_b32 s5, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshl_add_u32 v0, v1, 7, v0 v_mad_u64_u32 v[4:5], null, s8, v2, s[2:3] s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[2:3], null, v4, s3, v[1:2] s_waitcnt lgkmcnt(0) s_lshr_b32 s2, s4, 16 s_mul_i32 s9, s9, s2 s_lshl_b32 s4, s2, 2 .LBB3_5: ds_load_b32 v1, v0 v_ashrrev_i32_e32 v3, 31, v2 v_add_nc_u32_e32 v0, s4, v0 s_add_i32 s5, s5, s2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) s_cmp_ge_u32 s5, s3 v_lshlrev_b64 v[3:4], 2, v[2:3] v_add_nc_u32_e32 v2, s9, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo s_waitcnt lgkmcnt(0) global_store_b32 v[3:4], v1, off s_cbranch_scc0 .LBB3_5 .LBB3_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12opttransposePfS_ii .amdhsa_group_segment_fixed_size 4096 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 
.amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end3: .size _Z12opttransposePfS_ii, .Lfunc_end3-_Z12opttransposePfS_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 
.value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8justcopyPfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z8justcopyPfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 
.kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14naivetransposePfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14naivetransposePfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18lessnaivetransposePfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18lessnaivetransposePfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: 
by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 4096 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12opttransposePfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12opttransposePfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>

// CUDA kernel: naive dense matrix multiply, C = A * B.
// Matrices are TM x TM, row-major. Launch layout: one block per row
// (blockIdx.x = i), one thread per column (threadIdx.x = j).
__global__ void matrixMul(float* C, float* A, float* B, int TM)
{
    float cc = 0.f;          // accumulator for C[i][j]
    int k;

    // Thread coordinates.
    int i = blockIdx.x;
    int j = threadIdx.x;

    // C[i][j] = dot(row i of A, column j of B).
    for (k = 0; k < TM; k++) {
        cc += A[i * TM + k] * B[k * TM + j];
    }
    C[i * TM + j] = cc;
}

/////////////////////////////////////////////////////////
// Main program
/////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
    int i, j, GRID_SIZE_X, BLOCK_SIZE_X;
    int TM = 1024;
    cudaError_t cerror;

    if (argc > 1) {
        TM = atoi(argv[1]);
    }

    // Guard: the block dimension is TM threads and CUDA caps a block at
    // 1024 threads; atoi() also returns 0 on a non-numeric argument.
    if (TM < 1 || TM > 1024) {
        fprintf(stderr, "TM invalide (1..1024): %d\n", TM);
        exit(1);
    }

    // BUG FIX: size must be computed AFTER the command-line override of TM
    // (the original computed it from the default TM=1024 first, so a larger
    // TM would overflow every buffer). size_t also avoids int overflow of
    // TM*TM*sizeof(float).
    size_t size = (size_t)TM * TM * sizeof(float);

    BLOCK_SIZE_X = TM;
    GRID_SIZE_X = TM;

    // Grid and block definition.
    dim3 block(BLOCK_SIZE_X);
    dim3 grid(GRID_SIZE_X);

    // Host-side matrices.
    float *h_A, *h_B, *h_C;
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "Echec malloc\n");
        exit(1);
    }

    // Initialize with values that make the result verifiable:
    // A and B are all-ones, except the diagonal which holds i+1.
    for (i = 0; i < TM; i++) {
        for (j = 0; j < TM; j++) {
            h_A[i * TM + j] = 1.0;
            h_B[i * TM + j] = 1.0;
            h_C[i * TM + j] = 0.0;
            if (i == j) {
                h_A[i * TM + j] = (float)(i + 1);
                h_B[i * TM + j] = (float)(i + 1);
            }
        }
    }

    // Events used to time the compute and the transfers.
    float tc, tt_cpuGpu, tt_gpuCpu;
    cudaEvent_t depart, arret, cpuGpuDebut, cpuGpuFin, gpuCpuDebut, gpuCpuFin;
    cudaEventCreate(&depart);
    cudaEventCreate(&arret);
    cudaEventCreate(&cpuGpuDebut);
    cudaEventCreate(&cpuGpuFin);
    cudaEventCreate(&gpuCpuDebut);
    cudaEventCreate(&gpuCpuFin);
    cudaEventRecord(depart, 0);
    cudaEventRecord(cpuGpuDebut, 0);
    cudaEventRecord(gpuCpuDebut, 0);

    // Device-side matrices.
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    // Copy A and B from host to device.
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Time the host-to-device transfer.
    cudaEventRecord(cpuGpuFin, 0);
    cudaEventSynchronize(cpuGpuFin);
    cudaEventElapsedTime(&tt_cpuGpu, cpuGpuDebut, cpuGpuFin);
    cudaEventDestroy(cpuGpuDebut);
    cudaEventDestroy(cpuGpuFin);
    printf("Transfert A,B de CPU vers GPU : %f seconde\n", tt_cpuGpu / 1000.0);

    // Launch the kernel.
    matrixMul<<<grid, block>>>(d_C, d_A, d_B, TM);

    // Check for a launch error.
    cerror = cudaGetLastError();
    if (cerror != cudaSuccess) {
        printf("Erreur appel kernel %d \n", (int)cerror);
        exit(cerror);
    }

    // Copy C back from the device (synchronizes with the kernel).
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    // Time the kernel + device-to-host transfer.
    cudaEventRecord(gpuCpuFin, 0);
    cudaEventSynchronize(gpuCpuFin);
    cudaEventElapsedTime(&tt_gpuCpu, gpuCpuDebut, gpuCpuFin);
    cudaEventDestroy(gpuCpuDebut);
    cudaEventDestroy(gpuCpuFin);
    printf("Transfert C de GPU vers CPU : %f seconde\n", tt_gpuCpu / 1000.0);

    // Total elapsed time.
    cudaEventRecord(arret, 0);
    cudaEventSynchronize(arret);
    cudaEventElapsedTime(&tc, depart, arret);
    cudaEventDestroy(depart);
    cudaEventDestroy(arret);
    printf("Temps consommé : %f seconde\n", tc / 1000.0);

    // Verify: C[i][i] = (i+1)^2 + TM - 1, and C[i][j] = i + j + TM off the
    // diagonal (follows from the A/B initialization above).
    for (i = 0; i < TM; i++) {
        for (j = 0; j < TM; j++) {
            if ((i == j) && (h_C[i * TM + j] != (float)((i + 1) * (i + 1) + TM - 1))) {
                printf("Erreur i: %d j: %d %f\n", i, j, h_C[i * TM + j]);
                exit(1);
            } else if ((i != j) && (h_C[i * TM + j] != (float)(i + j + TM))) {
                printf("Erreur i: %d j: %d\n", i, j);
                exit(1);
            }
        }
    }

    // Release memory.
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
code for sm_80 Function : _Z9matrixMulPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e220000002500 */ /*0020*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */ /* 0x000fe200000001ff */ /*0050*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e620000002100 */ /*0060*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R4, R4, c[0x0][0x178], RZ ; /* 0x00005e0004047a24 */ /* 0x001fd800078e02ff */ /*0080*/ @!P0 BRA 0xb70 ; /* 0x00000ae000008947 */ /* 0x000fea0003800000 */ /*0090*/ IADD3 R3, R2.reuse, -0x1, RZ ; /* 0xffffffff02037810 */ /* 0x040fe40007ffe0ff */ /*00a0*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */ /* 0x000fe400078ec0ff */ /*00b0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */ /* 0x000fe40003f06070 */ /*00c0*/ MOV R21, RZ ; /* 0x000000ff00157202 */ /* 0x000fe40000000f00 */ /*00d0*/ MOV R3, RZ ; /* 0x000000ff00037202 */ /* 0x000fd20000000f00 */ /*00e0*/ @!P0 BRA 0xa70 ; /* 0x0000098000008947 */ /* 0x000fea0003800000 */ /*00f0*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */ /* 0x000fe20007ffe1ff */ /*0100*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */ /* 0x000fe200000001ff */ /*0110*/ MOV R21, RZ ; /* 0x000000ff00157202 */ /* 0x000fe20000000f00 */ /*0120*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */ /* 0x000fe200000001ff */ /*0130*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f04270 */ /*0140*/ MOV R8, c[0x0][0x168] ; /* 0x00005a0000087a02 */ /* 0x000fe40000000f00 */ /*0150*/ MOV R7, c[0x0][0x16c] ; 
/* 0x00005b0000077a02 */ /* 0x000fc60000000f00 */ /*0160*/ IMAD.WIDE R18, R0, R19, c[0x0][0x170] ; /* 0x00005c0000127625 */ /* 0x002fcc00078e0213 */ /*0170*/ @!P0 BRA 0x8f0 ; /* 0x0000077000008947 */ /* 0x000fea0003800000 */ /*0180*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fe40003f24270 */ /*0190*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01a0*/ @!P1 BRA 0x640 ; /* 0x0000049000009947 */ /* 0x000fea0003800000 */ /*01b0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*01c0*/ MOV R9, R7 ; /* 0x0000000700097202 */ /* 0x000fe20000000f00 */ /*01d0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */ /* 0x0000a8000c1e1900 */ /*01e0*/ IMAD.WIDE R12, R4, 0x4, R8 ; /* 0x00000004040c7825 */ /* 0x000fc800078e0208 */ /*01f0*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x040fe200078e0212 */ /*0200*/ LDG.E R25, [R12.64] ; /* 0x000000040c197981 */ /* 0x000ea8000c1e1900 */ /*0210*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */ /* 0x0002e8000c1e1900 */ /*0220*/ LDG.E R11, [R12.64+0x4] ; /* 0x000004040c0b7981 */ /* 0x000ee8000c1e1900 */ /*0230*/ LDG.E R14, [R12.64+0x8] ; /* 0x000008040c0e7981 */ /* 0x000f22000c1e1900 */ /*0240*/ IMAD.WIDE R22, R2, 0x4, R22 ; /* 0x0000000402167825 */ /* 0x002fc600078e0216 */ /*0250*/ LDG.E R19, [R12.64+0x10] ; /* 0x000010040c137981 */ /* 0x001f68000c1e1900 */ /*0260*/ LDG.E R15, [R22.64] ; /* 0x00000004160f7981 */ /* 0x000122000c1e1900 */ /*0270*/ IMAD.WIDE R16, R2, 0x4, R22 ; /* 0x0000000402107825 */ /* 0x000fc600078e0216 */ /*0280*/ LDG.E R9, [R12.64+0x14] ; /* 0x000014040c097981 */ /* 0x000f66000c1e1900 */ /*0290*/ IMAD.WIDE R26, R2.reuse, 0x4, R16 ; /* 0x00000004021a7825 */ /* 0x040fe400078e0210 */ /*02a0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000368000c1e1900 */ /*02b0*/ IMAD.WIDE R28, R2.reuse, 0x4, R26 ; /* 0x00000004021c7825 */ /* 0x040fe200078e021a */ /*02c0*/ LDG.E R18, [R26.64] ; /* 
0x000000041a127981 */ /* 0x000168000c1e1900 */ /*02d0*/ LDG.E R20, [R28.64] ; /* 0x000000041c147981 */ /* 0x000968000c1e1900 */ /*02e0*/ LDG.E R17, [R12.64+0xc] ; /* 0x00000c040c117981 */ /* 0x002f62000c1e1900 */ /*02f0*/ IMAD.WIDE R22, R2, 0x4, R28 ; /* 0x0000000402167825 */ /* 0x001fc600078e021c */ /*0300*/ LDG.E R26, [R12.64+0x1c] ; /* 0x00001c040c1a7981 */ /* 0x000f62000c1e1900 */ /*0310*/ FFMA R25, R10, R25, R21 ; /* 0x000000190a197223 */ /* 0x004fc60000000015 */ /*0320*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */ /* 0x0000a8000c1e1900 */ /*0330*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */ /* 0x000ea2000c1e1900 */ /*0340*/ IMAD.WIDE R22, R2, 0x4, R22 ; /* 0x0000000402167825 */ /* 0x001fc800078e0216 */ /*0350*/ FFMA R27, R24, R11, R25 ; /* 0x0000000b181b7223 */ /* 0x008fe40000000019 */ /*0360*/ IMAD.WIDE R24, R2.reuse, 0x4, R22 ; /* 0x0000000402187825 */ /* 0x040fe200078e0216 */ /*0370*/ LDG.E R11, [R22.64] ; /* 0x00000004160b7981 */ /* 0x0000e6000c1e1900 */ /*0380*/ FFMA R29, R15, R14, R27 ; /* 0x0000000e0f1d7223 */ /* 0x010fe2000000001b */ /*0390*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */ /* 0x000328000c1e1900 */ /*03a0*/ LDG.E R27, [R12.64+0x20] ; /* 0x000020040c1b7981 */ /* 0x000f22000c1e1900 */ /*03b0*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */ /* 0x000fc600078e0218 */ /*03c0*/ LDG.E R25, [R12.64+0x28] ; /* 0x000028040c197981 */ /* 0x002f22000c1e1900 */ /*03d0*/ FFMA R29, R16, R17, R29 ; /* 0x00000011101d7223 */ /* 0x020fe4000000001d */ /*03e0*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */ /* 0x000fe400078e020e */ /*03f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000364000c1e1900 */ /*0400*/ FFMA R29, R18, R19, R29 ; /* 0x00000013121d7223 */ /* 0x000fe4000000001d */ /*0410*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */ /* 0x000fe400078e0210 */ /*0420*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000364000c1e1900 */ /*0430*/ FFMA R20, R20, R9, R29 ; /* 0x0000000914147223 */ /* 
0x000fc4000000001d */ /*0440*/ LDG.E R9, [R12.64+0x24] ; /* 0x000024040c097981 */ /* 0x000f62000c1e1900 */ /*0450*/ IMAD.WIDE R22, R2, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x001fc600078e0212 */ /*0460*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */ /* 0x000768000c1e1900 */ /*0470*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */ /* 0x002f68000c1e1900 */ /*0480*/ LDG.E R29, [R22.64] ; /* 0x00000004161d7981 */ /* 0x000162000c1e1900 */ /*0490*/ FFMA R10, R10, R21, R20 ; /* 0x000000150a0a7223 */ /* 0x004fe40000000014 */ /*04a0*/ IMAD.WIDE R20, R2, 0x4, R22 ; /* 0x0000000402147825 */ /* 0x000fc400078e0216 */ /*04b0*/ LDG.E R22, [R12.64+0x38] ; /* 0x000038040c167981 */ /* 0x001ea8000c1e1900 */ /*04c0*/ LDG.E R17, [R20.64] ; /* 0x0000000414117981 */ /* 0x0008a2000c1e1900 */ /*04d0*/ FFMA R18, R11, R26, R10 ; /* 0x0000001a0b127223 */ /* 0x008fc6000000000a */ /*04e0*/ LDG.E R26, [R12.64+0x30] ; /* 0x000030040c1a7981 */ /* 0x000ee2000c1e1900 */ /*04f0*/ IMAD.WIDE R10, R2, 0x4, R20 ; /* 0x00000004020a7825 */ /* 0x000fc800078e0214 */ /*0500*/ FFMA R20, R28, R27, R18 ; /* 0x0000001b1c147223 */ /* 0x010fe40000000012 */ /*0510*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */ /* 0x000ea2000c1e1900 */ /*0520*/ IMAD.WIDE R18, R2, 0x4, R10 ; /* 0x0000000402127825 */ /* 0x000fc600078e020a */ /*0530*/ LDG.E R27, [R10.64] ; /* 0x000000040a1b7981 */ /* 0x000128000c1e1900 */ /*0540*/ LDG.E R23, [R18.64] ; /* 0x0000000412177981 */ /* 0x000328000c1e1900 */ /*0550*/ LDG.E R10, [R12.64+0x3c] ; /* 0x00003c040c0a7981 */ /* 0x001f22000c1e1900 */ /*0560*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */ /* 0x000fe20007ffe0ff */ /*0570*/ FFMA R9, R14, R9, R20 ; /* 0x000000090e097223 */ /* 0x020fc80000000014 */ /*0580*/ FFMA R9, R16, R25, R9 ; /* 0x0000001910097223 */ /* 0x000fe20000000009 */ /*0590*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fc60003f24270 */ /*05a0*/ FFMA R9, R24, R15, R9 ; /* 0x0000000f18097223 */ /* 0x000fe20000000009 */ /*05b0*/ IADD3 R8, 
P2, R8, 0x40, RZ ; /* 0x0000004008087810 */ /* 0x000fe20007f5e0ff */ /*05c0*/ IMAD.WIDE R18, R2, 0x4, R18 ; /* 0x0000000402127825 */ /* 0x002fe200078e0212 */ /*05d0*/ IADD3 R3, R3, 0x10, RZ ; /* 0x0000001003037810 */ /* 0x000fe40007ffe0ff */ /*05e0*/ IADD3.X R7, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff077210 */ /* 0x000fe200017fe4ff */ /*05f0*/ FFMA R9, R29, R26, R9 ; /* 0x0000001a1d097223 */ /* 0x008fc80000000009 */ /*0600*/ FFMA R9, R17, R28, R9 ; /* 0x0000001c11097223 */ /* 0x004fc80000000009 */ /*0610*/ FFMA R9, R27, R22, R9 ; /* 0x000000161b097223 */ /* 0x010fc80000000009 */ /*0620*/ FFMA R21, R23, R10, R9 ; /* 0x0000000a17157223 */ /* 0x000fe20000000009 */ /*0630*/ @P1 BRA 0x1c0 ; /* 0xfffffb8000001947 */ /* 0x000fea000383ffff */ /*0640*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */ /* 0x000fda0003f24270 */ /*0650*/ @!P1 BRA 0x8d0 ; /* 0x0000027000009947 */ /* 0x000fea0003800000 */ /*0660*/ MOV R9, R7 ; /* 0x0000000700097202 */ /* 0x000fe20000000f00 */ /*0670*/ LDG.E R20, [R18.64] ; /* 0x0000000412147981 */ /* 0x0000a8000c1e1900 */ /*0680*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */ /* 0x000fca00078e0208 */ /*0690*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */ /* 0x000ea2000c1e1900 */ /*06a0*/ IMAD.WIDE R22, R2, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x000fc600078e0212 */ /*06b0*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */ /* 0x000ee6000c1e1900 */ /*06c0*/ IMAD.WIDE R12, R2.reuse, 0x4, R22 ; /* 0x00000004020c7825 */ /* 0x040fe200078e0216 */ /*06d0*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */ /* 0x0002e8000c1e1900 */ /*06e0*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */ /* 0x000962000c1e1900 */ /*06f0*/ IMAD.WIDE R14, R2, 0x4, R12 ; /* 0x00000004020e7825 */ /* 0x000fc600078e020c */ /*0700*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */ /* 0x000f66000c1e1900 */ /*0710*/ IMAD.WIDE R16, R2.reuse, 0x4, R14 ; /* 0x0000000402107825 */ /* 0x040fe200078e020e */ /*0720*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */ /* 
0x000f68000c1e1900 */ /*0730*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000962000c1e1900 */ /*0740*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */ /* 0x001fc600078e0210 */ /*0750*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000166000c1e1900 */ /*0760*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x042fe200078e0212 */ /*0770*/ LDG.E R28, [R18.64] ; /* 0x00000004121c7981 */ /* 0x000368000c1e1900 */ /*0780*/ LDG.E R15, [R10.64+0x10] ; /* 0x000010040a0f7981 */ /* 0x010f22000c1e1900 */ /*0790*/ IMAD.WIDE R12, R2, 0x4, R22 ; /* 0x00000004020c7825 */ /* 0x000fc600078e0216 */ /*07a0*/ LDG.E R18, [R10.64+0x1c] ; /* 0x00001c040a127981 */ /* 0x002f28000c1e1900 */ /*07b0*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */ /* 0x001f22000c1e1900 */ /*07c0*/ FFMA R19, R20, R9, R21 ; /* 0x0000000914137223 */ /* 0x004fc60000000015 */ /*07d0*/ LDG.E R21, [R10.64+0x14] ; /* 0x000014040a157981 */ /* 0x000ea8000c1e1900 */ /*07e0*/ LDG.E R9, [R22.64] ; /* 0x0000000416097981 */ /* 0x000ea8000c1e1900 */ /*07f0*/ LDG.E R20, [R10.64+0x18] ; /* 0x000018040a147981 */ /* 0x000ea2000c1e1900 */ /*0800*/ FFMA R24, R24, R25, R19 ; /* 0x0000001918187223 */ /* 0x008fc80000000013 */ /*0810*/ FFMA R24, R26, R27, R24 ; /* 0x0000001b1a187223 */ /* 0x020fc80000000018 */ /*0820*/ FFMA R14, R14, R29, R24 ; /* 0x0000001d0e0e7223 */ /* 0x000fe20000000018 */ /*0830*/ IADD3 R8, P1, R8, 0x20, RZ ; /* 0x0000002008087810 */ /* 0x000fc60007f3e0ff */ /*0840*/ FFMA R14, R16, R15, R14 ; /* 0x0000000f100e7223 */ /* 0x010fe2000000000e */ /*0850*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0860*/ IADD3 R3, R3, 0x8, RZ ; /* 0x0000000803037810 */ /* 0x000fe40007ffe0ff */ /*0870*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */ /* 0x000fe40007ffe0ff */ /*0880*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0890*/ FFMA R14, R28, R21, R14 ; /* 0x000000151c0e7223 */ /* 0x004fc8000000000e */ 
/*08a0*/ FFMA R9, R9, R20, R14 ; /* 0x0000001409097223 */ /* 0x000fc8000000000e */ /*08b0*/ FFMA R21, R17, R18, R9 ; /* 0x0000001211157223 */ /* 0x000fe40000000009 */ /*08c0*/ IMAD.WIDE R18, R2, 0x4, R12 ; /* 0x0000000402127825 */ /* 0x000fc800078e020c */ /*08d0*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0000705670 */ /*08e0*/ @!P0 BRA 0xa70 ; /* 0x0000018000008947 */ /* 0x000fea0003800000 */ /*08f0*/ MOV R9, R7 ; /* 0x0000000700097202 */ /* 0x000fe20000000f00 */ /*0900*/ IMAD.WIDE R12, R2, 0x4, R18 ; /* 0x00000004020c7825 */ /* 0x000fe400078e0212 */ /*0910*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */ /* 0x000ea4000c1e1900 */ /*0920*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */ /* 0x000fc800078e0208 */ /*0930*/ IMAD.WIDE R14, R2.reuse, 0x4, R12 ; /* 0x00000004020e7825 */ /* 0x040fe200078e020c */ /*0940*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */ /* 0x000ea8000c1e1900 */ /*0950*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */ /* 0x000ee2000c1e1900 */ /*0960*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */ /* 0x000fc600078e020e */ /*0970*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */ /* 0x000ee8000c1e1900 */ /*0980*/ LDG.E R22, [R14.64] ; /* 0x000000040e167981 */ /* 0x000f28000c1e1900 */ /*0990*/ LDG.E R23, [R10.64+0x8] ; /* 0x000008040a177981 */ /* 0x000f28000c1e1900 */ /*09a0*/ LDG.E R25, [R10.64+0xc] ; /* 0x00000c040a197981 */ /* 0x000f68000c1e1900 */ /*09b0*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */ /* 0x000f62000c1e1900 */ /*09c0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */ /* 0x000fc80007ffe0ff */ /*09d0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f05270 */ /*09e0*/ IADD3 R8, P1, R8, 0x10, RZ ; /* 0x0000001008087810 */ /* 0x000fe40007f3e0ff */ /*09f0*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */ /* 0x000fe40007ffe0ff */ /*0a00*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0a10*/ FFMA R9, R18, R9, R21 ; /* 
0x0000000912097223 */ /* 0x004fc80000000015 */ /*0a20*/ FFMA R9, R12, R20, R9 ; /* 0x000000140c097223 */ /* 0x008fe40000000009 */ /*0a30*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */ /* 0x000fc800078e0210 */ /*0a40*/ FFMA R9, R22, R23, R9 ; /* 0x0000001716097223 */ /* 0x010fc80000000009 */ /*0a50*/ FFMA R21, R24, R25, R9 ; /* 0x0000001918157223 */ /* 0x020fe20000000009 */ /*0a60*/ @P0 BRA 0x8f0 ; /* 0xfffffe8000000947 */ /* 0x000fea000383ffff */ /*0a70*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f05270 */ /*0a80*/ @!P0 BRA 0xb70 ; /* 0x000000e000008947 */ /* 0x000fea0003800000 */ /*0a90*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */ /* 0x000fe200000001ff */ /*0aa0*/ IADD3 R6, R4, R3, RZ ; /* 0x0000000304067210 */ /* 0x000fe20007ffe0ff */ /*0ab0*/ IMAD R3, R3, c[0x0][0x178], R0 ; /* 0x00005e0003037a24 */ /* 0x002fd000078e0200 */ /*0ac0*/ IMAD.WIDE R6, R6, R8, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fc800078e0208 */ /*0ad0*/ IMAD.WIDE R8, R3, R8, c[0x0][0x170] ; /* 0x00005c0003087625 */ /* 0x000fca00078e0208 */ /*0ae0*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */ /* 0x0000a8000c1e1900 */ /*0af0*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */ /* 0x0002a2000c1e1900 */ /*0b00*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fc80007ffe0ff */ /*0b10*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*0b20*/ IMAD.WIDE R8, R2, 0x4, R8 ; /* 0x0000000402087825 */ /* 0x001fe200078e0208 */ /*0b30*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */ /* 0x002fc80007f3e0ff */ /*0b40*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0b50*/ FFMA R21, R10, R3, R21 ; /* 0x000000030a157223 */ /* 0x004fcc0000000015 */ /*0b60*/ @P0 BRA 0xae0 ; /* 0xffffff7000000947 */ /* 0x000fea000383ffff */ /*0b70*/ IADD3 R2, R4, R0, RZ ; /* 0x0000000004027210 */ /* 0x002fe40007ffe0ff */ /*0b80*/ MOV R3, 0x4 ; /* 0x0000000400037802 */ 
/* 0x000fca0000000f00 */ /*0b90*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0203 */ /*0ba0*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */ /* 0x000fe2000c101904 */ /*0bb0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0bc0*/ BRA 0xbc0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0bd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0be0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> // CUDA Kernel __global__ void matrixMul( float* C, float* A, float* B, int TM) { float cc; int k; // calcul des coordonnees du thread int i = blockIdx.x; int j = threadIdx.x; cc = 0.; // calcul de c[i][j] for(k=0;k<TM;k++){ cc += A[i*TM+k] * B[k*TM+j]; } C[i*TM+j] = cc; } ///////////////////////////////////////////////////////// // Programme main ///////////////////////////////////////////////////////// int main(int argc, char** argv) { int i, j, GRID_SIZE_X, BLOCK_SIZE_X; int TM=1024; int size = TM*TM*sizeof(float); cudaError_t cerror; if (argc>1) { TM=atoi(argv[1]); } BLOCK_SIZE_X = TM; GRID_SIZE_X = TM; // definiton de la grille et des blocs dim3 block(BLOCK_SIZE_X); dim3 grid(GRID_SIZE_X); // allocation des matrices sur CPU float *h_A, *h_B, *h_C ; h_A = (float*) malloc(size); h_B = (float*) malloc(size); h_C = (float*) malloc(size); // initialisation des matrices avec des valeurs permettant de verifier le resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ h_A[i*TM+j] = 1.0; h_B[i*TM+j] = 1.0; h_C[i*TM+j] = 0.0; if (i==j) { h_A[i*TM+j]=(float) (i+1); h_B[i*TM+j]=(float) (i+1); } } } // Pour mesurer le temps de calcul float tc, tt_cpuGpu, tt_gpuCpu; cudaEvent_t depart, arret,cpuGpuDebut, cpuGpuFin, gpuCpuDebut, gpuCpuFin; cudaEventCreate(&depart); cudaEventCreate(&arret); cudaEventCreate(&cpuGpuDebut); cudaEventCreate(&cpuGpuFin); cudaEventCreate(&gpuCpuDebut); cudaEventCreate(&gpuCpuFin); cudaEventRecord(depart,0); cudaEventRecord(cpuGpuDebut,0); cudaEventRecord(gpuCpuDebut,0); // allocation des matrices sur GPU float *d_A, *d_B, *d_C; cudaMalloc((void**) &d_A,size); cudaMalloc((void**) &d_B,size); cudaMalloc((void**) &d_C,size); // copie des matrives A et B depuis le CPU vers le GPU cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice); // Mesure du temps : transfert + calcul cudaEventRecord(cpuGpuFin,0); 
cudaEventSynchronize(cpuGpuFin); cudaEventElapsedTime(&tt_cpuGpu,cpuGpuDebut, cpuGpuFin); cudaEventDestroy(cpuGpuDebut); cudaEventDestroy(cpuGpuFin); printf("Transfert A,B de CPU vers GPU : %f seconde\n", tt_cpuGpu/1000.0); // lancement des threads matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM); // Vérification en cas d'erreur cerror=cudaGetLastError(); if ((int)cerror !=0) { printf("Erreur appel kernel %d \n", (int) cerror); exit(cerror); } // copie de la matrive C depuis le GPU cudaMemcpy(h_C,d_C, size,cudaMemcpyDeviceToHost); // Mesure du temps : transfert + calcul cudaEventRecord(gpuCpuFin,0); cudaEventSynchronize(gpuCpuFin); cudaEventElapsedTime(&tt_gpuCpu,gpuCpuDebut, gpuCpuFin); cudaEventDestroy(gpuCpuDebut); cudaEventDestroy(gpuCpuFin); printf("Transfert C de GPU vers CPU : %f seconde\n", tt_gpuCpu/1000.0); // Mesure du temps : transfert + calcul cudaEventRecord(arret,0); cudaEventSynchronize(arret); cudaEventElapsedTime(&tc,depart, arret); cudaEventDestroy(depart); cudaEventDestroy(arret); printf("Temps consommé : %f seconde\n", tc/1000.0); // verification du resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) { printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1); } else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))) { printf("Erreur i: %d j: %d\n", i, j); exit(1); } } } // liberation de la memoire free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
.file "tmpxft_00041c3f_00000000-6_mul_matG1B1_et_1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i .type _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, @function _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9matrixMulPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i .globl _Z9matrixMulPfS_S_i .type _Z9matrixMulPfS_S_i, @function _Z9matrixMulPfS_S_i: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9matrixMulPfS_S_i, .-_Z9matrixMulPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "Transfert A,B de CPU vers GPU : %f seconde\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC4: .string "Erreur appel kernel %d \n" .section .rodata.str1.8 .align 8 .LC5: .string "Transfert C de GPU vers CPU : %f seconde\n" .section .rodata.str1.1 .LC6: .string "Temps consomm\303\251 : %f seconde\n" .LC7: .string "Erreur i: %d j: %d %f\n" .LC8: .string "Erreur i: %d j: %d\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $136, %rsp .cfi_def_cfa_offset 192 movq %fs:40, %rdx movq %rdx, 120(%rsp) xorl %edx, %edx cmpl $1, %edi jg .L36 movl $1024, 96(%rsp) movl $1, 100(%rsp) movl $1, 104(%rsp) movl $1024, 108(%rsp) movl $1, 112(%rsp) movl $1, 116(%rsp) movl $4194304, %edi call malloc@PLT movq %rax, %r15 movl $4194304, %edi call malloc@PLT movq %rax, %r14 movl $4194304, %edi call malloc@PLT movq %rax, %rbx movl $1024, %r12d movl $1024, %r13d .L26: movslq %r12d, %r8 leaq 0(,%r8,4), %r9 movq %r15, %rsi movq %r14, %rcx movq %rbx, %rdi movl $0, %edx movss .LC0(%rip), %xmm0 jmp .L14 .L36: movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbp movl %eax, %r12d movl %eax, %r13d movl %eax, 96(%rsp) movl $1, 100(%rsp) movl $1, 104(%rsp) movl %eax, 108(%rsp) movl $1, 112(%rsp) movl $1, 116(%rsp) movl $4194304, %edi call malloc@PLT movq %rax, %r15 movl $4194304, %edi call malloc@PLT movq %rax, %r14 movl $4194304, %edi call malloc@PLT movq %rax, %rbx testl %ebp, %ebp jg .L26 .L13: 
leaq 24(%rsp), %rdi call cudaEventCreate@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT leaq 40(%rsp), %rdi call cudaEventCreate@PLT leaq 48(%rsp), %rdi call cudaEventCreate@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT leaq 72(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 80(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 88(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT movl $1, %ecx movl $4194304, %edx movq %r15, %rsi movq 72(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $4194304, %edx movq %r14, %rsi movq 80(%rsp), %rdi call cudaMemcpy@PLT movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movq 48(%rsp), %rdi call cudaEventSynchronize@PLT leaq 16(%rsp), %rdi movq 48(%rsp), %rdx movq 40(%rsp), %rsi call cudaEventElapsedTime@PLT movq 40(%rsp), %rdi call cudaEventDestroy@PLT movq 48(%rsp), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd 16(%rsp), %xmm0 divsd .LC2(%rip), %xmm0 leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl 104(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 96(%rsp), %rdx movq 108(%rsp), %rdi movl 116(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L37 .L17: call cudaGetLastError@PLT movl %eax, %ebp testl %eax, %eax jne .L38 movl $2, %ecx movl $4194304, %edx movq 88(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movq 64(%rsp), %rdi call cudaEventSynchronize@PLT leaq 20(%rsp), %rdi movq 64(%rsp), %rdx movq 56(%rsp), %rsi call cudaEventElapsedTime@PLT movq 56(%rsp), %rdi call cudaEventDestroy@PLT movq 64(%rsp), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd 20(%rsp), %xmm0 divsd .LC2(%rip), %xmm0 leaq .LC5(%rip), %rsi movl $2, %edi movl $1, %eax call 
__printf_chk@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movq 32(%rsp), %rdi call cudaEventSynchronize@PLT leaq 12(%rsp), %rdi movq 32(%rsp), %rdx movq 24(%rsp), %rsi call cudaEventElapsedTime@PLT movq 24(%rsp), %rdi call cudaEventDestroy@PLT movq 32(%rsp), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 divsd .LC2(%rip), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl %r13d, %esi movl $0, %edx leal -1(%r13), %r9d movl $0, %r8d testl %r12d, %r12d jg .L19 .L20: movq %r15, %rdi call free@PLT movq %r14, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 72(%rsp), %rdi call cudaFree@PLT movq 80(%rsp), %rdi call cudaFree@PLT movq 88(%rsp), %rdi call cudaFree@PLT movq 120(%rsp), %rax subq %fs:40, %rax jne .L39 movl $0, %eax addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state addq $1, %rax cmpq %r8, %rax je .L40 .L16: movss %xmm0, (%rsi,%rax,4) movss %xmm0, (%rcx,%rax,4) movl $0x00000000, (%rdi,%rax,4) cmpl %eax, %edx jne .L15 pxor %xmm1, %xmm1 cvtsi2ssl %r10d, %xmm1 movss %xmm1, (%rsi,%rax,4) movss %xmm1, (%rcx,%rax,4) jmp .L15 .L40: addl $1, %edx addq %r9, %rsi addq %r9, %rcx addq %r9, %rdi cmpl %r12d, %edx je .L13 .L14: movl $0, %eax leal 1(%rdx), %r10d jmp .L16 .L37: movl %r12d, %ecx movq 80(%rsp), %rdx movq 72(%rsp), %rsi movq 88(%rsp), %rdi call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i jmp .L17 .L38: movl %eax, %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl %ebp, %edi call exit@PLT .L42: leal (%rcx,%rbp), %eax cltq movss (%rbx,%rax,4), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %edi, %xmm1 ucomiss %xmm1, %xmm0 jp .L30 jne .L30 .L22: leal 1(%rcx), %eax cmpl %r12d, %eax je .L41 movl %eax, %ecx .L25: cmpl %ecx, %edx je .L42 leal 
(%rcx,%rbp), %eax cltq leal (%rcx,%rsi), %r10d pxor %xmm0, %xmm0 cvtsi2ssl %r10d, %xmm0 ucomiss (%rbx,%rax,4), %xmm0 jp .L31 je .L22 .L31: leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L30: cvtss2sd %xmm0, %xmm0 leaq .LC7(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L41: leal 1(%rdx), %eax addl %r13d, %ebp addl $1, %esi cmpl %ecx, %edx je .L20 movl %eax, %edx .L19: leal 1(%rdx), %edi imull %edi, %edi addl %r9d, %edi movl %r8d, %ecx jmp .L25 .L39: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC9: .string "_Z9matrixMulPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z9matrixMulPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1065353216 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long 0 
.long 1083129856 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> // CUDA Kernel __global__ void matrixMul( float* C, float* A, float* B, int TM) { float cc; int k; // calcul des coordonnees du thread int i = blockIdx.x; int j = threadIdx.x; cc = 0.; // calcul de c[i][j] for(k=0;k<TM;k++){ cc += A[i*TM+k] * B[k*TM+j]; } C[i*TM+j] = cc; } ///////////////////////////////////////////////////////// // Programme main ///////////////////////////////////////////////////////// int main(int argc, char** argv) { int i, j, GRID_SIZE_X, BLOCK_SIZE_X; int TM=1024; int size = TM*TM*sizeof(float); cudaError_t cerror; if (argc>1) { TM=atoi(argv[1]); } BLOCK_SIZE_X = TM; GRID_SIZE_X = TM; // definiton de la grille et des blocs dim3 block(BLOCK_SIZE_X); dim3 grid(GRID_SIZE_X); // allocation des matrices sur CPU float *h_A, *h_B, *h_C ; h_A = (float*) malloc(size); h_B = (float*) malloc(size); h_C = (float*) malloc(size); // initialisation des matrices avec des valeurs permettant de verifier le resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ h_A[i*TM+j] = 1.0; h_B[i*TM+j] = 1.0; h_C[i*TM+j] = 0.0; if (i==j) { h_A[i*TM+j]=(float) (i+1); h_B[i*TM+j]=(float) (i+1); } } } // Pour mesurer le temps de calcul float tc, tt_cpuGpu, tt_gpuCpu; cudaEvent_t depart, arret,cpuGpuDebut, cpuGpuFin, gpuCpuDebut, gpuCpuFin; cudaEventCreate(&depart); cudaEventCreate(&arret); cudaEventCreate(&cpuGpuDebut); cudaEventCreate(&cpuGpuFin); cudaEventCreate(&gpuCpuDebut); cudaEventCreate(&gpuCpuFin); cudaEventRecord(depart,0); cudaEventRecord(cpuGpuDebut,0); cudaEventRecord(gpuCpuDebut,0); // allocation des matrices sur GPU float *d_A, *d_B, *d_C; cudaMalloc((void**) &d_A,size); cudaMalloc((void**) &d_B,size); cudaMalloc((void**) &d_C,size); // copie des matrives A et B depuis le CPU vers le GPU cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice); // Mesure du temps : transfert + calcul cudaEventRecord(cpuGpuFin,0); 
cudaEventSynchronize(cpuGpuFin); cudaEventElapsedTime(&tt_cpuGpu,cpuGpuDebut, cpuGpuFin); cudaEventDestroy(cpuGpuDebut); cudaEventDestroy(cpuGpuFin); printf("Transfert A,B de CPU vers GPU : %f seconde\n", tt_cpuGpu/1000.0); // lancement des threads matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM); // Vérification en cas d'erreur cerror=cudaGetLastError(); if ((int)cerror !=0) { printf("Erreur appel kernel %d \n", (int) cerror); exit(cerror); } // copie de la matrive C depuis le GPU cudaMemcpy(h_C,d_C, size,cudaMemcpyDeviceToHost); // Mesure du temps : transfert + calcul cudaEventRecord(gpuCpuFin,0); cudaEventSynchronize(gpuCpuFin); cudaEventElapsedTime(&tt_gpuCpu,gpuCpuDebut, gpuCpuFin); cudaEventDestroy(gpuCpuDebut); cudaEventDestroy(gpuCpuFin); printf("Transfert C de GPU vers CPU : %f seconde\n", tt_gpuCpu/1000.0); // Mesure du temps : transfert + calcul cudaEventRecord(arret,0); cudaEventSynchronize(arret); cudaEventElapsedTime(&tc,depart, arret); cudaEventDestroy(depart); cudaEventDestroy(arret); printf("Temps consommé : %f seconde\n", tc/1000.0); // verification du resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) { printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1); } else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))) { printf("Erreur i: %d j: %d\n", i, j); exit(1); } } } // liberation de la memoire free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> // CUDA Kernel __global__ void matrixMul( float* C, float* A, float* B, int TM) { float cc; int k; // calcul des coordonnees du thread int i = blockIdx.x; int j = threadIdx.x; cc = 0.; // calcul de c[i][j] for(k=0;k<TM;k++){ cc += A[i*TM+k] * B[k*TM+j]; } C[i*TM+j] = cc; } ///////////////////////////////////////////////////////// // Programme main ///////////////////////////////////////////////////////// int main(int argc, char** argv) { int i, j, GRID_SIZE_X, BLOCK_SIZE_X; int TM=1024; int size = TM*TM*sizeof(float); hipError_t cerror; if (argc>1) { TM=atoi(argv[1]); } BLOCK_SIZE_X = TM; GRID_SIZE_X = TM; // definiton de la grille et des blocs dim3 block(BLOCK_SIZE_X); dim3 grid(GRID_SIZE_X); // allocation des matrices sur CPU float *h_A, *h_B, *h_C ; h_A = (float*) malloc(size); h_B = (float*) malloc(size); h_C = (float*) malloc(size); // initialisation des matrices avec des valeurs permettant de verifier le resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ h_A[i*TM+j] = 1.0; h_B[i*TM+j] = 1.0; h_C[i*TM+j] = 0.0; if (i==j) { h_A[i*TM+j]=(float) (i+1); h_B[i*TM+j]=(float) (i+1); } } } // Pour mesurer le temps de calcul float tc, tt_cpuGpu, tt_gpuCpu; hipEvent_t depart, arret,cpuGpuDebut, cpuGpuFin, gpuCpuDebut, gpuCpuFin; hipEventCreate(&depart); hipEventCreate(&arret); hipEventCreate(&cpuGpuDebut); hipEventCreate(&cpuGpuFin); hipEventCreate(&gpuCpuDebut); hipEventCreate(&gpuCpuFin); hipEventRecord(depart,0); hipEventRecord(cpuGpuDebut,0); hipEventRecord(gpuCpuDebut,0); // allocation des matrices sur GPU float *d_A, *d_B, *d_C; hipMalloc((void**) &d_A,size); hipMalloc((void**) &d_B,size); hipMalloc((void**) &d_C,size); // copie des matrives A et B depuis le CPU vers le GPU hipMemcpy(d_A, h_A, size,hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size,hipMemcpyHostToDevice); // Mesure du temps : transfert + calcul hipEventRecord(cpuGpuFin,0); hipEventSynchronize(cpuGpuFin); 
hipEventElapsedTime(&tt_cpuGpu,cpuGpuDebut, cpuGpuFin); hipEventDestroy(cpuGpuDebut); hipEventDestroy(cpuGpuFin); printf("Transfert A,B de CPU vers GPU : %f seconde\n", tt_cpuGpu/1000.0); // lancement des threads matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM); // Vérification en cas d'erreur cerror=hipGetLastError(); if ((int)cerror !=0) { printf("Erreur appel kernel %d \n", (int) cerror); exit(cerror); } // copie de la matrive C depuis le GPU hipMemcpy(h_C,d_C, size,hipMemcpyDeviceToHost); // Mesure du temps : transfert + calcul hipEventRecord(gpuCpuFin,0); hipEventSynchronize(gpuCpuFin); hipEventElapsedTime(&tt_gpuCpu,gpuCpuDebut, gpuCpuFin); hipEventDestroy(gpuCpuDebut); hipEventDestroy(gpuCpuFin); printf("Transfert C de GPU vers CPU : %f seconde\n", tt_gpuCpu/1000.0); // Mesure du temps : transfert + calcul hipEventRecord(arret,0); hipEventSynchronize(arret); hipEventElapsedTime(&tc,depart, arret); hipEventDestroy(depart); hipEventDestroy(arret); printf("Temps consommé : %f seconde\n", tc/1000.0); // verification du resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) { printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1); } else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))) { printf("Erreur i: %d j: %d\n", i, j); exit(1); } } } // liberation de la memoire free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> // CUDA Kernel __global__ void matrixMul( float* C, float* A, float* B, int TM) { float cc; int k; // calcul des coordonnees du thread int i = blockIdx.x; int j = threadIdx.x; cc = 0.; // calcul de c[i][j] for(k=0;k<TM;k++){ cc += A[i*TM+k] * B[k*TM+j]; } C[i*TM+j] = cc; } ///////////////////////////////////////////////////////// // Programme main ///////////////////////////////////////////////////////// int main(int argc, char** argv) { int i, j, GRID_SIZE_X, BLOCK_SIZE_X; int TM=1024; int size = TM*TM*sizeof(float); hipError_t cerror; if (argc>1) { TM=atoi(argv[1]); } BLOCK_SIZE_X = TM; GRID_SIZE_X = TM; // definiton de la grille et des blocs dim3 block(BLOCK_SIZE_X); dim3 grid(GRID_SIZE_X); // allocation des matrices sur CPU float *h_A, *h_B, *h_C ; h_A = (float*) malloc(size); h_B = (float*) malloc(size); h_C = (float*) malloc(size); // initialisation des matrices avec des valeurs permettant de verifier le resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ h_A[i*TM+j] = 1.0; h_B[i*TM+j] = 1.0; h_C[i*TM+j] = 0.0; if (i==j) { h_A[i*TM+j]=(float) (i+1); h_B[i*TM+j]=(float) (i+1); } } } // Pour mesurer le temps de calcul float tc, tt_cpuGpu, tt_gpuCpu; hipEvent_t depart, arret,cpuGpuDebut, cpuGpuFin, gpuCpuDebut, gpuCpuFin; hipEventCreate(&depart); hipEventCreate(&arret); hipEventCreate(&cpuGpuDebut); hipEventCreate(&cpuGpuFin); hipEventCreate(&gpuCpuDebut); hipEventCreate(&gpuCpuFin); hipEventRecord(depart,0); hipEventRecord(cpuGpuDebut,0); hipEventRecord(gpuCpuDebut,0); // allocation des matrices sur GPU float *d_A, *d_B, *d_C; hipMalloc((void**) &d_A,size); hipMalloc((void**) &d_B,size); hipMalloc((void**) &d_C,size); // copie des matrives A et B depuis le CPU vers le GPU hipMemcpy(d_A, h_A, size,hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size,hipMemcpyHostToDevice); // Mesure du temps : transfert + calcul hipEventRecord(cpuGpuFin,0); hipEventSynchronize(cpuGpuFin); 
hipEventElapsedTime(&tt_cpuGpu,cpuGpuDebut, cpuGpuFin); hipEventDestroy(cpuGpuDebut); hipEventDestroy(cpuGpuFin); printf("Transfert A,B de CPU vers GPU : %f seconde\n", tt_cpuGpu/1000.0); // lancement des threads matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM); // Vérification en cas d'erreur cerror=hipGetLastError(); if ((int)cerror !=0) { printf("Erreur appel kernel %d \n", (int) cerror); exit(cerror); } // copie de la matrive C depuis le GPU hipMemcpy(h_C,d_C, size,hipMemcpyDeviceToHost); // Mesure du temps : transfert + calcul hipEventRecord(gpuCpuFin,0); hipEventSynchronize(gpuCpuFin); hipEventElapsedTime(&tt_gpuCpu,gpuCpuDebut, gpuCpuFin); hipEventDestroy(gpuCpuDebut); hipEventDestroy(gpuCpuFin); printf("Transfert C de GPU vers CPU : %f seconde\n", tt_gpuCpu/1000.0); // Mesure du temps : transfert + calcul hipEventRecord(arret,0); hipEventSynchronize(arret); hipEventElapsedTime(&tc,depart, arret); hipEventDestroy(depart); hipEventDestroy(arret); printf("Temps consommé : %f seconde\n", tc/1000.0); // verification du resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) { printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1); } else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))) { printf("Erreur i: %d j: %d\n", i, j); exit(1); } } } // liberation de la memoire free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9matrixMulPfS_S_i .globl _Z9matrixMulPfS_S_i .p2align 8 .type _Z9matrixMulPfS_S_i,@function _Z9matrixMulPfS_S_i: s_load_b32 s8, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s8, 1 s_cbranch_scc1 .LBB0_3 s_load_b128 s[4:7], s[0:1], 0x8 v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, v0 s_mul_i32 s2, s15, s8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) s_ashr_i32 s3, s2, 31 v_mov_b32_e32 v3, v2 s_lshl_b64 s[2:3], s[2:3], 2 s_waitcnt lgkmcnt(0) s_add_u32 s2, s4, s2 s_addc_u32 s3, s5, s3 s_mov_b32 s4, s8 .p2align 6 .LBB0_2: v_lshlrev_b64 v[4:5], 2, v[1:2] s_load_b32 s5, s[2:3], 0x0 s_add_i32 s4, s4, -1 v_add_nc_u32_e32 v1, s8, v1 s_add_u32 s2, s2, 4 s_addc_u32 s3, s3, 0 v_add_co_u32 v4, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo s_cmp_lg_u32 s4, 0 global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) lgkmcnt(0) v_fmac_f32_e32 v3, s5, v4 s_cbranch_scc1 .LBB0_2 s_branch .LBB0_4 .LBB0_3: v_mov_b32_e32 v3, 0 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x0 v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v3, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9matrixMulPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 
.amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9matrixMulPfS_S_i, .Lfunc_end0-_Z9matrixMulPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9matrixMulPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z9matrixMulPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 
.workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> // CUDA Kernel __global__ void matrixMul( float* C, float* A, float* B, int TM) { float cc; int k; // calcul des coordonnees du thread int i = blockIdx.x; int j = threadIdx.x; cc = 0.; // calcul de c[i][j] for(k=0;k<TM;k++){ cc += A[i*TM+k] * B[k*TM+j]; } C[i*TM+j] = cc; } ///////////////////////////////////////////////////////// // Programme main ///////////////////////////////////////////////////////// int main(int argc, char** argv) { int i, j, GRID_SIZE_X, BLOCK_SIZE_X; int TM=1024; int size = TM*TM*sizeof(float); hipError_t cerror; if (argc>1) { TM=atoi(argv[1]); } BLOCK_SIZE_X = TM; GRID_SIZE_X = TM; // definiton de la grille et des blocs dim3 block(BLOCK_SIZE_X); dim3 grid(GRID_SIZE_X); // allocation des matrices sur CPU float *h_A, *h_B, *h_C ; h_A = (float*) malloc(size); h_B = (float*) malloc(size); h_C = (float*) malloc(size); // initialisation des matrices avec des valeurs permettant de verifier le resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ h_A[i*TM+j] = 1.0; h_B[i*TM+j] = 1.0; h_C[i*TM+j] = 0.0; if (i==j) { h_A[i*TM+j]=(float) (i+1); h_B[i*TM+j]=(float) (i+1); } } } // Pour mesurer le temps de calcul float tc, tt_cpuGpu, tt_gpuCpu; hipEvent_t depart, arret,cpuGpuDebut, cpuGpuFin, gpuCpuDebut, gpuCpuFin; hipEventCreate(&depart); hipEventCreate(&arret); hipEventCreate(&cpuGpuDebut); hipEventCreate(&cpuGpuFin); hipEventCreate(&gpuCpuDebut); hipEventCreate(&gpuCpuFin); hipEventRecord(depart,0); hipEventRecord(cpuGpuDebut,0); hipEventRecord(gpuCpuDebut,0); // allocation des matrices sur GPU float *d_A, *d_B, *d_C; hipMalloc((void**) &d_A,size); hipMalloc((void**) &d_B,size); hipMalloc((void**) &d_C,size); // copie des matrives A et B depuis le CPU vers le GPU hipMemcpy(d_A, h_A, size,hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size,hipMemcpyHostToDevice); // Mesure du temps : transfert + calcul hipEventRecord(cpuGpuFin,0); hipEventSynchronize(cpuGpuFin); 
hipEventElapsedTime(&tt_cpuGpu,cpuGpuDebut, cpuGpuFin); hipEventDestroy(cpuGpuDebut); hipEventDestroy(cpuGpuFin); printf("Transfert A,B de CPU vers GPU : %f seconde\n", tt_cpuGpu/1000.0); // lancement des threads matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM); // Vérification en cas d'erreur cerror=hipGetLastError(); if ((int)cerror !=0) { printf("Erreur appel kernel %d \n", (int) cerror); exit(cerror); } // copie de la matrive C depuis le GPU hipMemcpy(h_C,d_C, size,hipMemcpyDeviceToHost); // Mesure du temps : transfert + calcul hipEventRecord(gpuCpuFin,0); hipEventSynchronize(gpuCpuFin); hipEventElapsedTime(&tt_gpuCpu,gpuCpuDebut, gpuCpuFin); hipEventDestroy(gpuCpuDebut); hipEventDestroy(gpuCpuFin); printf("Transfert C de GPU vers CPU : %f seconde\n", tt_gpuCpu/1000.0); // Mesure du temps : transfert + calcul hipEventRecord(arret,0); hipEventSynchronize(arret); hipEventElapsedTime(&tc,depart, arret); hipEventDestroy(depart); hipEventDestroy(arret); printf("Temps consommé : %f seconde\n", tc/1000.0); // verification du resultat for(i = 0; i < TM; i++){ for(j = 0; j < TM; j++){ if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) { printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1); } else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))) { printf("Erreur i: %d j: %d\n", i, j); exit(1); } } } // liberation de la memoire free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); }
.text .file "mul_matG1B1_et_1.hip" .globl _Z24__device_stub__matrixMulPfS_S_i # -- Begin function _Z24__device_stub__matrixMulPfS_S_i .p2align 4, 0x90 .type _Z24__device_stub__matrixMulPfS_S_i,@function _Z24__device_stub__matrixMulPfS_S_i: # @_Z24__device_stub__matrixMulPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9matrixMulPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z24__device_stub__matrixMulPfS_S_i, .Lfunc_end0-_Z24__device_stub__matrixMulPfS_S_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x408f400000000000 # double 1000 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $248, %rsp .cfi_def_cfa_offset 304 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $1024, %eax # imm = 0x400 cmpl $2, %edi jl .LBB1_2 # %bb.1: movq 8(%rsi), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol .LBB1_2: movq %rax, 8(%rsp) # 8-byte Spill movl %eax, %r13d movabsq $4294967296, %rax # imm = 0x100000000 orq %r13, %rax movq %rax, 144(%rsp) # 8-byte 
Spill movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, 48(%rsp) # 8-byte Spill movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, 40(%rsp) # 8-byte Spill movl $4194304, %edi # imm = 0x400000 callq malloc movq 8(%rsp), %rcx # 8-byte Reload movq %rax, 136(%rsp) # 8-byte Spill testl %ecx, %ecx jle .LBB1_9 # %bb.3: # %.preheader104.lr.ph leaq (,%r13,4), %rbp xorl %r14d, %r14d xorl %r15d, %r15d jmp .LBB1_4 .p2align 4, 0x90 .LBB1_8: # %._crit_edge # in Loop: Header=BB1_4 Depth=1 incq %r15 movq 8(%rsp), %rcx # 8-byte Reload addl %ecx, %r14d cmpq %r13, %r15 je .LBB1_9 .LBB1_4: # %.preheader104 # =>This Loop Header: Depth=1 # Child Loop BB1_5 Depth 2 movl %r14d, %eax movq 40(%rsp), %rdx # 8-byte Reload leaq (%rdx,%rax,4), %r12 movq 48(%rsp), %rdx # 8-byte Reload leaq (%rdx,%rax,4), %rbx movl %ecx, %eax imull %r15d, %eax movq 136(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rdi leal 1(%r15), %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss %xmm0, 108(%rsp) # 4-byte Spill xorl %esi, %esi movq %rbp, %rdx callq memset@PLT xorl %eax, %eax jmp .LBB1_5 .p2align 4, 0x90 .LBB1_7: # in Loop: Header=BB1_5 Depth=2 incq %rax cmpq %rax, %r13 je .LBB1_8 .LBB1_5: # Parent Loop BB1_4 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rbx,%rax,4) # imm = 0x3F800000 movl $1065353216, (%r12,%rax,4) # imm = 0x3F800000 cmpq %rax, %r15 jne .LBB1_7 # %bb.6: # in Loop: Header=BB1_5 Depth=2 movss 108(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero movss %xmm0, (%rbx,%rax,4) movss %xmm0, (%r12,%rax,4) jmp .LBB1_7 .LBB1_9: # %._crit_edge111 leaq 96(%rsp), %rdi callq hipEventCreate leaq 32(%rsp), %rdi callq hipEventCreate leaq 88(%rsp), %rdi callq hipEventCreate leaq 24(%rsp), %rdi callq hipEventCreate leaq 80(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 96(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 88(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 80(%rsp), %rdi xorl %esi, %esi callq 
hipEventRecord leaq 72(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 64(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 56(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc movq 72(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq 48(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy movq 64(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq 40(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 24(%rsp), %rdi callq hipEventSynchronize movq 88(%rsp), %rsi movq 24(%rsp), %rdx leaq 112(%rsp), %rdi callq hipEventElapsedTime movq 88(%rsp), %rdi callq hipEventDestroy movq 24(%rsp), %rdi callq hipEventDestroy movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 144(%rsp), %rdi # 8-byte Reload movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_11 # %bb.10: movq 56(%rsp), %rax movq 72(%rsp), %rcx movq 64(%rsp), %rdx movq %rax, 200(%rsp) movq %rcx, 192(%rsp) movq %rdx, 184(%rsp) movq 8(%rsp), %rax # 8-byte Reload movl %eax, 116(%rsp) leaq 200(%rsp), %rax movq %rax, 208(%rsp) leaq 192(%rsp), %rax movq %rax, 216(%rsp) leaq 184(%rsp), %rax movq %rax, 224(%rsp) leaq 116(%rsp), %rax movq %rax, 232(%rsp) leaq 120(%rsp), %rdi leaq 168(%rsp), %rsi leaq 160(%rsp), %rdx leaq 152(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 168(%rsp), %rcx movl 176(%rsp), %r8d leaq 208(%rsp), %r9 movl $_Z9matrixMulPfS_S_i, %edi pushq 152(%rsp) .cfi_adjust_cfa_offset 8 pushq 168(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_11: callq hipGetLastError testl %eax, %eax movq 136(%rsp), %rbx # 8-byte Reload jne .LBB1_22 # %bb.12: movq 56(%rsp), %rsi movl $4194304, %edx # imm = 0x400000 
movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 16(%rsp), %rdi xorl %ebp, %ebp xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 80(%rsp), %rsi movq 16(%rsp), %rdx leaq 120(%rsp), %rdi callq hipEventElapsedTime movq 80(%rsp), %rdi callq hipEventDestroy movq 16(%rsp), %rdi callq hipEventDestroy movss 120(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movl $.L.str.2, %edi movb $1, %al callq printf movq 32(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 32(%rsp), %rdi callq hipEventSynchronize movq 96(%rsp), %rsi movq 32(%rsp), %rdx leaq 208(%rsp), %rdi callq hipEventElapsedTime movq 96(%rsp), %rdi callq hipEventDestroy movq 32(%rsp), %rdi callq hipEventDestroy movss 208(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movl $.L.str.3, %edi movb $1, %al callq printf movq 8(%rsp), %r10 # 8-byte Reload testl %r10d, %r10d jle .LBB1_21 # %bb.13: # %.preheader.lr.ph leal -1(%r10), %eax movq %r13, %rcx xorl %edi, %edi xorl %esi, %esi jmp .LBB1_14 .p2align 4, 0x90 .LBB1_20: # %._crit_edge114 # in Loop: Header=BB1_14 Depth=1 incq %rsi addl %r10d, %ebp decq %rdi incq %rcx cmpq %r13, %rsi je .LBB1_21 .LBB1_14: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_15 Depth 2 movl %ebp, %edx leaq (%rbx,%rdx,4), %r8 leal 1(%rsi), %edx imull %edx, %edx addl %eax, %edx xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 xorl %edx, %edx jmp .LBB1_15 .p2align 4, 0x90 .LBB1_18: # %.critedge # in Loop: Header=BB1_15 Depth=2 leal (%rcx,%rdx), %r9d xorps %xmm1, %xmm1 cvtsi2ss %r9d, %xmm1 movss (%r8,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero ucomiss %xmm1, %xmm2 jne .LBB1_24 jp .LBB1_24 .LBB1_19: # in Loop: Header=BB1_15 Depth=2 incq %rdx cmpq %rdx, %r13 je .LBB1_20 .LBB1_15: # Parent Loop BB1_14 Depth=1 # => This Inner Loop Header: Depth=2 movq %rdi, %r9 addq %rdx, %r9 jne .LBB1_18 # %bb.16: # in Loop: Header=BB1_15 Depth=2 movss (%r8,%rdx,4), %xmm1 # 
xmm1 = mem[0],zero,zero,zero ucomiss %xmm0, %xmm1 jne .LBB1_23 jp .LBB1_23 # %bb.17: # in Loop: Header=BB1_15 Depth=2 testq %r9, %r9 jne .LBB1_18 jmp .LBB1_19 .LBB1_21: # %._crit_edge116 movq 48(%rsp), %rdi # 8-byte Reload callq free movq 40(%rsp), %rdi # 8-byte Reload callq free movq %rbx, %rdi callq free movq 72(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movq 56(%rsp), %rdi callq hipFree xorl %eax, %eax addq $248, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB1_24: .cfi_def_cfa_offset 304 movl $.L.str.5, %edi # kill: def $esi killed $esi killed $rsi # kill: def $edx killed $edx killed $rdx xorl %eax, %eax callq printf movl $1, %edi callq exit .LBB1_23: xorps %xmm0, %xmm0 cvtss2sd %xmm1, %xmm0 movl $.L.str.4, %edi # kill: def $esi killed $esi killed $rsi # kill: def $edx killed $edx killed $rdx movb $1, %al callq printf movl $1, %edi callq exit .LBB1_22: movl $.L.str.1, %edi movl %eax, %esi movl %eax, %ebx xorl %eax, %eax callq printf movl %ebx, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9matrixMulPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, 
.Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9matrixMulPfS_S_i,@object # @_Z9matrixMulPfS_S_i .section .rodata,"a",@progbits .globl _Z9matrixMulPfS_S_i .p2align 3, 0x0 _Z9matrixMulPfS_S_i: .quad _Z24__device_stub__matrixMulPfS_S_i .size _Z9matrixMulPfS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Transfert A,B de CPU vers GPU : %f seconde\n" .size .L.str, 44 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Erreur appel kernel %d \n" .size .L.str.1, 25 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Transfert C de GPU vers CPU : %f seconde\n" .size .L.str.2, 42 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Temps consomm\303\251 : %f seconde\n" .size .L.str.3, 30 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Erreur i: %d j: %d %f\n" .size .L.str.4, 23 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "Erreur i: %d j: %d\n" .size .L.str.5, 20 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9matrixMulPfS_S_i" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss 
.globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__matrixMulPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9matrixMulPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9matrixMulPfS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */ /* 0x000e220000002500 */ /*0020*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */ /* 0x000fe200000001ff */ /*0050*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e620000002100 */ /*0060*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R4, R4, c[0x0][0x178], RZ ; /* 0x00005e0004047a24 */ /* 0x001fd800078e02ff */ /*0080*/ @!P0 BRA 0xb70 ; /* 0x00000ae000008947 */ /* 0x000fea0003800000 */ /*0090*/ IADD3 R3, R2.reuse, -0x1, RZ ; /* 0xffffffff02037810 */ /* 0x040fe40007ffe0ff */ /*00a0*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */ /* 0x000fe400078ec0ff */ /*00b0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */ /* 0x000fe40003f06070 */ /*00c0*/ MOV R21, RZ ; /* 0x000000ff00157202 */ /* 0x000fe40000000f00 */ /*00d0*/ MOV R3, RZ ; /* 0x000000ff00037202 */ /* 0x000fd20000000f00 */ /*00e0*/ @!P0 BRA 0xa70 ; /* 0x0000098000008947 */ /* 0x000fea0003800000 */ /*00f0*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */ /* 0x000fe20007ffe1ff */ /*0100*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */ /* 0x000fe200000001ff */ /*0110*/ MOV R21, RZ ; /* 0x000000ff00157202 */ /* 0x000fe20000000f00 */ /*0120*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */ /* 0x000fe200000001ff */ /*0130*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f04270 */ /*0140*/ MOV R8, c[0x0][0x168] ; /* 0x00005a0000087a02 */ /* 0x000fe40000000f00 */ /*0150*/ MOV R7, c[0x0][0x16c] ; 
/* 0x00005b0000077a02 */ /* 0x000fc60000000f00 */ /*0160*/ IMAD.WIDE R18, R0, R19, c[0x0][0x170] ; /* 0x00005c0000127625 */ /* 0x002fcc00078e0213 */ /*0170*/ @!P0 BRA 0x8f0 ; /* 0x0000077000008947 */ /* 0x000fea0003800000 */ /*0180*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fe40003f24270 */ /*0190*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01a0*/ @!P1 BRA 0x640 ; /* 0x0000049000009947 */ /* 0x000fea0003800000 */ /*01b0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*01c0*/ MOV R9, R7 ; /* 0x0000000700097202 */ /* 0x000fe20000000f00 */ /*01d0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */ /* 0x0000a8000c1e1900 */ /*01e0*/ IMAD.WIDE R12, R4, 0x4, R8 ; /* 0x00000004040c7825 */ /* 0x000fc800078e0208 */ /*01f0*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x040fe200078e0212 */ /*0200*/ LDG.E R25, [R12.64] ; /* 0x000000040c197981 */ /* 0x000ea8000c1e1900 */ /*0210*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */ /* 0x0002e8000c1e1900 */ /*0220*/ LDG.E R11, [R12.64+0x4] ; /* 0x000004040c0b7981 */ /* 0x000ee8000c1e1900 */ /*0230*/ LDG.E R14, [R12.64+0x8] ; /* 0x000008040c0e7981 */ /* 0x000f22000c1e1900 */ /*0240*/ IMAD.WIDE R22, R2, 0x4, R22 ; /* 0x0000000402167825 */ /* 0x002fc600078e0216 */ /*0250*/ LDG.E R19, [R12.64+0x10] ; /* 0x000010040c137981 */ /* 0x001f68000c1e1900 */ /*0260*/ LDG.E R15, [R22.64] ; /* 0x00000004160f7981 */ /* 0x000122000c1e1900 */ /*0270*/ IMAD.WIDE R16, R2, 0x4, R22 ; /* 0x0000000402107825 */ /* 0x000fc600078e0216 */ /*0280*/ LDG.E R9, [R12.64+0x14] ; /* 0x000014040c097981 */ /* 0x000f66000c1e1900 */ /*0290*/ IMAD.WIDE R26, R2.reuse, 0x4, R16 ; /* 0x00000004021a7825 */ /* 0x040fe400078e0210 */ /*02a0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000368000c1e1900 */ /*02b0*/ IMAD.WIDE R28, R2.reuse, 0x4, R26 ; /* 0x00000004021c7825 */ /* 0x040fe200078e021a */ /*02c0*/ LDG.E R18, [R26.64] ; /* 
0x000000041a127981 */ /* 0x000168000c1e1900 */ /*02d0*/ LDG.E R20, [R28.64] ; /* 0x000000041c147981 */ /* 0x000968000c1e1900 */ /*02e0*/ LDG.E R17, [R12.64+0xc] ; /* 0x00000c040c117981 */ /* 0x002f62000c1e1900 */ /*02f0*/ IMAD.WIDE R22, R2, 0x4, R28 ; /* 0x0000000402167825 */ /* 0x001fc600078e021c */ /*0300*/ LDG.E R26, [R12.64+0x1c] ; /* 0x00001c040c1a7981 */ /* 0x000f62000c1e1900 */ /*0310*/ FFMA R25, R10, R25, R21 ; /* 0x000000190a197223 */ /* 0x004fc60000000015 */ /*0320*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */ /* 0x0000a8000c1e1900 */ /*0330*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */ /* 0x000ea2000c1e1900 */ /*0340*/ IMAD.WIDE R22, R2, 0x4, R22 ; /* 0x0000000402167825 */ /* 0x001fc800078e0216 */ /*0350*/ FFMA R27, R24, R11, R25 ; /* 0x0000000b181b7223 */ /* 0x008fe40000000019 */ /*0360*/ IMAD.WIDE R24, R2.reuse, 0x4, R22 ; /* 0x0000000402187825 */ /* 0x040fe200078e0216 */ /*0370*/ LDG.E R11, [R22.64] ; /* 0x00000004160b7981 */ /* 0x0000e6000c1e1900 */ /*0380*/ FFMA R29, R15, R14, R27 ; /* 0x0000000e0f1d7223 */ /* 0x010fe2000000001b */ /*0390*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */ /* 0x000328000c1e1900 */ /*03a0*/ LDG.E R27, [R12.64+0x20] ; /* 0x000020040c1b7981 */ /* 0x000f22000c1e1900 */ /*03b0*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */ /* 0x000fc600078e0218 */ /*03c0*/ LDG.E R25, [R12.64+0x28] ; /* 0x000028040c197981 */ /* 0x002f22000c1e1900 */ /*03d0*/ FFMA R29, R16, R17, R29 ; /* 0x00000011101d7223 */ /* 0x020fe4000000001d */ /*03e0*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */ /* 0x000fe400078e020e */ /*03f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000364000c1e1900 */ /*0400*/ FFMA R29, R18, R19, R29 ; /* 0x00000013121d7223 */ /* 0x000fe4000000001d */ /*0410*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */ /* 0x000fe400078e0210 */ /*0420*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000364000c1e1900 */ /*0430*/ FFMA R20, R20, R9, R29 ; /* 0x0000000914147223 */ /* 
0x000fc4000000001d */ /*0440*/ LDG.E R9, [R12.64+0x24] ; /* 0x000024040c097981 */ /* 0x000f62000c1e1900 */ /*0450*/ IMAD.WIDE R22, R2, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x001fc600078e0212 */ /*0460*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */ /* 0x000768000c1e1900 */ /*0470*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */ /* 0x002f68000c1e1900 */ /*0480*/ LDG.E R29, [R22.64] ; /* 0x00000004161d7981 */ /* 0x000162000c1e1900 */ /*0490*/ FFMA R10, R10, R21, R20 ; /* 0x000000150a0a7223 */ /* 0x004fe40000000014 */ /*04a0*/ IMAD.WIDE R20, R2, 0x4, R22 ; /* 0x0000000402147825 */ /* 0x000fc400078e0216 */ /*04b0*/ LDG.E R22, [R12.64+0x38] ; /* 0x000038040c167981 */ /* 0x001ea8000c1e1900 */ /*04c0*/ LDG.E R17, [R20.64] ; /* 0x0000000414117981 */ /* 0x0008a2000c1e1900 */ /*04d0*/ FFMA R18, R11, R26, R10 ; /* 0x0000001a0b127223 */ /* 0x008fc6000000000a */ /*04e0*/ LDG.E R26, [R12.64+0x30] ; /* 0x000030040c1a7981 */ /* 0x000ee2000c1e1900 */ /*04f0*/ IMAD.WIDE R10, R2, 0x4, R20 ; /* 0x00000004020a7825 */ /* 0x000fc800078e0214 */ /*0500*/ FFMA R20, R28, R27, R18 ; /* 0x0000001b1c147223 */ /* 0x010fe40000000012 */ /*0510*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */ /* 0x000ea2000c1e1900 */ /*0520*/ IMAD.WIDE R18, R2, 0x4, R10 ; /* 0x0000000402127825 */ /* 0x000fc600078e020a */ /*0530*/ LDG.E R27, [R10.64] ; /* 0x000000040a1b7981 */ /* 0x000128000c1e1900 */ /*0540*/ LDG.E R23, [R18.64] ; /* 0x0000000412177981 */ /* 0x000328000c1e1900 */ /*0550*/ LDG.E R10, [R12.64+0x3c] ; /* 0x00003c040c0a7981 */ /* 0x001f22000c1e1900 */ /*0560*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */ /* 0x000fe20007ffe0ff */ /*0570*/ FFMA R9, R14, R9, R20 ; /* 0x000000090e097223 */ /* 0x020fc80000000014 */ /*0580*/ FFMA R9, R16, R25, R9 ; /* 0x0000001910097223 */ /* 0x000fe20000000009 */ /*0590*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */ /* 0x000fc60003f24270 */ /*05a0*/ FFMA R9, R24, R15, R9 ; /* 0x0000000f18097223 */ /* 0x000fe20000000009 */ /*05b0*/ IADD3 R8, 
P2, R8, 0x40, RZ ; /* 0x0000004008087810 */ /* 0x000fe20007f5e0ff */ /*05c0*/ IMAD.WIDE R18, R2, 0x4, R18 ; /* 0x0000000402127825 */ /* 0x002fe200078e0212 */ /*05d0*/ IADD3 R3, R3, 0x10, RZ ; /* 0x0000001003037810 */ /* 0x000fe40007ffe0ff */ /*05e0*/ IADD3.X R7, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff077210 */ /* 0x000fe200017fe4ff */ /*05f0*/ FFMA R9, R29, R26, R9 ; /* 0x0000001a1d097223 */ /* 0x008fc80000000009 */ /*0600*/ FFMA R9, R17, R28, R9 ; /* 0x0000001c11097223 */ /* 0x004fc80000000009 */ /*0610*/ FFMA R9, R27, R22, R9 ; /* 0x000000161b097223 */ /* 0x010fc80000000009 */ /*0620*/ FFMA R21, R23, R10, R9 ; /* 0x0000000a17157223 */ /* 0x000fe20000000009 */ /*0630*/ @P1 BRA 0x1c0 ; /* 0xfffffb8000001947 */ /* 0x000fea000383ffff */ /*0640*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */ /* 0x000fda0003f24270 */ /*0650*/ @!P1 BRA 0x8d0 ; /* 0x0000027000009947 */ /* 0x000fea0003800000 */ /*0660*/ MOV R9, R7 ; /* 0x0000000700097202 */ /* 0x000fe20000000f00 */ /*0670*/ LDG.E R20, [R18.64] ; /* 0x0000000412147981 */ /* 0x0000a8000c1e1900 */ /*0680*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */ /* 0x000fca00078e0208 */ /*0690*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */ /* 0x000ea2000c1e1900 */ /*06a0*/ IMAD.WIDE R22, R2, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x000fc600078e0212 */ /*06b0*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */ /* 0x000ee6000c1e1900 */ /*06c0*/ IMAD.WIDE R12, R2.reuse, 0x4, R22 ; /* 0x00000004020c7825 */ /* 0x040fe200078e0216 */ /*06d0*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */ /* 0x0002e8000c1e1900 */ /*06e0*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */ /* 0x000962000c1e1900 */ /*06f0*/ IMAD.WIDE R14, R2, 0x4, R12 ; /* 0x00000004020e7825 */ /* 0x000fc600078e020c */ /*0700*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */ /* 0x000f66000c1e1900 */ /*0710*/ IMAD.WIDE R16, R2.reuse, 0x4, R14 ; /* 0x0000000402107825 */ /* 0x040fe200078e020e */ /*0720*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */ /* 
0x000f68000c1e1900 */ /*0730*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000962000c1e1900 */ /*0740*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */ /* 0x001fc600078e0210 */ /*0750*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000166000c1e1900 */ /*0760*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */ /* 0x042fe200078e0212 */ /*0770*/ LDG.E R28, [R18.64] ; /* 0x00000004121c7981 */ /* 0x000368000c1e1900 */ /*0780*/ LDG.E R15, [R10.64+0x10] ; /* 0x000010040a0f7981 */ /* 0x010f22000c1e1900 */ /*0790*/ IMAD.WIDE R12, R2, 0x4, R22 ; /* 0x00000004020c7825 */ /* 0x000fc600078e0216 */ /*07a0*/ LDG.E R18, [R10.64+0x1c] ; /* 0x00001c040a127981 */ /* 0x002f28000c1e1900 */ /*07b0*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */ /* 0x001f22000c1e1900 */ /*07c0*/ FFMA R19, R20, R9, R21 ; /* 0x0000000914137223 */ /* 0x004fc60000000015 */ /*07d0*/ LDG.E R21, [R10.64+0x14] ; /* 0x000014040a157981 */ /* 0x000ea8000c1e1900 */ /*07e0*/ LDG.E R9, [R22.64] ; /* 0x0000000416097981 */ /* 0x000ea8000c1e1900 */ /*07f0*/ LDG.E R20, [R10.64+0x18] ; /* 0x000018040a147981 */ /* 0x000ea2000c1e1900 */ /*0800*/ FFMA R24, R24, R25, R19 ; /* 0x0000001918187223 */ /* 0x008fc80000000013 */ /*0810*/ FFMA R24, R26, R27, R24 ; /* 0x0000001b1a187223 */ /* 0x020fc80000000018 */ /*0820*/ FFMA R14, R14, R29, R24 ; /* 0x0000001d0e0e7223 */ /* 0x000fe20000000018 */ /*0830*/ IADD3 R8, P1, R8, 0x20, RZ ; /* 0x0000002008087810 */ /* 0x000fc60007f3e0ff */ /*0840*/ FFMA R14, R16, R15, R14 ; /* 0x0000000f100e7223 */ /* 0x010fe2000000000e */ /*0850*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0860*/ IADD3 R3, R3, 0x8, RZ ; /* 0x0000000803037810 */ /* 0x000fe40007ffe0ff */ /*0870*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */ /* 0x000fe40007ffe0ff */ /*0880*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0890*/ FFMA R14, R28, R21, R14 ; /* 0x000000151c0e7223 */ /* 0x004fc8000000000e */ 
/*08a0*/ FFMA R9, R9, R20, R14 ; /* 0x0000001409097223 */ /* 0x000fc8000000000e */ /*08b0*/ FFMA R21, R17, R18, R9 ; /* 0x0000001211157223 */ /* 0x000fe40000000009 */ /*08c0*/ IMAD.WIDE R18, R2, 0x4, R12 ; /* 0x0000000402127825 */ /* 0x000fc800078e020c */ /*08d0*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */ /* 0x000fda0000705670 */ /*08e0*/ @!P0 BRA 0xa70 ; /* 0x0000018000008947 */ /* 0x000fea0003800000 */ /*08f0*/ MOV R9, R7 ; /* 0x0000000700097202 */ /* 0x000fe20000000f00 */ /*0900*/ IMAD.WIDE R12, R2, 0x4, R18 ; /* 0x00000004020c7825 */ /* 0x000fe400078e0212 */ /*0910*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */ /* 0x000ea4000c1e1900 */ /*0920*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */ /* 0x000fc800078e0208 */ /*0930*/ IMAD.WIDE R14, R2.reuse, 0x4, R12 ; /* 0x00000004020e7825 */ /* 0x040fe200078e020c */ /*0940*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */ /* 0x000ea8000c1e1900 */ /*0950*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */ /* 0x000ee2000c1e1900 */ /*0960*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */ /* 0x000fc600078e020e */ /*0970*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */ /* 0x000ee8000c1e1900 */ /*0980*/ LDG.E R22, [R14.64] ; /* 0x000000040e167981 */ /* 0x000f28000c1e1900 */ /*0990*/ LDG.E R23, [R10.64+0x8] ; /* 0x000008040a177981 */ /* 0x000f28000c1e1900 */ /*09a0*/ LDG.E R25, [R10.64+0xc] ; /* 0x00000c040a197981 */ /* 0x000f68000c1e1900 */ /*09b0*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */ /* 0x000f62000c1e1900 */ /*09c0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */ /* 0x000fc80007ffe0ff */ /*09d0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fe40003f05270 */ /*09e0*/ IADD3 R8, P1, R8, 0x10, RZ ; /* 0x0000001008087810 */ /* 0x000fe40007f3e0ff */ /*09f0*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */ /* 0x000fe40007ffe0ff */ /*0a00*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0a10*/ FFMA R9, R18, R9, R21 ; /* 
0x0000000912097223 */ /* 0x004fc80000000015 */ /*0a20*/ FFMA R9, R12, R20, R9 ; /* 0x000000140c097223 */ /* 0x008fe40000000009 */ /*0a30*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */ /* 0x000fc800078e0210 */ /*0a40*/ FFMA R9, R22, R23, R9 ; /* 0x0000001716097223 */ /* 0x010fc80000000009 */ /*0a50*/ FFMA R21, R24, R25, R9 ; /* 0x0000001918157223 */ /* 0x020fe20000000009 */ /*0a60*/ @P0 BRA 0x8f0 ; /* 0xfffffe8000000947 */ /* 0x000fea000383ffff */ /*0a70*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fda0003f05270 */ /*0a80*/ @!P0 BRA 0xb70 ; /* 0x000000e000008947 */ /* 0x000fea0003800000 */ /*0a90*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */ /* 0x000fe200000001ff */ /*0aa0*/ IADD3 R6, R4, R3, RZ ; /* 0x0000000304067210 */ /* 0x000fe20007ffe0ff */ /*0ab0*/ IMAD R3, R3, c[0x0][0x178], R0 ; /* 0x00005e0003037a24 */ /* 0x002fd000078e0200 */ /*0ac0*/ IMAD.WIDE R6, R6, R8, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fc800078e0208 */ /*0ad0*/ IMAD.WIDE R8, R3, R8, c[0x0][0x170] ; /* 0x00005c0003087625 */ /* 0x000fca00078e0208 */ /*0ae0*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */ /* 0x0000a8000c1e1900 */ /*0af0*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */ /* 0x0002a2000c1e1900 */ /*0b00*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */ /* 0x000fc80007ffe0ff */ /*0b10*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*0b20*/ IMAD.WIDE R8, R2, 0x4, R8 ; /* 0x0000000402087825 */ /* 0x001fe200078e0208 */ /*0b30*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */ /* 0x002fc80007f3e0ff */ /*0b40*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0b50*/ FFMA R21, R10, R3, R21 ; /* 0x000000030a157223 */ /* 0x004fcc0000000015 */ /*0b60*/ @P0 BRA 0xae0 ; /* 0xffffff7000000947 */ /* 0x000fea000383ffff */ /*0b70*/ IADD3 R2, R4, R0, RZ ; /* 0x0000000004027210 */ /* 0x002fe40007ffe0ff */ /*0b80*/ MOV R3, 0x4 ; /* 0x0000000400037802 */ 
/* 0x000fca0000000f00 */ /*0b90*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0203 */ /*0ba0*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */ /* 0x000fe2000c101904 */ /*0bb0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0bc0*/ BRA 0xbc0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0bd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0be0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0bf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0c70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9matrixMulPfS_S_i .globl _Z9matrixMulPfS_S_i .p2align 8 .type _Z9matrixMulPfS_S_i,@function _Z9matrixMulPfS_S_i: s_load_b32 s8, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s8, 1 s_cbranch_scc1 .LBB0_3 s_load_b128 s[4:7], s[0:1], 0x8 v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, v0 s_mul_i32 s2, s15, s8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) s_ashr_i32 s3, s2, 31 v_mov_b32_e32 v3, v2 s_lshl_b64 s[2:3], s[2:3], 2 s_waitcnt lgkmcnt(0) s_add_u32 s2, s4, s2 s_addc_u32 s3, s5, s3 s_mov_b32 s4, s8 .p2align 6 .LBB0_2: v_lshlrev_b64 v[4:5], 2, v[1:2] s_load_b32 s5, s[2:3], 0x0 s_add_i32 s4, s4, -1 v_add_nc_u32_e32 v1, s8, v1 s_add_u32 s2, s2, 4 s_addc_u32 s3, s3, 0 v_add_co_u32 v4, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo s_cmp_lg_u32 s4, 0 global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) lgkmcnt(0) v_fmac_f32_e32 v3, s5, v4 s_cbranch_scc1 .LBB0_2 s_branch .LBB0_4 .LBB0_3: v_mov_b32_e32 v3, 0 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x0 v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v3, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9matrixMulPfS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 
.amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9matrixMulPfS_S_i, .Lfunc_end0-_Z9matrixMulPfS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9matrixMulPfS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z9matrixMulPfS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 
.workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00041c3f_00000000-6_mul_matG1B1_et_1.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i .type _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, @function _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i: .LFB2082: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9matrixMulPfS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i .globl _Z9matrixMulPfS_S_i .type _Z9matrixMulPfS_S_i, @function _Z9matrixMulPfS_S_i: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9matrixMulPfS_S_i, .-_Z9matrixMulPfS_S_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "Transfert A,B de CPU vers GPU : %f seconde\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC4: .string "Erreur appel kernel %d \n" .section .rodata.str1.8 .align 8 .LC5: .string "Transfert C de GPU vers CPU : %f seconde\n" .section .rodata.str1.1 .LC6: .string "Temps consomm\303\251 : %f seconde\n" .LC7: .string "Erreur i: %d j: %d %f\n" .LC8: .string "Erreur i: %d j: %d\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $136, %rsp .cfi_def_cfa_offset 192 movq %fs:40, %rdx movq %rdx, 120(%rsp) xorl %edx, %edx cmpl $1, %edi jg .L36 movl $1024, 96(%rsp) movl $1, 100(%rsp) movl $1, 104(%rsp) movl $1024, 108(%rsp) movl $1, 112(%rsp) movl $1, 116(%rsp) movl $4194304, %edi call malloc@PLT movq %rax, %r15 movl $4194304, %edi call malloc@PLT movq %rax, %r14 movl $4194304, %edi call malloc@PLT movq %rax, %rbx movl $1024, %r12d movl $1024, %r13d .L26: movslq %r12d, %r8 leaq 0(,%r8,4), %r9 movq %r15, %rsi movq %r14, %rcx movq %rbx, %rdi movl $0, %edx movss .LC0(%rip), %xmm0 jmp .L14 .L36: movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbp movl %eax, %r12d movl %eax, %r13d movl %eax, 96(%rsp) movl $1, 100(%rsp) movl $1, 104(%rsp) movl %eax, 108(%rsp) movl $1, 112(%rsp) movl $1, 116(%rsp) movl $4194304, %edi call malloc@PLT movq %rax, %r15 movl $4194304, %edi call malloc@PLT movq %rax, %r14 movl $4194304, %edi call malloc@PLT movq %rax, %rbx testl %ebp, %ebp jg .L26 .L13: 
leaq 24(%rsp), %rdi call cudaEventCreate@PLT leaq 32(%rsp), %rdi call cudaEventCreate@PLT leaq 40(%rsp), %rdi call cudaEventCreate@PLT leaq 48(%rsp), %rdi call cudaEventCreate@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 24(%rsp), %rdi call cudaEventRecord@PLT movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT leaq 72(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 80(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 88(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT movl $1, %ecx movl $4194304, %edx movq %r15, %rsi movq 72(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $4194304, %edx movq %r14, %rsi movq 80(%rsp), %rdi call cudaMemcpy@PLT movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movq 48(%rsp), %rdi call cudaEventSynchronize@PLT leaq 16(%rsp), %rdi movq 48(%rsp), %rdx movq 40(%rsp), %rsi call cudaEventElapsedTime@PLT movq 40(%rsp), %rdi call cudaEventDestroy@PLT movq 48(%rsp), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd 16(%rsp), %xmm0 divsd .LC2(%rip), %xmm0 leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl 104(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 96(%rsp), %rdx movq 108(%rsp), %rdi movl 116(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L37 .L17: call cudaGetLastError@PLT movl %eax, %ebp testl %eax, %eax jne .L38 movl $2, %ecx movl $4194304, %edx movq 88(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movq 64(%rsp), %rdi call cudaEventSynchronize@PLT leaq 20(%rsp), %rdi movq 64(%rsp), %rdx movq 56(%rsp), %rsi call cudaEventElapsedTime@PLT movq 56(%rsp), %rdi call cudaEventDestroy@PLT movq 64(%rsp), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd 20(%rsp), %xmm0 divsd .LC2(%rip), %xmm0 leaq .LC5(%rip), %rsi movl $2, %edi movl $1, %eax call 
__printf_chk@PLT movl $0, %esi movq 32(%rsp), %rdi call cudaEventRecord@PLT movq 32(%rsp), %rdi call cudaEventSynchronize@PLT leaq 12(%rsp), %rdi movq 32(%rsp), %rdx movq 24(%rsp), %rsi call cudaEventElapsedTime@PLT movq 24(%rsp), %rdi call cudaEventDestroy@PLT movq 32(%rsp), %rdi call cudaEventDestroy@PLT pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 divsd .LC2(%rip), %xmm0 leaq .LC6(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl %r13d, %esi movl $0, %edx leal -1(%r13), %r9d movl $0, %r8d testl %r12d, %r12d jg .L19 .L20: movq %r15, %rdi call free@PLT movq %r14, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq 72(%rsp), %rdi call cudaFree@PLT movq 80(%rsp), %rdi call cudaFree@PLT movq 88(%rsp), %rdi call cudaFree@PLT movq 120(%rsp), %rax subq %fs:40, %rax jne .L39 movl $0, %eax addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state addq $1, %rax cmpq %r8, %rax je .L40 .L16: movss %xmm0, (%rsi,%rax,4) movss %xmm0, (%rcx,%rax,4) movl $0x00000000, (%rdi,%rax,4) cmpl %eax, %edx jne .L15 pxor %xmm1, %xmm1 cvtsi2ssl %r10d, %xmm1 movss %xmm1, (%rsi,%rax,4) movss %xmm1, (%rcx,%rax,4) jmp .L15 .L40: addl $1, %edx addq %r9, %rsi addq %r9, %rcx addq %r9, %rdi cmpl %r12d, %edx je .L13 .L14: movl $0, %eax leal 1(%rdx), %r10d jmp .L16 .L37: movl %r12d, %ecx movq 80(%rsp), %rdx movq 72(%rsp), %rsi movq 88(%rsp), %rdi call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i jmp .L17 .L38: movl %eax, %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl %ebp, %edi call exit@PLT .L42: leal (%rcx,%rbp), %eax cltq movss (%rbx,%rax,4), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %edi, %xmm1 ucomiss %xmm1, %xmm0 jp .L30 jne .L30 .L22: leal 1(%rcx), %eax cmpl %r12d, %eax je .L41 movl %eax, %ecx .L25: cmpl %ecx, %edx je .L42 leal 
(%rcx,%rbp), %eax cltq leal (%rcx,%rsi), %r10d pxor %xmm0, %xmm0 cvtsi2ssl %r10d, %xmm0 ucomiss (%rbx,%rax,4), %xmm0 jp .L31 je .L22 .L31: leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L30: cvtss2sd %xmm0, %xmm0 leaq .LC7(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L41: leal 1(%rdx), %eax addl %r13d, %ebp addl $1, %esi cmpl %ecx, %edx je .L20 movl %eax, %edx .L19: leal 1(%rdx), %edi imull %edi, %edi addl %r9d, %edi movl %r8d, %ecx jmp .L25 .L39: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC9: .string "_Z9matrixMulPfS_S_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z9matrixMulPfS_S_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1065353216 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long 0 
.long 1083129856 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "mul_matG1B1_et_1.hip" .globl _Z24__device_stub__matrixMulPfS_S_i # -- Begin function _Z24__device_stub__matrixMulPfS_S_i .p2align 4, 0x90 .type _Z24__device_stub__matrixMulPfS_S_i,@function _Z24__device_stub__matrixMulPfS_S_i: # @_Z24__device_stub__matrixMulPfS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9matrixMulPfS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z24__device_stub__matrixMulPfS_S_i, .Lfunc_end0-_Z24__device_stub__matrixMulPfS_S_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x408f400000000000 # double 1000 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $248, %rsp .cfi_def_cfa_offset 304 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $1024, %eax # imm = 0x400 cmpl $2, %edi jl .LBB1_2 # %bb.1: movq 8(%rsi), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol .LBB1_2: movq %rax, 8(%rsp) # 8-byte Spill movl %eax, %r13d movabsq $4294967296, %rax # imm = 0x100000000 orq %r13, %rax movq %rax, 144(%rsp) # 8-byte 
Spill movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, 48(%rsp) # 8-byte Spill movl $4194304, %edi # imm = 0x400000 callq malloc movq %rax, 40(%rsp) # 8-byte Spill movl $4194304, %edi # imm = 0x400000 callq malloc movq 8(%rsp), %rcx # 8-byte Reload movq %rax, 136(%rsp) # 8-byte Spill testl %ecx, %ecx jle .LBB1_9 # %bb.3: # %.preheader104.lr.ph leaq (,%r13,4), %rbp xorl %r14d, %r14d xorl %r15d, %r15d jmp .LBB1_4 .p2align 4, 0x90 .LBB1_8: # %._crit_edge # in Loop: Header=BB1_4 Depth=1 incq %r15 movq 8(%rsp), %rcx # 8-byte Reload addl %ecx, %r14d cmpq %r13, %r15 je .LBB1_9 .LBB1_4: # %.preheader104 # =>This Loop Header: Depth=1 # Child Loop BB1_5 Depth 2 movl %r14d, %eax movq 40(%rsp), %rdx # 8-byte Reload leaq (%rdx,%rax,4), %r12 movq 48(%rsp), %rdx # 8-byte Reload leaq (%rdx,%rax,4), %rbx movl %ecx, %eax imull %r15d, %eax movq 136(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %rdi leal 1(%r15), %eax xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss %xmm0, 108(%rsp) # 4-byte Spill xorl %esi, %esi movq %rbp, %rdx callq memset@PLT xorl %eax, %eax jmp .LBB1_5 .p2align 4, 0x90 .LBB1_7: # in Loop: Header=BB1_5 Depth=2 incq %rax cmpq %rax, %r13 je .LBB1_8 .LBB1_5: # Parent Loop BB1_4 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rbx,%rax,4) # imm = 0x3F800000 movl $1065353216, (%r12,%rax,4) # imm = 0x3F800000 cmpq %rax, %r15 jne .LBB1_7 # %bb.6: # in Loop: Header=BB1_5 Depth=2 movss 108(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero movss %xmm0, (%rbx,%rax,4) movss %xmm0, (%r12,%rax,4) jmp .LBB1_7 .LBB1_9: # %._crit_edge111 leaq 96(%rsp), %rdi callq hipEventCreate leaq 32(%rsp), %rdi callq hipEventCreate leaq 88(%rsp), %rdi callq hipEventCreate leaq 24(%rsp), %rdi callq hipEventCreate leaq 80(%rsp), %rdi callq hipEventCreate leaq 16(%rsp), %rdi callq hipEventCreate movq 96(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 88(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 80(%rsp), %rdi xorl %esi, %esi callq 
hipEventRecord leaq 72(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 64(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc leaq 56(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc movq 72(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq 48(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy movq 64(%rsp), %rdi movl $4194304, %edx # imm = 0x400000 movq 40(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 24(%rsp), %rdi callq hipEventSynchronize movq 88(%rsp), %rsi movq 24(%rsp), %rdx leaq 112(%rsp), %rdi callq hipEventElapsedTime movq 88(%rsp), %rdi callq hipEventDestroy movq 24(%rsp), %rdi callq hipEventDestroy movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movl $.L.str, %edi movb $1, %al callq printf movq 144(%rsp), %rdi # 8-byte Reload movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_11 # %bb.10: movq 56(%rsp), %rax movq 72(%rsp), %rcx movq 64(%rsp), %rdx movq %rax, 200(%rsp) movq %rcx, 192(%rsp) movq %rdx, 184(%rsp) movq 8(%rsp), %rax # 8-byte Reload movl %eax, 116(%rsp) leaq 200(%rsp), %rax movq %rax, 208(%rsp) leaq 192(%rsp), %rax movq %rax, 216(%rsp) leaq 184(%rsp), %rax movq %rax, 224(%rsp) leaq 116(%rsp), %rax movq %rax, 232(%rsp) leaq 120(%rsp), %rdi leaq 168(%rsp), %rsi leaq 160(%rsp), %rdx leaq 152(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 168(%rsp), %rcx movl 176(%rsp), %r8d leaq 208(%rsp), %r9 movl $_Z9matrixMulPfS_S_i, %edi pushq 152(%rsp) .cfi_adjust_cfa_offset 8 pushq 168(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_11: callq hipGetLastError testl %eax, %eax movq 136(%rsp), %rbx # 8-byte Reload jne .LBB1_22 # %bb.12: movq 56(%rsp), %rsi movl $4194304, %edx # imm = 0x400000 
movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq 16(%rsp), %rdi xorl %ebp, %ebp xorl %esi, %esi callq hipEventRecord movq 16(%rsp), %rdi callq hipEventSynchronize movq 80(%rsp), %rsi movq 16(%rsp), %rdx leaq 120(%rsp), %rdi callq hipEventElapsedTime movq 80(%rsp), %rdi callq hipEventDestroy movq 16(%rsp), %rdi callq hipEventDestroy movss 120(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movl $.L.str.2, %edi movb $1, %al callq printf movq 32(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 32(%rsp), %rdi callq hipEventSynchronize movq 96(%rsp), %rsi movq 32(%rsp), %rdx leaq 208(%rsp), %rdi callq hipEventElapsedTime movq 96(%rsp), %rdi callq hipEventDestroy movq 32(%rsp), %rdi callq hipEventDestroy movss 208(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 divsd .LCPI1_0(%rip), %xmm0 movl $.L.str.3, %edi movb $1, %al callq printf movq 8(%rsp), %r10 # 8-byte Reload testl %r10d, %r10d jle .LBB1_21 # %bb.13: # %.preheader.lr.ph leal -1(%r10), %eax movq %r13, %rcx xorl %edi, %edi xorl %esi, %esi jmp .LBB1_14 .p2align 4, 0x90 .LBB1_20: # %._crit_edge114 # in Loop: Header=BB1_14 Depth=1 incq %rsi addl %r10d, %ebp decq %rdi incq %rcx cmpq %r13, %rsi je .LBB1_21 .LBB1_14: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_15 Depth 2 movl %ebp, %edx leaq (%rbx,%rdx,4), %r8 leal 1(%rsi), %edx imull %edx, %edx addl %eax, %edx xorps %xmm0, %xmm0 cvtsi2ss %edx, %xmm0 xorl %edx, %edx jmp .LBB1_15 .p2align 4, 0x90 .LBB1_18: # %.critedge # in Loop: Header=BB1_15 Depth=2 leal (%rcx,%rdx), %r9d xorps %xmm1, %xmm1 cvtsi2ss %r9d, %xmm1 movss (%r8,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero ucomiss %xmm1, %xmm2 jne .LBB1_24 jp .LBB1_24 .LBB1_19: # in Loop: Header=BB1_15 Depth=2 incq %rdx cmpq %rdx, %r13 je .LBB1_20 .LBB1_15: # Parent Loop BB1_14 Depth=1 # => This Inner Loop Header: Depth=2 movq %rdi, %r9 addq %rdx, %r9 jne .LBB1_18 # %bb.16: # in Loop: Header=BB1_15 Depth=2 movss (%r8,%rdx,4), %xmm1 # 
xmm1 = mem[0],zero,zero,zero ucomiss %xmm0, %xmm1 jne .LBB1_23 jp .LBB1_23 # %bb.17: # in Loop: Header=BB1_15 Depth=2 testq %r9, %r9 jne .LBB1_18 jmp .LBB1_19 .LBB1_21: # %._crit_edge116 movq 48(%rsp), %rdi # 8-byte Reload callq free movq 40(%rsp), %rdi # 8-byte Reload callq free movq %rbx, %rdi callq free movq 72(%rsp), %rdi callq hipFree movq 64(%rsp), %rdi callq hipFree movq 56(%rsp), %rdi callq hipFree xorl %eax, %eax addq $248, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB1_24: .cfi_def_cfa_offset 304 movl $.L.str.5, %edi # kill: def $esi killed $esi killed $rsi # kill: def $edx killed $edx killed $rdx xorl %eax, %eax callq printf movl $1, %edi callq exit .LBB1_23: xorps %xmm0, %xmm0 cvtss2sd %xmm1, %xmm0 movl $.L.str.4, %edi # kill: def $esi killed $esi killed $rsi # kill: def $edx killed $edx killed $rdx movb $1, %al callq printf movl $1, %edi callq exit .LBB1_22: movl $.L.str.1, %edi movl %eax, %esi movl %eax, %ebx xorl %eax, %eax callq printf movl %ebx, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9matrixMulPfS_S_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, 
.Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9matrixMulPfS_S_i,@object # @_Z9matrixMulPfS_S_i .section .rodata,"a",@progbits .globl _Z9matrixMulPfS_S_i .p2align 3, 0x0 _Z9matrixMulPfS_S_i: .quad _Z24__device_stub__matrixMulPfS_S_i .size _Z9matrixMulPfS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Transfert A,B de CPU vers GPU : %f seconde\n" .size .L.str, 44 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Erreur appel kernel %d \n" .size .L.str.1, 25 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Transfert C de GPU vers CPU : %f seconde\n" .size .L.str.2, 42 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Temps consomm\303\251 : %f seconde\n" .size .L.str.3, 30 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Erreur i: %d j: %d %f\n" .size .L.str.4, 23 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "Erreur i: %d j: %d\n" .size .L.str.5, 20 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9matrixMulPfS_S_i" .size .L__unnamed_1, 20 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss 
.globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__matrixMulPfS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9matrixMulPfS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cuda_runtime.h>
#include <iostream>
#include <string>

// Define this to turn on error checking
#define CUDA_ERROR_CHECK

#define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, deviceID)
#define CudaSyncAndCheckError() __cudaSyncAndCheckError(__FILE__, __LINE__, deviceID)

// Translate a CUDA runtime status code into a human-readable message.
// Returns the empty string on success (or always, when CUDA_ERROR_CHECK is
// not defined); otherwise a string naming the device and the error text.
// NOTE(review): `file` and `line` are supplied by the CudaSafeCall macro but
// are not currently woven into the message — confirm whether that is intended.
__host__ inline std::string __cudaSafeCall(cudaError err, const char *file, const int line, const int deviceID)
{
#ifdef CUDA_ERROR_CHECK
    if (err != cudaSuccess)
        return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaGetErrorString(err);
#endif //CUDA_ERROR_CHECK
    return "";
}

// Drain any pending CUDA errors, then synchronize the device and report any
// error raised by outstanding work. Each cudaGetLastError() call clears the
// sticky error state, so the loops collect every queued error, joining the
// messages with " <- " (most recent first). Returns "" when everything is clean.
__host__ inline std::string __cudaSyncAndCheckError(const char *file, const int line, const int deviceID)
{
    std::string cudaErrors{ "" };
    cudaError_t response{ cudaSuccess };
#ifdef CUDA_ERROR_CHECK
    // First pass: report errors that were already pending before the sync.
    response = cudaGetLastError();
    if (response != cudaSuccess)
    {
        do
        {
            if (!cudaErrors.empty())
                cudaErrors += " <- ";
            cudaErrors += cudaGetErrorString(response);
            response = cudaGetLastError();        // clears and fetches the next error
        } while (response != cudaSuccess);
        return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors;
    }
#endif //CUDA_ERROR_CHECK
    // Second pass: block until the device is idle, then report any error the
    // synchronize surfaced (plus anything still queued behind it).
    response = cudaDeviceSynchronize();
    if (response != cudaSuccess)
    {
        do
        {
            if (!cudaErrors.empty())
                cudaErrors += " <- ";
            cudaErrors += cudaGetErrorString(response);
            response = cudaGetLastError();
        } while (response != cudaSuccess);
        return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors;
    }
    return "";
}
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cuda_runtime.h>
#include <iostream>
#include <string>

// Define this to turn on error checking
#define CUDA_ERROR_CHECK

#define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, deviceID)
#define CudaSyncAndCheckError() __cudaSyncAndCheckError(__FILE__, __LINE__, deviceID)

// Convert a CUDA runtime status code to a human-readable message.
// Returns "" on success (and unconditionally when CUDA_ERROR_CHECK is
// undefined); otherwise a string naming the device ID and the error text.
// NOTE(review): `file` and `line` come from the CudaSafeCall macro but are
// not used in the message — presumably reserved for future use; confirm.
__host__ inline std::string __cudaSafeCall(cudaError err, const char *file, const int line, const int deviceID)
{
#ifdef CUDA_ERROR_CHECK
    if (cudaSuccess != err)
        return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaGetErrorString(err);
    else
#endif //CUDA_ERROR_CHECK
        return "";
}

// Drain pending CUDA errors, then synchronize the device and report any error
// raised by outstanding work. cudaGetLastError() clears the sticky error each
// call, so the loops collect every queued error, joined with " <- ".
// Returns "" when no error is found at either stage.
__host__ inline std::string __cudaSyncAndCheckError(const char *file, const int line, const int deviceID)
{
    cudaError_t response{ cudaSuccess };
    std::string cudaErrors{ "" };
#ifdef CUDA_ERROR_CHECK
    // Stage 1: collect errors already pending before synchronization.
    response = cudaGetLastError();
    if (response != cudaSuccess)
    {
        while (response != cudaSuccess)
        {
            if (!cudaErrors.empty())
                cudaErrors += " <- ";
            cudaErrors += cudaGetErrorString(response);
            response = cudaGetLastError();   // clears and fetches the next error
        }
        return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors;
    }
#endif //CUDA_ERROR_CHECK
    // Stage 2: wait for the device to go idle, then report any error the
    // synchronize surfaced (plus anything still queued behind it).
    response = cudaDeviceSynchronize();
    if (response != cudaSuccess)
    {
        while (response != cudaSuccess)
        {
            if (!cudaErrors.empty())
                cudaErrors += " <- ";
            cudaErrors += cudaGetErrorString(response);
            response = cudaGetLastError();
        }
        return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors;
    }
    return "";
}
.file "tmpxft_000b3d73_00000000-6_cudaErrorCheck.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cuda_runtime.h> #include <iostream> #include <string> // Define this to turn on error checking #define CUDA_ERROR_CHECK #define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, deviceID) #define CudaSyncAndCheckError() __cudaSyncAndCheckError(__FILE__, __LINE__, deviceID) __host__ inline std::string __cudaSafeCall(cudaError err, const char *file, const int line, const int deviceID) { #ifdef CUDA_ERROR_CHECK if (cudaSuccess != err) return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaGetErrorString(err); else #endif //CUDA_ERROR_CHECK return ""; } __host__ inline std::string __cudaSyncAndCheckError(const char *file, const int line, const int deviceID) { cudaError_t response{ cudaSuccess }; std::string cudaErrors{ "" }; #ifdef CUDA_ERROR_CHECK response = cudaGetLastError(); if (response != cudaSuccess) { while (response != cudaSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += cudaGetErrorString(response); response = cudaGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } #endif //CUDA_ERROR_CHECK response = cudaDeviceSynchronize(); if (response != cudaSuccess) { while (response != cudaSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += cudaGetErrorString(response); response = cudaGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } return ""; }
#include <hip/hip_runtime.h> #include <iostream> #include <string> // Define this to turn on error checking #define CUDA_ERROR_CHECK #define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, deviceID) #define CudaSyncAndCheckError() __cudaSyncAndCheckError(__FILE__, __LINE__, deviceID) __host__ inline std::string __cudaSafeCall(hipError_t err, const char *file, const int line, const int deviceID) { #ifdef CUDA_ERROR_CHECK if (hipSuccess != err) return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + hipGetErrorString(err); else #endif //CUDA_ERROR_CHECK return ""; } __host__ inline std::string __cudaSyncAndCheckError(const char *file, const int line, const int deviceID) { hipError_t response{ hipSuccess }; std::string cudaErrors{ "" }; #ifdef CUDA_ERROR_CHECK response = hipGetLastError(); if (response != hipSuccess) { while (response != hipSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += hipGetErrorString(response); response = hipGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } #endif //CUDA_ERROR_CHECK response = hipDeviceSynchronize(); if (response != hipSuccess) { while (response != hipSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += hipGetErrorString(response); response = hipGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } return ""; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <string> // Define this to turn on error checking #define CUDA_ERROR_CHECK #define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, deviceID) #define CudaSyncAndCheckError() __cudaSyncAndCheckError(__FILE__, __LINE__, deviceID) __host__ inline std::string __cudaSafeCall(hipError_t err, const char *file, const int line, const int deviceID) { #ifdef CUDA_ERROR_CHECK if (hipSuccess != err) return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + hipGetErrorString(err); else #endif //CUDA_ERROR_CHECK return ""; } __host__ inline std::string __cudaSyncAndCheckError(const char *file, const int line, const int deviceID) { hipError_t response{ hipSuccess }; std::string cudaErrors{ "" }; #ifdef CUDA_ERROR_CHECK response = hipGetLastError(); if (response != hipSuccess) { while (response != hipSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += hipGetErrorString(response); response = hipGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } #endif //CUDA_ERROR_CHECK response = hipDeviceSynchronize(); if (response != hipSuccess) { while (response != hipSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += hipGetErrorString(response); response = hipGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } return ""; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <string> // Define this to turn on error checking #define CUDA_ERROR_CHECK #define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, deviceID) #define CudaSyncAndCheckError() __cudaSyncAndCheckError(__FILE__, __LINE__, deviceID) __host__ inline std::string __cudaSafeCall(hipError_t err, const char *file, const int line, const int deviceID) { #ifdef CUDA_ERROR_CHECK if (hipSuccess != err) return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + hipGetErrorString(err); else #endif //CUDA_ERROR_CHECK return ""; } __host__ inline std::string __cudaSyncAndCheckError(const char *file, const int line, const int deviceID) { hipError_t response{ hipSuccess }; std::string cudaErrors{ "" }; #ifdef CUDA_ERROR_CHECK response = hipGetLastError(); if (response != hipSuccess) { while (response != hipSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += hipGetErrorString(response); response = hipGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } #endif //CUDA_ERROR_CHECK response = hipDeviceSynchronize(); if (response != hipSuccess) { while (response != hipSuccess) { if (!cudaErrors.empty()) cudaErrors += " <- "; cudaErrors += hipGetErrorString(response); response = hipGetLastError(); } return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors; } return ""; }
.text .file "cudaErrorCheck.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000b3d73_00000000-6_cudaErrorCheck.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cudaErrorCheck.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
//********************************************************************** // * // University Of North Carolina Charlotte * // * //Program: Vecotr adder * //Description: This program is for testing GPU performance with vector * // add function. * // * // * //File Name: pb1b_gpu.c * //File Version: 1.0 * //Baseline: Homework_0 * // * //Course: ECGR6090- Heterogeneous Computing * // * //Programmed by: Roy Liu * //Under Suppervision of: Dr. Hamed Tabkhi * // * //Input file: No * // * //Output:Time of program running * //********************************************************************** #include<cuda_runtime.h> #include<stdio.h> #include<stdlib.h> #include<sys/time.h> __global__ void add(int a[], int b[], int c[]) { int i = blockDim.x * blockIdx.x + threadIdx.x; c[i] = a[i] + b[i]; } void random_ints(int* r, int n); int main() { int n=1000000; //for counting run time struct timeval start, end; float timer; gettimeofday(&start, NULL); int *d_a, *d_b, *d_c; //int n = 1024 * 1024; int size = n * sizeof(int); // data initializing d_a = (int *)malloc(size); random_ints(d_a, n); d_b = (int *)malloc(size); random_ints(d_b, n); d_c = (int *)malloc(size); dim3 dimGrid(n/512); dim3 dimBlock(512); //each block has X threads // kernel add<<<dimGrid, dimBlock>>>(d_a,d_b,d_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); gettimeofday(&end, NULL); timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; printf("Number of loop is: %d\nRunning time is: %f ms\n", n,timer/1000); return 0; } //********************************************************************** // Function Name: random_ints * // Description: - Generate random integer * // Input : None * // Output : Random integer * // Return: None * //********************************************************************** void random_ints(int* r, int n) { int i; for (i=0; i < n; ++i) { r[i] = rand()/2; } }
code for sm_80 Function : _Z3addPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x0c0fe400078e0207 */ /*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */ /* 0x004fca0007ffe0ff */ /*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
//********************************************************************** // * // University Of North Carolina Charlotte * // * //Program: Vecotr adder * //Description: This program is for testing GPU performance with vector * // add function. * // * // * //File Name: pb1b_gpu.c * //File Version: 1.0 * //Baseline: Homework_0 * // * //Course: ECGR6090- Heterogeneous Computing * // * //Programmed by: Roy Liu * //Under Suppervision of: Dr. Hamed Tabkhi * // * //Input file: No * // * //Output:Time of program running * //********************************************************************** #include<cuda_runtime.h> #include<stdio.h> #include<stdlib.h> #include<sys/time.h> __global__ void add(int a[], int b[], int c[]) { int i = blockDim.x * blockIdx.x + threadIdx.x; c[i] = a[i] + b[i]; } void random_ints(int* r, int n); int main() { int n=1000000; //for counting run time struct timeval start, end; float timer; gettimeofday(&start, NULL); int *d_a, *d_b, *d_c; //int n = 1024 * 1024; int size = n * sizeof(int); // data initializing d_a = (int *)malloc(size); random_ints(d_a, n); d_b = (int *)malloc(size); random_ints(d_b, n); d_c = (int *)malloc(size); dim3 dimGrid(n/512); dim3 dimBlock(512); //each block has X threads // kernel add<<<dimGrid, dimBlock>>>(d_a,d_b,d_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); gettimeofday(&end, NULL); timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; printf("Number of loop is: %d\nRunning time is: %f ms\n", n,timer/1000); return 0; } //********************************************************************** // Function Name: random_ints * // Description: - Generate random integer * // Input : None * // Output : Random integer * // Return: None * //********************************************************************** void random_ints(int* r, int n) { int i; for (i=0; i < n; ++i) { r[i] = rand()/2; } }
.file "tmpxft_00109a34_00000000-6_pb1a_gpu.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11random_intsPii .type _Z11random_intsPii, @function _Z11random_intsPii: .LFB2058: .cfi_startproc endbr64 testl %esi, %esi jle .L8 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %rbp .L5: call rand@PLT movl %eax, %edx shrl $31, %edx leal (%rdx,%rax), %eax sarl %eax movl %eax, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L5 addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 ret .cfi_endproc .LFE2058: .size _Z11random_intsPii, .-_Z11random_intsPii .globl _Z26__device_stub__Z3addPiS_S_PiS_S_ .type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function _Z26__device_stub__Z3addPiS_S_PiS_S_: .LFB2083: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) 
.cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z3addPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_ .globl _Z3addPiS_S_ .type _Z3addPiS_S_, @function _Z3addPiS_S_: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z3addPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z3addPiS_S_, .-_Z3addPiS_S_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "Number of loop is: %d\nRunning time is: %f ms\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $80, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax leaq 32(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movl $4000000, %edi call malloc@PLT movq %rax, %rbp movl $1000000, %esi movq %rax, %rdi call _Z11random_intsPii movl $4000000, %edi call malloc@PLT movq %rax, %rbx movl $1000000, %esi movq %rax, %rdi call _Z11random_intsPii movl $4000000, %edi call malloc@PLT movq %rax, %r12 movl $1953, 8(%rsp) movl $1, 12(%rsp) movl $512, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L23 .L20: movq %rbp, %rdi call cudaFree@PLT movq %rbx, %rdi call cudaFree@PLT movq %r12, %rdi call cudaFree@PLT leaq 48(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 48(%rsp), %rax subq 32(%rsp), %rax imulq $1000000, %rax, %rax addq 56(%rsp), %rax subq 40(%rsp), %rax pxor %xmm0, %xmm0 cvtsi2ssq %rax, 
%xmm0 divss .LC0(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $1000000, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L24 movl $0, %eax addq $80, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state movq %r12, %rdx movq %rbx, %rsi movq %rbp, %rdi call _Z26__device_stub__Z3addPiS_S_PiS_S_ jmp .L20 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "_Z3addPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z3addPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1148846080 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
//********************************************************************** // * // University Of North Carolina Charlotte * // * //Program: Vecotr adder * //Description: This program is for testing GPU performance with vector * // add function. * // * // * //File Name: pb1b_gpu.c * //File Version: 1.0 * //Baseline: Homework_0 * // * //Course: ECGR6090- Heterogeneous Computing * // * //Programmed by: Roy Liu * //Under Suppervision of: Dr. Hamed Tabkhi * // * //Input file: No * // * //Output:Time of program running * //********************************************************************** #include<cuda_runtime.h> #include<stdio.h> #include<stdlib.h> #include<sys/time.h> __global__ void add(int a[], int b[], int c[]) { int i = blockDim.x * blockIdx.x + threadIdx.x; c[i] = a[i] + b[i]; } void random_ints(int* r, int n); int main() { int n=1000000; //for counting run time struct timeval start, end; float timer; gettimeofday(&start, NULL); int *d_a, *d_b, *d_c; //int n = 1024 * 1024; int size = n * sizeof(int); // data initializing d_a = (int *)malloc(size); random_ints(d_a, n); d_b = (int *)malloc(size); random_ints(d_b, n); d_c = (int *)malloc(size); dim3 dimGrid(n/512); dim3 dimBlock(512); //each block has X threads // kernel add<<<dimGrid, dimBlock>>>(d_a,d_b,d_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); gettimeofday(&end, NULL); timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; printf("Number of loop is: %d\nRunning time is: %f ms\n", n,timer/1000); return 0; } //********************************************************************** // Function Name: random_ints * // Description: - Generate random integer * // Input : None * // Output : Random integer * // Return: None * //********************************************************************** void random_ints(int* r, int n) { int i; for (i=0; i < n; ++i) { r[i] = rand()/2; } }
//********************************************************************** // * // University Of North Carolina Charlotte * // * //Program: Vecotr adder * //Description: This program is for testing GPU performance with vector * // add function. * // * // * //File Name: pb1b_gpu.c * //File Version: 1.0 * //Baseline: Homework_0 * // * //Course: ECGR6090- Heterogeneous Computing * // * //Programmed by: Roy Liu * //Under Suppervision of: Dr. Hamed Tabkhi * // * //Input file: No * // * //Output:Time of program running * //********************************************************************** #include<hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> #include<sys/time.h> __global__ void add(int a[], int b[], int c[]) { int i = blockDim.x * blockIdx.x + threadIdx.x; c[i] = a[i] + b[i]; } void random_ints(int* r, int n); int main() { int n=1000000; //for counting run time struct timeval start, end; float timer; gettimeofday(&start, NULL); int *d_a, *d_b, *d_c; //int n = 1024 * 1024; int size = n * sizeof(int); // data initializing d_a = (int *)malloc(size); random_ints(d_a, n); d_b = (int *)malloc(size); random_ints(d_b, n); d_c = (int *)malloc(size); dim3 dimGrid(n/512); dim3 dimBlock(512); //each block has X threads // kernel add<<<dimGrid, dimBlock>>>(d_a,d_b,d_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); gettimeofday(&end, NULL); timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; printf("Number of loop is: %d\nRunning time is: %f ms\n", n,timer/1000); return 0; } //********************************************************************** // Function Name: random_ints * // Description: - Generate random integer * // Input : None * // Output : Random integer * // Return: None * //********************************************************************** void random_ints(int* r, int n) { int i; for (i=0; i < n; ++i) { r[i] = rand()/2; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
//********************************************************************** // * // University Of North Carolina Charlotte * // * //Program: Vecotr adder * //Description: This program is for testing GPU performance with vector * // add function. * // * // * //File Name: pb1b_gpu.c * //File Version: 1.0 * //Baseline: Homework_0 * // * //Course: ECGR6090- Heterogeneous Computing * // * //Programmed by: Roy Liu * //Under Suppervision of: Dr. Hamed Tabkhi * // * //Input file: No * // * //Output:Time of program running * //********************************************************************** #include<hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> #include<sys/time.h> __global__ void add(int a[], int b[], int c[]) { int i = blockDim.x * blockIdx.x + threadIdx.x; c[i] = a[i] + b[i]; } void random_ints(int* r, int n); int main() { int n=1000000; //for counting run time struct timeval start, end; float timer; gettimeofday(&start, NULL); int *d_a, *d_b, *d_c; //int n = 1024 * 1024; int size = n * sizeof(int); // data initializing d_a = (int *)malloc(size); random_ints(d_a, n); d_b = (int *)malloc(size); random_ints(d_b, n); d_c = (int *)malloc(size); dim3 dimGrid(n/512); dim3 dimBlock(512); //each block has X threads // kernel add<<<dimGrid, dimBlock>>>(d_a,d_b,d_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); gettimeofday(&end, NULL); timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; printf("Number of loop is: %d\nRunning time is: %f ms\n", n,timer/1000); return 0; } //********************************************************************** // Function Name: random_ints * // Description: - Generate random integer * // Input : None * // Output : Random integer * // Return: None * //********************************************************************** void random_ints(int* r, int n) { int i; for (i=0; i < n; ++i) { r[i] = rand()/2; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z3addPiS_S_ .globl _Z3addPiS_S_ .p2align 8 .type _Z3addPiS_S_,@function _Z3addPiS_S_: s_clause 0x2 s_load_b32 s2, s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z3addPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 
.amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z3addPiS_S_ .private_segment_fixed_size: 0 
.sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z3addPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
//********************************************************************** // * // University Of North Carolina Charlotte * // * //Program: Vecotr adder * //Description: This program is for testing GPU performance with vector * // add function. * // * // * //File Name: pb1b_gpu.c * //File Version: 1.0 * //Baseline: Homework_0 * // * //Course: ECGR6090- Heterogeneous Computing * // * //Programmed by: Roy Liu * //Under Suppervision of: Dr. Hamed Tabkhi * // * //Input file: No * // * //Output:Time of program running * //********************************************************************** #include<hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> #include<sys/time.h> __global__ void add(int a[], int b[], int c[]) { int i = blockDim.x * blockIdx.x + threadIdx.x; c[i] = a[i] + b[i]; } void random_ints(int* r, int n); int main() { int n=1000000; //for counting run time struct timeval start, end; float timer; gettimeofday(&start, NULL); int *d_a, *d_b, *d_c; //int n = 1024 * 1024; int size = n * sizeof(int); // data initializing d_a = (int *)malloc(size); random_ints(d_a, n); d_b = (int *)malloc(size); random_ints(d_b, n); d_c = (int *)malloc(size); dim3 dimGrid(n/512); dim3 dimBlock(512); //each block has X threads // kernel add<<<dimGrid, dimBlock>>>(d_a,d_b,d_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); gettimeofday(&end, NULL); timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; printf("Number of loop is: %d\nRunning time is: %f ms\n", n,timer/1000); return 0; } //********************************************************************** // Function Name: random_ints * // Description: - Generate random integer * // Input : None * // Output : Random integer * // Return: None * //********************************************************************** void random_ints(int* r, int n) { int i; for (i=0; i < n; ++i) { r[i] = rand()/2; } }
.text .file "pb1a_gpu.hip" .globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_ .p2align 4, 0x90 .type _Z18__device_stub__addPiS_S_,@function _Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z3addPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_ .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI1_0: .long 0x447a0000 # float 1000 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $112, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 xorl %r14d, %r14d leaq 96(%rsp), %rdi xorl %esi, %esi callq gettimeofday movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %rbx .p2align 4, 0x90 .LBB1_1: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx sarl %ecx movl %ecx, (%rbx,%r14,4) incq %r14 cmpq $1000000, %r14 # imm = 0xF4240 jne .LBB1_1 # %bb.2: # %_Z11random_intsPii.exit movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_3: # %.lr.ph.i22 # =>This Inner Loop Header: Depth=1 callq rand movl %eax, %ecx shrl 
$31, %ecx addl %eax, %ecx sarl %ecx movl %ecx, (%r14,%r15,4) incq %r15 cmpq $1000000, %r15 # imm = 0xF4240 jne .LBB1_3 # %bb.4: # %_Z11random_intsPii.exit26 movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r15 movabsq $4294967808, %rdx # imm = 0x100000200 leaq 1441(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_6 # %bb.5: movq %rbx, 88(%rsp) movq %r14, 80(%rsp) movq %r15, 72(%rsp) leaq 88(%rsp), %rax movq %rax, (%rsp) leaq 80(%rsp), %rax movq %rax, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 16(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movq %rsp, %r9 movl $_Z3addPiS_S_, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_6: movq %rbx, %rdi callq hipFree movq %r14, %rdi callq hipFree movq %r15, %rdi callq hipFree movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movq (%rsp), %rax subq 96(%rsp), %rax imulq $1000000, %rax, %rax # imm = 0xF4240 addq 8(%rsp), %rax subq 104(%rsp), %rax cvtsi2ss %rax, %xmm0 divss .LCPI1_0(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movl $1000000, %esi # imm = 0xF4240 movb $1, %al callq printf xorl %eax, %eax addq $112, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z11random_intsPii # -- Begin function _Z11random_intsPii .p2align 4, 0x90 .type _Z11random_intsPii,@function _Z11random_intsPii: # @_Z11random_intsPii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB2_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 
.cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx sarl %ecx movl %ecx, (%rbx,%r15,4) incq %r15 cmpq %r15, %r14 jne .LBB2_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB2_4: # %._crit_edge retq .Lfunc_end2: .size _Z11random_intsPii, .Lfunc_end2-_Z11random_intsPii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z3addPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z3addPiS_S_,@object # @_Z3addPiS_S_ .section .rodata,"a",@progbits .globl _Z3addPiS_S_ .p2align 3, 0x0 _Z3addPiS_S_: .quad 
_Z18__device_stub__addPiS_S_ .size _Z3addPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Number of loop is: %d\nRunning time is: %f ms\n" .size .L.str, 46 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z3addPiS_S_" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z18__device_stub__addPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z3addPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z3addPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fca00078e0203 */ /*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x0c0fe400078e0207 */ /*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */ /* 0x000ea2000c1e1900 */ /*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */ /* 0x004fca0007ffe0ff */ /*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z3addPiS_S_ .globl _Z3addPiS_S_ .p2align 8 .type _Z3addPiS_S_,@function _Z3addPiS_S_: s_clause 0x2 s_load_b32 s2, s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s4, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z3addPiS_S_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 
.amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z3addPiS_S_ .private_segment_fixed_size: 0 
.sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z3addPiS_S_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00109a34_00000000-6_pb1a_gpu.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z11random_intsPii .type _Z11random_intsPii, @function _Z11random_intsPii: .LFB2058: .cfi_startproc endbr64 testl %esi, %esi jle .L8 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %rbp .L5: call rand@PLT movl %eax, %edx shrl $31, %edx leal (%rdx,%rax), %eax sarl %eax movl %eax, (%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L5 addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L8: .cfi_restore 3 .cfi_restore 6 ret .cfi_endproc .LFE2058: .size _Z11random_intsPii, .-_Z11random_intsPii .globl _Z26__device_stub__Z3addPiS_S_PiS_S_ .type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function _Z26__device_stub__Z3addPiS_S_PiS_S_: .LFB2083: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 120(%rsp), %rax subq %fs:40, %rax jne .L16 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) 
.cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z3addPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_ .globl _Z3addPiS_S_ .type _Z3addPiS_S_, @function _Z3addPiS_S_: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z3addPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z3addPiS_S_, .-_Z3addPiS_S_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "Number of loop is: %d\nRunning time is: %f ms\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $80, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax leaq 32(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movl $4000000, %edi call malloc@PLT movq %rax, %rbp movl $1000000, %esi movq %rax, %rdi call _Z11random_intsPii movl $4000000, %edi call malloc@PLT movq %rax, %rbx movl $1000000, %esi movq %rax, %rdi call _Z11random_intsPii movl $4000000, %edi call malloc@PLT movq %rax, %r12 movl $1953, 8(%rsp) movl $1, 12(%rsp) movl $512, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L23 .L20: movq %rbp, %rdi call cudaFree@PLT movq %rbx, %rdi call cudaFree@PLT movq %r12, %rdi call cudaFree@PLT leaq 48(%rsp), %rdi movl $0, %esi call gettimeofday@PLT movq 48(%rsp), %rax subq 32(%rsp), %rax imulq $1000000, %rax, %rax addq 56(%rsp), %rax subq 40(%rsp), %rax pxor %xmm0, %xmm0 cvtsi2ssq %rax, 
%xmm0 divss .LC0(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $1000000, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L24 movl $0, %eax addq $80, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state movq %r12, %rdx movq %rbx, %rsi movq %rbp, %rdi call _Z26__device_stub__Z3addPiS_S_PiS_S_ jmp .L20 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "_Z3addPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z3addPiS_S_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1148846080 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "pb1a_gpu.hip" .globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_ .p2align 4, 0x90 .type _Z18__device_stub__addPiS_S_,@function _Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z3addPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_ .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI1_0: .long 0x447a0000 # float 1000 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $112, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 xorl %r14d, %r14d leaq 96(%rsp), %rdi xorl %esi, %esi callq gettimeofday movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %rbx .p2align 4, 0x90 .LBB1_1: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 callq rand movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx sarl %ecx movl %ecx, (%rbx,%r14,4) incq %r14 cmpq $1000000, %r14 # imm = 0xF4240 jne .LBB1_1 # %bb.2: # %_Z11random_intsPii.exit movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_3: # %.lr.ph.i22 # =>This Inner Loop Header: Depth=1 callq rand movl %eax, %ecx shrl 
$31, %ecx addl %eax, %ecx sarl %ecx movl %ecx, (%r14,%r15,4) incq %r15 cmpq $1000000, %r15 # imm = 0xF4240 jne .LBB1_3 # %bb.4: # %_Z11random_intsPii.exit26 movl $4000000, %edi # imm = 0x3D0900 callq malloc movq %rax, %r15 movabsq $4294967808, %rdx # imm = 0x100000200 leaq 1441(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_6 # %bb.5: movq %rbx, 88(%rsp) movq %r14, 80(%rsp) movq %r15, 72(%rsp) leaq 88(%rsp), %rax movq %rax, (%rsp) leaq 80(%rsp), %rax movq %rax, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 16(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d movq %rsp, %r9 movl $_Z3addPiS_S_, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_6: movq %rbx, %rdi callq hipFree movq %r14, %rdi callq hipFree movq %r15, %rdi callq hipFree movq %rsp, %rdi xorl %esi, %esi callq gettimeofday movq (%rsp), %rax subq 96(%rsp), %rax imulq $1000000, %rax, %rax # imm = 0xF4240 addq 8(%rsp), %rax subq 104(%rsp), %rax cvtsi2ss %rax, %xmm0 divss .LCPI1_0(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movl $1000000, %esi # imm = 0xF4240 movb $1, %al callq printf xorl %eax, %eax addq $112, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .globl _Z11random_intsPii # -- Begin function _Z11random_intsPii .p2align 4, 0x90 .type _Z11random_intsPii,@function _Z11random_intsPii: # @_Z11random_intsPii .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB2_4 # %bb.1: # %.lr.ph.preheader pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset %rbx, -32 
.cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rdi, %rbx movl %esi, %r14d xorl %r15d, %r15d .p2align 4, 0x90 .LBB2_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 callq rand movl %eax, %ecx shrl $31, %ecx addl %eax, %ecx sarl %ecx movl %ecx, (%rbx,%r15,4) incq %r15 cmpq %r15, %r14 jne .LBB2_2 # %bb.3: popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 .cfi_restore %rbx .cfi_restore %r14 .cfi_restore %r15 .LBB2_4: # %._crit_edge retq .Lfunc_end2: .size _Z11random_intsPii, .Lfunc_end2-_Z11random_intsPii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z3addPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z3addPiS_S_,@object # @_Z3addPiS_S_ .section .rodata,"a",@progbits .globl _Z3addPiS_S_ .p2align 3, 0x0 _Z3addPiS_S_: .quad 
_Z18__device_stub__addPiS_S_ .size _Z3addPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Number of loop is: %d\nRunning time is: %f ms\n" .size .L.str, 46 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z3addPiS_S_" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z18__device_stub__addPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z3addPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> void cpu_matrixmult(double *a,double *b, double **c, int n, int m, int p) { int k,i,j; double r; /*kij*/ for (k=0; k<p; k++){ for (i=0; i<n; i++){ r = a[i*p + k]; for(j=0; j<m; j++){ (*c)[i*m+j] += r * b[k*m+j]; } } } } int main(int argc, char *argv[]) { int i, j; // loop counters //int gpucount = 0; // Count of available GPUs //int Grid_Dim = 1; //Grid dimension, x and y, square //int Block_Dim = 1; //Block dimension, x and y, square int n,m,p; // matrix dimension double *a,*b,*c; int size_a, size_b, size_c; // number of bytes in arrays cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also //cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- if (argc<4) { printf("Usage: Task1GPUsp <n> <m> <p>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); */ size_a = n * p * sizeof(double); // number of bytes in total in arrays size_b = p * m * sizeof(double); // number of bytes in total in arrays size_c = n * m * sizeof(double); // number of bytes in total in arrays a = (double*) malloc(size_a); // dynamically allocated memory for arrays on host b = (double*) malloc(size_b); c = (double*) malloc(size_c); // results from CPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (double) rand() / (double) RAND_MAX; //a[i * p + j] = (double) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } // printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (double) rand() / (double) RAND_MAX; //b[i * m + j] = (double) (i+j); // printf("%.2f ", b[i * m + j]); } //printf("\n"); } // ------------- COMPUTATION DONE 
ON HOST CPU ---------------------------- cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); cpu_matrixmult(a,b, &c, n, m, p); //kij implementation) cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } free(a); free(b); free(c); return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <math.h> void cpu_matrixmult(double *a,double *b, double **c, int n, int m, int p) { int k,i,j; double r; /*kij*/ for (k=0; k<p; k++){ for (i=0; i<n; i++){ r = a[i*p + k]; for(j=0; j<m; j++){ (*c)[i*m+j] += r * b[k*m+j]; } } } } int main(int argc, char *argv[]) { int i, j; // loop counters //int gpucount = 0; // Count of available GPUs //int Grid_Dim = 1; //Grid dimension, x and y, square //int Block_Dim = 1; //Block dimension, x and y, square int n,m,p; // matrix dimension double *a,*b,*c; int size_a, size_b, size_c; // number of bytes in arrays cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also //cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- if (argc<4) { printf("Usage: Task1GPUsp <n> <m> <p>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); */ size_a = n * p * sizeof(double); // number of bytes in total in arrays size_b = p * m * sizeof(double); // number of bytes in total in arrays size_c = n * m * sizeof(double); // number of bytes in total in arrays a = (double*) malloc(size_a); // dynamically allocated memory for arrays on host b = (double*) malloc(size_b); c = (double*) malloc(size_c); // results from CPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (double) rand() / (double) RAND_MAX; //a[i * p + j] = (double) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } // printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (double) rand() / (double) RAND_MAX; //b[i * m + j] = (double) (i+j); // printf("%.2f ", b[i * m + j]); } //printf("\n"); } // ------------- COMPUTATION DONE 
ON HOST CPU ---------------------------- cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); cpu_matrixmult(a,b, &c, n, m, p); //kij implementation) cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } free(a); free(b); free(c); return 0; }
.file "tmpxft_00017926_00000000-6_Task1CPUdp.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z14cpu_matrixmultPdS_PS_iii .type _Z14cpu_matrixmultPdS_PS_iii, @function _Z14cpu_matrixmultPdS_PS_iii: .LFB2057: .cfi_startproc endbr64 testl %r9d, %r9d jle .L14 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 movq %rdi, %rax movq %rsi, %rbp movq %rdx, %rdi movl %ecx, %r12d movl %r8d, %r11d movq %rax, %rsi movslq %r9d, %r13 salq $3, %r13 movl $0, %edx movl $0, %eax movslq %r8d, %r14 movq %rsi, %rcx movl %r9d, %r8d jmp .L5 .L9: movslq %edx, %r15 salq $3, %r15 movq %rcx, %r10 movl $0, %ebx movl $0, %r9d movl %eax, -8(%rsp) movq %rcx, -16(%rsp) movl %edx, -4(%rsp) .L8: movsd (%r10), %xmm1 testl %r11d, %r11d jle .L6 movslq %ebx, %rdx leaq 0(,%rdx,8), %rax leaq (%r14,%rdx), %rsi salq $3, %rsi negq %rdx leaq (%r15,%rdx,8), %rcx addq %rbp, %rcx .L7: movq %rax, %rdx addq (%rdi), %rdx movapd %xmm1, %xmm0 mulsd (%rcx,%rax), %xmm0 addsd (%rdx), %xmm0 movsd %xmm0, (%rdx) addq $8, %rax cmpq %rsi, %rax jne .L7 .L6: addl $1, %r9d addq %r13, %r10 addl %r11d, %ebx cmpl %r9d, %r12d jne .L8 movl -8(%rsp), %eax movq -16(%rsp), %rcx movl -4(%rsp), %edx .L10: addl $1, %eax addq $8, %rcx addl %r11d, %edx cmpl %eax, %r8d je .L3 .L5: testl %r12d, %r12d jg .L9 jmp .L10 .L3: popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 
popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L14: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 .cfi_restore 14 .cfi_restore 15 ret .cfi_endproc .LFE2057: .size _Z14cpu_matrixmultPdS_PS_iii, .-_Z14cpu_matrixmultPdS_PS_iii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Usage: Task1GPUsp <n> <m> <p>\n" .align 8 .LC2: .string "Time to calculate results on CPU: %f ms.\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC3: .string "%.2f " .LC4: .string "\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $120, %rsp .cfi_def_cfa_offset 176 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax cmpl $3, %edi jle .L41 movq %rsi, %rbp movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %rbx movl %eax, (%rsp) movq 16(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r13 movl %eax, 4(%rsp) movq 24(%rbp), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movq %rax, %r15 movq %rax, 8(%rsp) movl %eax, %r12d movl %eax, %edi imull %ebx, %edi sall $3, %edi movslq %edi, %rdi call malloc@PLT movq %rax, %rbp movq %rax, 48(%rsp) movl %r15d, %edi imull %r13d, %edi sall $3, %edi movslq %edi, %rdi call malloc@PLT movq %rax, 40(%rsp) movl %ebx, %edi imull %r13d, %edi sall $3, %edi movslq %edi, %rdi call malloc@PLT movq %rax, 16(%rsp) movq %rax, 80(%rsp) movl $12345, %edi call srand@PLT testl %ebx, %ebx jle .L19 movl %r15d, 24(%rsp) movl $0, %r15d movl $0, %r14d movq %r13, 32(%rsp) movq %rbp, %r13 movq %rbx, 56(%rsp) jmp .L20 .L41: leaq .LC0(%rip), %rsi movl $2, %edi call 
__printf_chk@PLT movl $-1, %edi call exit@PLT .L22: movslq %r15d, %rdx leaq 0(%r13,%rdx,8), %rbx movq 8(%rsp), %rax leal -1(%rax), %eax addq %rdx, %rax leaq 8(%r13,%rax,8), %rbp .L21: call rand@PLT pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 divsd .LC1(%rip), %xmm0 movsd %xmm0, (%rbx) addq $8, %rbx cmpq %rbp, %rbx jne .L21 .L23: addl $1, %r14d movl 24(%rsp), %eax addl %eax, %r15d movl (%rsp), %eax cmpl %eax, %r14d je .L38 .L20: testl %r12d, %r12d jg .L22 jmp .L23 .L38: movq 32(%rsp), %r13 movq 56(%rsp), %rbx .L19: cmpl $0, 8(%rsp) jle .L24 movl %r13d, 24(%rsp) movl $0, %r15d movl $0, %r14d movq %r13, 32(%rsp) movl %r12d, %r13d movq 40(%rsp), %r12 movq %rbx, 56(%rsp) jmp .L25 .L27: movslq %r15d, %rdx leaq (%r12,%rdx,8), %rbx movq 32(%rsp), %rax leal -1(%rax), %eax addq %rdx, %rax leaq 8(%r12,%rax,8), %rbp .L26: call rand@PLT pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 divsd .LC1(%rip), %xmm0 movsd %xmm0, (%rbx) addq $8, %rbx cmpq %rbp, %rbx jne .L26 .L28: addl $1, %r14d movl 24(%rsp), %eax addl %eax, %r15d cmpl %r13d, %r14d je .L39 .L25: cmpl $0, 4(%rsp) jg .L27 jmp .L28 .L39: movq 32(%rsp), %r13 movq 56(%rsp), %rbx .L24: leaq 88(%rsp), %rdi call cudaEventCreate@PLT leaq 96(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 88(%rsp), %rdi call cudaEventRecord@PLT leaq 80(%rsp), %rdx movl 8(%rsp), %r9d movl %r13d, %r8d movl (%rsp), %ecx movq 40(%rsp), %rsi movq 48(%rsp), %rdi call _Z14cpu_matrixmultPdS_PS_iii movl $0, %esi movq 96(%rsp), %rdi call cudaEventRecord@PLT movq 96(%rsp), %rdi call cudaEventSynchronize@PLT leaq 76(%rsp), %rdi movq 96(%rsp), %rdx movq 88(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 76(%rsp), %xmm0 leaq .LC2(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT testl %ebx, %ebx jle .L29 movl %r13d, %r15d movl $0, %r14d movl $0, %r12d leal -1(%r13), %eax movl %eax, 8(%rsp) movq 16(%rsp), %rax addq $8, %rax movq %rax, 24(%rsp) leaq .LC3(%rip), %r13 jmp .L30 .L32: movslq %r14d, %rdx movq 16(%rsp), %rax leaq 
(%rax,%rdx,8), %rbx movl 8(%rsp), %eax addq %rdx, %rax movq 24(%rsp), %rcx leaq (%rcx,%rax,8), %rbp .L31: movsd (%rbx), %xmm0 movq %r13, %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT addq $8, %rbx cmpq %rbp, %rbx jne .L31 .L33: leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r12d addl %r15d, %r14d movl (%rsp), %eax cmpl %eax, %r12d je .L29 .L30: cmpl $0, 4(%rsp) jg .L32 jmp .L33 .L29: movq 48(%rsp), %rdi call free@PLT movq 40(%rsp), %rdi call free@PLT movq 16(%rsp), %rdi call free@PLT movq 104(%rsp), %rax subq %fs:40, %rax jne .L42 movl $0, %eax addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L42: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long -4194304 .long 1105199103 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 
8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <math.h> void cpu_matrixmult(double *a,double *b, double **c, int n, int m, int p) { int k,i,j; double r; /*kij*/ for (k=0; k<p; k++){ for (i=0; i<n; i++){ r = a[i*p + k]; for(j=0; j<m; j++){ (*c)[i*m+j] += r * b[k*m+j]; } } } } int main(int argc, char *argv[]) { int i, j; // loop counters //int gpucount = 0; // Count of available GPUs //int Grid_Dim = 1; //Grid dimension, x and y, square //int Block_Dim = 1; //Block dimension, x and y, square int n,m,p; // matrix dimension double *a,*b,*c; int size_a, size_b, size_c; // number of bytes in arrays cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also //cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- if (argc<4) { printf("Usage: Task1GPUsp <n> <m> <p>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); */ size_a = n * p * sizeof(double); // number of bytes in total in arrays size_b = p * m * sizeof(double); // number of bytes in total in arrays size_c = n * m * sizeof(double); // number of bytes in total in arrays a = (double*) malloc(size_a); // dynamically allocated memory for arrays on host b = (double*) malloc(size_b); c = (double*) malloc(size_c); // results from CPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (double) rand() / (double) RAND_MAX; //a[i * p + j] = (double) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } // printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (double) rand() / (double) RAND_MAX; //b[i * m + j] = (double) (i+j); // printf("%.2f ", b[i * m + j]); } //printf("\n"); } // ------------- COMPUTATION DONE 
ON HOST CPU ---------------------------- cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); cpu_matrixmult(a,b, &c, n, m, p); //kij implementation) cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } free(a); free(b); free(c); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> void cpu_matrixmult(double *a,double *b, double **c, int n, int m, int p) { int k,i,j; double r; /*kij*/ for (k=0; k<p; k++){ for (i=0; i<n; i++){ r = a[i*p + k]; for(j=0; j<m; j++){ (*c)[i*m+j] += r * b[k*m+j]; } } } } int main(int argc, char *argv[]) { int i, j; // loop counters //int gpucount = 0; // Count of available GPUs //int Grid_Dim = 1; //Grid dimension, x and y, square //int Block_Dim = 1; //Block dimension, x and y, square int n,m,p; // matrix dimension double *a,*b,*c; int size_a, size_b, size_c; // number of bytes in arrays hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also //cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- if (argc<4) { printf("Usage: Task1GPUsp <n> <m> <p>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); */ size_a = n * p * sizeof(double); // number of bytes in total in arrays size_b = p * m * sizeof(double); // number of bytes in total in arrays size_c = n * m * sizeof(double); // number of bytes in total in arrays a = (double*) malloc(size_a); // dynamically allocated memory for arrays on host b = (double*) malloc(size_b); c = (double*) malloc(size_c); // results from CPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (double) rand() / (double) RAND_MAX; //a[i * p + j] = (double) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } // printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (double) rand() / (double) RAND_MAX; //b[i * m + j] = (double) (i+j); // printf("%.2f ", b[i * m + j]); } //printf("\n"); } // 
------------- COMPUTATION DONE ON HOST CPU ---------------------------- hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); cpu_matrixmult(a,b, &c, n, m, p); //kij implementation) hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } free(a); free(b); free(c); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> void cpu_matrixmult(double *a,double *b, double **c, int n, int m, int p) { int k,i,j; double r; /*kij*/ for (k=0; k<p; k++){ for (i=0; i<n; i++){ r = a[i*p + k]; for(j=0; j<m; j++){ (*c)[i*m+j] += r * b[k*m+j]; } } } } int main(int argc, char *argv[]) { int i, j; // loop counters //int gpucount = 0; // Count of available GPUs //int Grid_Dim = 1; //Grid dimension, x and y, square //int Block_Dim = 1; //Block dimension, x and y, square int n,m,p; // matrix dimension double *a,*b,*c; int size_a, size_b, size_c; // number of bytes in arrays hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also //cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- if (argc<4) { printf("Usage: Task1GPUsp <n> <m> <p>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); */ size_a = n * p * sizeof(double); // number of bytes in total in arrays size_b = p * m * sizeof(double); // number of bytes in total in arrays size_c = n * m * sizeof(double); // number of bytes in total in arrays a = (double*) malloc(size_a); // dynamically allocated memory for arrays on host b = (double*) malloc(size_b); c = (double*) malloc(size_c); // results from CPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (double) rand() / (double) RAND_MAX; //a[i * p + j] = (double) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } // printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (double) rand() / (double) RAND_MAX; //b[i * m + j] = (double) (i+j); // printf("%.2f ", b[i * m + j]); } //printf("\n"); } // 
------------- COMPUTATION DONE ON HOST CPU ---------------------------- hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); cpu_matrixmult(a,b, &c, n, m, p); //kij implementation) hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } free(a); free(b); free(c); return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> void cpu_matrixmult(double *a,double *b, double **c, int n, int m, int p) { int k,i,j; double r; /*kij*/ for (k=0; k<p; k++){ for (i=0; i<n; i++){ r = a[i*p + k]; for(j=0; j<m; j++){ (*c)[i*m+j] += r * b[k*m+j]; } } } } int main(int argc, char *argv[]) { int i, j; // loop counters //int gpucount = 0; // Count of available GPUs //int Grid_Dim = 1; //Grid dimension, x and y, square //int Block_Dim = 1; //Block dimension, x and y, square int n,m,p; // matrix dimension double *a,*b,*c; int size_a, size_b, size_c; // number of bytes in arrays hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also //cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- if (argc<4) { printf("Usage: Task1GPUsp <n> <m> <p>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); */ size_a = n * p * sizeof(double); // number of bytes in total in arrays size_b = p * m * sizeof(double); // number of bytes in total in arrays size_c = n * m * sizeof(double); // number of bytes in total in arrays a = (double*) malloc(size_a); // dynamically allocated memory for arrays on host b = (double*) malloc(size_b); c = (double*) malloc(size_c); // results from CPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (double) rand() / (double) RAND_MAX; //a[i * p + j] = (double) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } // printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (double) rand() / (double) RAND_MAX; //b[i * m + j] = (double) (i+j); // printf("%.2f ", b[i * m + j]); } //printf("\n"); } // 
------------- COMPUTATION DONE ON HOST CPU ---------------------------- hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); cpu_matrixmult(a,b, &c, n, m, p); //kij implementation) hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } free(a); free(b); free(c); return 0; }
.text .file "Task1CPUdp.hip" .globl _Z14cpu_matrixmultPdS_PS_iii # -- Begin function _Z14cpu_matrixmultPdS_PS_iii .p2align 4, 0x90 .type _Z14cpu_matrixmultPdS_PS_iii,@function _Z14cpu_matrixmultPdS_PS_iii: # @_Z14cpu_matrixmultPdS_PS_iii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, -8(%rsp) # 8-byte Spill testl %r9d, %r9d jle .LBB0_9 # %bb.1: # %.preheader.lr.ph movl %r9d, %eax movl %ecx, %r9d movl %r8d, %r10d xorl %esi, %esi xorl %ebx, %ebx jmp .LBB0_2 .p2align 4, 0x90 .LBB0_8: # %._crit_edge28 # in Loop: Header=BB0_2 Depth=1 incq %rbx addl %r8d, %esi cmpq %rax, %rbx je .LBB0_9 .LBB0_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB0_4 Depth 2 # Child Loop BB0_6 Depth 3 testl %ecx, %ecx jle .LBB0_8 # %bb.3: # %.lr.ph27 # in Loop: Header=BB0_2 Depth=1 movl %esi, %r11d movq -8(%rsp), %r14 # 8-byte Reload leaq (%r14,%r11,8), %r14 leaq (%rdi,%rbx,8), %r15 xorl %r12d, %r12d xorl %r13d, %r13d jmp .LBB0_4 .p2align 4, 0x90 .LBB0_7: # %._crit_edge # in Loop: Header=BB0_4 Depth=2 incq %r13 addl %r8d, %r12d cmpq %r9, %r13 je .LBB0_8 .LBB0_4: # Parent Loop BB0_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB0_6 Depth 3 testl %r8d, %r8d jle .LBB0_7 # %bb.5: # %.lr.ph # in Loop: Header=BB0_4 Depth=2 movl %r12d, %ebp shlq $3, %rbp movq %r13, %r11 imulq %rax, %r11 movsd (%r15,%r11,8), %xmm0 # xmm0 = mem[0],zero addq (%rdx), %rbp xorl %r11d, %r11d .p2align 4, 0x90 .LBB0_6: # Parent Loop BB0_2 Depth=1 # Parent Loop BB0_4 Depth=2 # => This Inner Loop Header: Depth=3 movsd (%r14,%r11,8), %xmm1 # xmm1 = mem[0],zero mulsd %xmm0, %xmm1 addsd (%rbp,%r11,8), %xmm1 movsd %xmm1, (%rbp,%r11,8) incq %r11 cmpq %r11, %r10 jne .LBB0_6 jmp .LBB0_7 .LBB0_9: 
# %._crit_edge30 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z14cpu_matrixmultPdS_PS_iii, .Lfunc_end0-_Z14cpu_matrixmultPdS_PS_iii .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x41dfffffffc00000 # double 2147483647 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $72, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $3, %edi jle .LBB1_29 # %bb.1: movq %rsi, %r15 movq 8(%rsi), %rdi xorl %r14d, %r14d xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r12 movq 16(%r15), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %rbx movq 24(%r15), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %r13 leal (,%r12,8), %ebp movl %ebp, %eax imull %r13d, %eax movl %ebx, %r15d imull %r13d, %r15d shll $3, %r15d imull %ebx, %ebp movslq %eax, %rdi callq malloc movq %rax, 32(%rsp) # 8-byte Spill movslq %r15d, %rdi callq malloc movq %rax, 24(%rsp) # 8-byte Spill movslq %ebp, %rdi callq malloc movq %rax, 8(%rsp) # 8-byte Spill movl $12345, %edi # imm = 0x3039 callq srand movq %r13, %rax movq %r12, 56(%rsp) # 8-byte Spill testl %r12d, %r12d movq %r13, (%rsp) # 8-byte Spill jle .LBB1_7 # %bb.2: # %.preheader60.lr.ph movl 56(%rsp), %ecx # 4-byte Reload movq %rcx, 16(%rsp) # 8-byte Spill movl %eax, %r13d xorl %ebp, %ebp jmp .LBB1_3 .p2align 4, 0x90 .LBB1_6: # %._crit_edge # in Loop: Header=BB1_3 Depth=1 incq %rbp movq 
(%rsp), %rax # 8-byte Reload addl %eax, %r14d cmpq 16(%rsp), %rbp # 8-byte Folded Reload je .LBB1_7 .LBB1_3: # %.preheader60 # =>This Loop Header: Depth=1 # Child Loop BB1_5 Depth 2 testl %eax, %eax jle .LBB1_6 # %bb.4: # %.lr.ph # in Loop: Header=BB1_3 Depth=1 movl %r14d, %eax movq 32(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,8), %r15 xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_5: # Parent Loop BB1_3 Depth=1 # => This Inner Loop Header: Depth=2 callq rand movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 divsd %xmm1, %xmm0 movsd %xmm0, (%r15,%r12,8) incq %r12 cmpq %r12, %r13 jne .LBB1_5 jmp .LBB1_6 .LBB1_7: # %.preheader59 testl %eax, %eax jle .LBB1_13 # %bb.8: # %.preheader58.lr.ph movl (%rsp), %eax # 4-byte Reload movq %rax, 16(%rsp) # 8-byte Spill movl %ebx, %r15d xorl %r13d, %r13d xorl %ebp, %ebp jmp .LBB1_9 .p2align 4, 0x90 .LBB1_12: # %._crit_edge65 # in Loop: Header=BB1_9 Depth=1 incq %rbp addl %ebx, %r13d cmpq 16(%rsp), %rbp # 8-byte Folded Reload je .LBB1_13 .LBB1_9: # %.preheader58 # =>This Loop Header: Depth=1 # Child Loop BB1_11 Depth 2 testl %ebx, %ebx jle .LBB1_12 # %bb.10: # %.lr.ph64 # in Loop: Header=BB1_9 Depth=1 movl %r13d, %eax movq 24(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,8), %r14 xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_11: # Parent Loop BB1_9 Depth=1 # => This Inner Loop Header: Depth=2 callq rand movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 divsd %xmm1, %xmm0 movsd %xmm0, (%r14,%r12,8) incq %r12 cmpq %r12, %r15 jne .LBB1_11 jmp .LBB1_12 .LBB1_13: # %._crit_edge67 leaq 64(%rsp), %rdi callq hipEventCreate leaq 40(%rsp), %rdi callq hipEventCreate movq 64(%rsp), %rdi xorl %r14d, %r14d xorl %esi, %esi callq hipEventRecord movq (%rsp), %rax # 8-byte Reload testl %eax, %eax movq 8(%rsp), %r12 # 8-byte Reload movq 56(%rsp), %r13 # 8-byte Reload jle .LBB1_22 # %bb.14: # %.preheader.lr.ph.i movl %eax, %eax movl %r13d, %ecx movl %ebx, %edx xorl %esi, %esi jmp 
.LBB1_15 .p2align 4, 0x90 .LBB1_21: # %._crit_edge28.i # in Loop: Header=BB1_15 Depth=1 incq %rsi addl %ebx, %r14d cmpq %rax, %rsi je .LBB1_22 .LBB1_15: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB1_17 Depth 2 # Child Loop BB1_19 Depth 3 testl %r13d, %r13d jle .LBB1_21 # %bb.16: # %.lr.ph27.i # in Loop: Header=BB1_15 Depth=1 movl %r14d, %edi movq 24(%rsp), %r8 # 8-byte Reload leaq (%r8,%rdi,8), %rdi movq 32(%rsp), %r8 # 8-byte Reload leaq (%r8,%rsi,8), %r8 xorl %r9d, %r9d xorl %r10d, %r10d jmp .LBB1_17 .p2align 4, 0x90 .LBB1_20: # %._crit_edge.i # in Loop: Header=BB1_17 Depth=2 incq %r10 addl %ebx, %r9d cmpq %rcx, %r10 je .LBB1_21 .LBB1_17: # Parent Loop BB1_15 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB1_19 Depth 3 testl %ebx, %ebx jle .LBB1_20 # %bb.18: # %.lr.ph.i # in Loop: Header=BB1_17 Depth=2 movq %r10, %r15 imulq %rax, %r15 movl %r9d, %r11d leaq (%r12,%r11,8), %r11 movsd (%r8,%r15,8), %xmm0 # xmm0 = mem[0],zero xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_19: # Parent Loop BB1_15 Depth=1 # Parent Loop BB1_17 Depth=2 # => This Inner Loop Header: Depth=3 movsd (%rdi,%r15,8), %xmm1 # xmm1 = mem[0],zero mulsd %xmm0, %xmm1 addsd (%r11,%r15,8), %xmm1 movsd %xmm1, (%r11,%r15,8) incq %r15 cmpq %r15, %rdx jne .LBB1_19 jmp .LBB1_20 .LBB1_22: # %_Z14cpu_matrixmultPdS_PS_iii.exit movq 40(%rsp), %rdi xorl %r14d, %r14d xorl %esi, %esi callq hipEventRecord movq 40(%rsp), %rdi callq hipEventSynchronize movq 64(%rsp), %rsi movq 40(%rsp), %rdx leaq 52(%rsp), %rdi callq hipEventElapsedTime movss 52(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %edi movb $1, %al callq printf testl %r13d, %r13d jle .LBB1_28 # %bb.23: # %.preheader.lr.ph movl %r13d, %eax movq %rax, (%rsp) # 8-byte Spill movl %ebx, %ebp xorl %r15d, %r15d jmp .LBB1_24 .p2align 4, 0x90 .LBB1_27: # %._crit_edge74 # in Loop: Header=BB1_24 Depth=1 movl $10, %edi callq putchar@PLT incq %r15 addl %ebx, %r14d cmpq (%rsp), %r15 # 8-byte Folded Reload je .LBB1_28 
.LBB1_24: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_26 Depth 2 testl %ebx, %ebx jle .LBB1_27 # %bb.25: # %.lr.ph73 # in Loop: Header=BB1_24 Depth=1 movl %r14d, %eax movq 8(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,8), %r13 xorl %r12d, %r12d .p2align 4, 0x90 .LBB1_26: # Parent Loop BB1_24 Depth=1 # => This Inner Loop Header: Depth=2 movsd (%r13,%r12,8), %xmm0 # xmm0 = mem[0],zero movl $.L.str.2, %edi movb $1, %al callq printf incq %r12 cmpq %r12, %rbp jne .LBB1_26 jmp .LBB1_27 .LBB1_28: # %._crit_edge76 movq 32(%rsp), %rdi # 8-byte Reload callq free movq 24(%rsp), %rdi # 8-byte Reload callq free movq 8(%rsp), %rdi # 8-byte Reload callq free xorl %eax, %eax addq $72, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB1_29: .cfi_def_cfa_offset 128 movl $.Lstr, %edi callq puts@PLT movl $-1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "Time to calculate results on CPU: %f ms.\n" .size .L.str.1, 42 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%.2f " .size .L.str.2, 7 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Usage: Task1GPUsp <n> <m> <p>" .size .Lstr, 30 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata