system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8calcAreaPiPfS0_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */
/* 0x000fe20000000f00 */
/*0020*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0030*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */
/* 0x000fca0000000f00 */
/*0040*/ LDG.E R8, [R4.64] ; /* 0x0000000604087981 */
/* 0x000ea2000c1e1900 */
/*0050*/ HFMA2.MMA R10, -RZ, RZ, 1.25, 0 ; /* 0x3d000000ff0a7435 */
/* 0x000fe200000001ff */
/*0060*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0070*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0080*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0090*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fc60008011604 */
/*00a0*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e620000002100 */
/*00b0*/ I2F.U32 R3, R0 ; /* 0x0000000000037306 */
/* 0x001e300000201000 */
/*00c0*/ I2F.U32 R6, R2 ; /* 0x0000000200067306 */
/* 0x002e620000201000 */
/*00d0*/ FADD R3, R3, R3 ; /* 0x0000000303037221 */
/* 0x001fc80000000000 */
/*00e0*/ FFMA R3, R3, R10, -1 ; /* 0xbf80000003037423 */
/* 0x000fe4000000000a */
/*00f0*/ FMUL R6, R6, 0.0625 ; /* 0x3d80000006067820 */
/* 0x002fc80000400000 */
/*0100*/ FFMA R7, R6, 0.00390625, R3 ; /* 0x3b80000006077823 */
/* 0x000fe20000000003 */
/*0110*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fe40008000f00 */
/*0120*/ ISETP.GE.AND P0, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x004fda0003f06270 */
/*0130*/ @!P0 BRA 0x790 ; /* 0x0000065000008947 */
/* 0x000fea0003800000 */
/*0140*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff0a7624 */
/* 0x000fe200078e00ff */
/*0150*/ MOV R11, c[0x0][0x16c] ; /* 0x00005b00000b7a02 */
/* 0x000fca0000000f00 */
/*0160*/ LDG.E R4, [R10.64] ; /* 0x000000060a047981 */
/* 0x000ea2000c1e1900 */
/*0170*/ IADD3 R5, R8.reuse, -0x1, RZ ; /* 0xffffffff08057810 */
/* 0x040fe40007ffe0ff */
/*0180*/ MOV R9, RZ ; /* 0x000000ff00097202 */
/* 0x000fe40000000f00 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R5, 0x3, PT ; /* 0x000000030500780c */
/* 0x000fe40003f06070 */
/*01a0*/ LOP3.LUT R5, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308057812 */
/* 0x000fe200078ec0ff */
/*01b0*/ FFMA R7, R4, -0.5, R7 ; /* 0xbf00000004077823 */
/* 0x004fd40000000007 */
/*01c0*/ @!P0 BRA 0x630 ; /* 0x0000046000008947 */
/* 0x000fea0003800000 */
/*01d0*/ IADD3 R6, R8, -R5, RZ ; /* 0x8000000508067210 */
/* 0x000fe40007ffe0ff */
/*01e0*/ MOV R9, RZ ; /* 0x000000ff00097202 */
/* 0x000fe40000000f00 */
/*01f0*/ FADD R17, R4, R7 ; /* 0x0000000704117221 */
/* 0x000fe20000000000 */
/*0200*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fe20007ffe0ff */
/*0210*/ BSSY B0, 0x300 ; /* 0x000000e000007945 */
/* 0x000fe40003800000 */
/*0220*/ FFMA R14, -R17, R17, 1 ; /* 0x3f800000110e7423 */
/* 0x000fe20000000111 */
/*0230*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fc60003f05270 */
/*0240*/ MUFU.RSQ R7, R14 ; /* 0x0000000e00077308 */
/* 0x0000620000001400 */
/*0250*/ IADD3 R8, R14, -0xd000000, RZ ; /* 0xf30000000e087810 */
/* 0x000fc80007ffe0ff */
/*0260*/ ISETP.GT.U32.AND P1, PT, R8, 0x727fffff, PT ; /* 0x727fffff0800780c */
/* 0x000fda0003f24070 */
/*0270*/ @!P1 BRA 0x2b0 ; /* 0x0000003000009947 */
/* 0x000fea0003800000 */
/*0280*/ MOV R15, 0x2a0 ; /* 0x000002a0000f7802 */
/* 0x003fe40000000f00 */
/*0290*/ CALL.REL.NOINC 0x900 ; /* 0x0000066000007944 */
/* 0x000fea0003c00000 */
/*02a0*/ BRA 0x2f0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*02b0*/ FMUL.FTZ R8, R14, R7 ; /* 0x000000070e087220 */
/* 0x003fe40000410000 */
/*02c0*/ FMUL.FTZ R10, R7, 0.5 ; /* 0x3f000000070a7820 */
/* 0x000fe40000410000 */
/*02d0*/ FFMA R7, -R8, R8, R14 ; /* 0x0000000808077223 */
/* 0x000fc8000000010e */
/*02e0*/ FFMA R8, R7, R10, R8 ; /* 0x0000000a07087223 */
/* 0x000fe40000000008 */
/*02f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0300*/ FADD R7, R4.reuse, R17 ; /* 0x0000001104077221 */
/* 0x040fe20000000000 */
/*0310*/ BSSY B0, 0x400 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0320*/ FFMA R9, R4, R8, R9 ; /* 0x0000000804097223 */
/* 0x000fe40000000009 */
/*0330*/ FFMA R14, -R7, R7, 1 ; /* 0x3f800000070e7423 */
/* 0x000fc80000000107 */
/*0340*/ MUFU.RSQ R11, R14 ; /* 0x0000000e000b7308 */
/* 0x0000620000001400 */
/*0350*/ IADD3 R10, R14, -0xd000000, RZ ; /* 0xf30000000e0a7810 */
/* 0x000fc80007ffe0ff */
/*0360*/ ISETP.GT.U32.AND P1, PT, R10, 0x727fffff, PT ; /* 0x727fffff0a00780c */
/* 0x000fda0003f24070 */
/*0370*/ @!P1 BRA 0x3b0 ; /* 0x0000003000009947 */
/* 0x000fea0003800000 */
/*0380*/ MOV R15, 0x3a0 ; /* 0x000003a0000f7802 */
/* 0x003fe40000000f00 */
/*0390*/ CALL.REL.NOINC 0x900 ; /* 0x0000056000007944 */
/* 0x000fea0003c00000 */
/*03a0*/ BRA 0x3f0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*03b0*/ FMUL.FTZ R8, R14, R11 ; /* 0x0000000b0e087220 */
/* 0x003fe40000410000 */
/*03c0*/ FMUL.FTZ R10, R11, 0.5 ; /* 0x3f0000000b0a7820 */
/* 0x000fe40000410000 */
/*03d0*/ FFMA R11, -R8, R8, R14 ; /* 0x00000008080b7223 */
/* 0x000fc8000000010e */
/*03e0*/ FFMA R8, R11, R10, R8 ; /* 0x0000000a0b087223 */
/* 0x000fe40000000008 */
/*03f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0400*/ FADD R7, R4.reuse, R7 ; /* 0x0000000704077221 */
/* 0x040fe20000000000 */
/*0410*/ BSSY B0, 0x500 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0420*/ FFMA R9, R4, R8, R9 ; /* 0x0000000804097223 */
/* 0x000fe40000000009 */
/*0430*/ FFMA R14, -R7, R7, 1 ; /* 0x3f800000070e7423 */
/* 0x000fc80000000107 */
/*0440*/ MUFU.RSQ R11, R14 ; /* 0x0000000e000b7308 */
/* 0x0000620000001400 */
/*0450*/ IADD3 R10, R14, -0xd000000, RZ ; /* 0xf30000000e0a7810 */
/* 0x000fc80007ffe0ff */
/*0460*/ ISETP.GT.U32.AND P1, PT, R10, 0x727fffff, PT ; /* 0x727fffff0a00780c */
/* 0x000fda0003f24070 */
/*0470*/ @!P1 BRA 0x4b0 ; /* 0x0000003000009947 */
/* 0x000fea0003800000 */
/*0480*/ MOV R15, 0x4a0 ; /* 0x000004a0000f7802 */
/* 0x003fe40000000f00 */
/*0490*/ CALL.REL.NOINC 0x900 ; /* 0x0000046000007944 */
/* 0x000fea0003c00000 */
/*04a0*/ BRA 0x4f0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*04b0*/ FMUL.FTZ R8, R14, R11 ; /* 0x0000000b0e087220 */
/* 0x003fe40000410000 */
/*04c0*/ FMUL.FTZ R10, R11, 0.5 ; /* 0x3f0000000b0a7820 */
/* 0x000fe40000410000 */
/*04d0*/ FFMA R11, -R8, R8, R14 ; /* 0x00000008080b7223 */
/* 0x000fc8000000010e */
/*04e0*/ FFMA R8, R11, R10, R8 ; /* 0x0000000a0b087223 */
/* 0x000fe40000000008 */
/*04f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0500*/ FADD R7, R4.reuse, R7 ; /* 0x0000000704077221 */
/* 0x040fe20000000000 */
/*0510*/ BSSY B0, 0x610 ; /* 0x000000f000007945 */
/* 0x000fe20003800000 */
/*0520*/ FFMA R16, R4, R8, R9 ; /* 0x0000000804107223 */
/* 0x000fe40000000009 */
/*0530*/ FFMA R14, -R7, R7, 1 ; /* 0x3f800000070e7423 */
/* 0x000fc80000000107 */
/*0540*/ MUFU.RSQ R11, R14 ; /* 0x0000000e000b7308 */
/* 0x0000620000001400 */
/*0550*/ IADD3 R10, R14, -0xd000000, RZ ; /* 0xf30000000e0a7810 */
/* 0x000fc80007ffe0ff */
/*0560*/ ISETP.GT.U32.AND P1, PT, R10, 0x727fffff, PT ; /* 0x727fffff0a00780c */
/* 0x000fda0003f24070 */
/*0570*/ @!P1 BRA 0x5c0 ; /* 0x0000004000009947 */
/* 0x000fea0003800000 */
/*0580*/ MOV R15, 0x5a0 ; /* 0x000005a0000f7802 */
/* 0x003fe40000000f00 */
/*0590*/ CALL.REL.NOINC 0x900 ; /* 0x0000036000007944 */
/* 0x000fea0003c00000 */
/*05a0*/ IMAD.MOV.U32 R9, RZ, RZ, R8 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0008 */
/*05b0*/ BRA 0x600 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*05c0*/ FMUL.FTZ R9, R14, R11 ; /* 0x0000000b0e097220 */
/* 0x003fe40000410000 */
/*05d0*/ FMUL.FTZ R10, R11, 0.5 ; /* 0x3f0000000b0a7820 */
/* 0x000fe40000410000 */
/*05e0*/ FFMA R8, -R9, R9, R14 ; /* 0x0000000909087223 */
/* 0x000fc8000000010e */
/*05f0*/ FFMA R9, R8, R10, R9 ; /* 0x0000000a08097223 */
/* 0x000fe40000000009 */
/*0600*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0610*/ FFMA R9, R4, R9, R16 ; /* 0x0000000904097223 */
/* 0x000fe20000000010 */
/*0620*/ @P0 BRA 0x1f0 ; /* 0xfffffbc000000947 */
/* 0x000fea000383ffff */
/*0630*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0640*/ @!P0 BRA 0x790 ; /* 0x0000014000008947 */
/* 0x000fea0003800000 */
/*0650*/ FADD R7, R4, R7 ; /* 0x0000000704077221 */
/* 0x000fe20000000000 */
/*0660*/ BSSY B0, 0x750 ; /* 0x000000e000007945 */
/* 0x000fe60003800000 */
/*0670*/ FFMA R14, -R7, R7, 1 ; /* 0x3f800000070e7423 */
/* 0x000fc80000000107 */
/*0680*/ MUFU.RSQ R11, R14 ; /* 0x0000000e000b7308 */
/* 0x0000620000001400 */
/*0690*/ IADD3 R6, R14, -0xd000000, RZ ; /* 0xf30000000e067810 */
/* 0x000fc80007ffe0ff */
/*06a0*/ ISETP.GT.U32.AND P0, PT, R6, 0x727fffff, PT ; /* 0x727fffff0600780c */
/* 0x000fda0003f04070 */
/*06b0*/ @!P0 BRA 0x700 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*06c0*/ MOV R15, 0x6e0 ; /* 0x000006e0000f7802 */
/* 0x003fe40000000f00 */
/*06d0*/ CALL.REL.NOINC 0x900 ; /* 0x0000022000007944 */
/* 0x000fea0003c00000 */
/*06e0*/ MOV R6, R8 ; /* 0x0000000800067202 */
/* 0x000fe20000000f00 */
/*06f0*/ BRA 0x740 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0700*/ FMUL.FTZ R6, R14, R11 ; /* 0x0000000b0e067220 */
/* 0x003fe40000410000 */
/*0710*/ FMUL.FTZ R8, R11, 0.5 ; /* 0x3f0000000b087820 */
/* 0x000fe40000410000 */
/*0720*/ FFMA R11, -R6, R6, R14 ; /* 0x00000006060b7223 */
/* 0x000fc8000000010e */
/*0730*/ FFMA R6, R11, R8, R6 ; /* 0x000000080b067223 */
/* 0x000fe40000000006 */
/*0740*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0750*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fe20007ffe0ff */
/*0760*/ FFMA R9, R4, R6, R9 ; /* 0x0000000604097223 */
/* 0x000fc60000000009 */
/*0770*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0780*/ @P0 BRA 0x650 ; /* 0xfffffec000000947 */
/* 0x000fea000383ffff */
/*0790*/ STS [R2.X4], R9 ; /* 0x0000000902007388 */
/* 0x0001e20000004800 */
/*07a0*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fe40003f25270 */
/*07b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fd60003f05270 */
/*07c0*/ @!P1 BRA 0x8a0 ; /* 0x000000d000009947 */
/* 0x000fea0003800000 */
/*07d0*/ SHF.L.U32 R4, R2, 0x2, RZ ; /* 0x0000000202047819 */
/* 0x001fe400000006ff */
/*07e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*07f0*/ ISETP.GE.U32.AND P1, PT, R2, R3, PT ; /* 0x000000030200720c */
/* 0x000fda0003f26070 */
/*0800*/ @!P1 LEA R5, R3.reuse, R4, 0x2 ; /* 0x0000000403059211 */
/* 0x040fe200078e10ff */
/*0810*/ @!P1 LDS R6, [R2.X4] ; /* 0x0000000002069984 */
/* 0x000fea0000004800 */
/*0820*/ @!P1 LDS R5, [R5] ; /* 0x0000000005059984 */
/* 0x000e240000000800 */
/*0830*/ @!P1 FADD R7, R6, R5 ; /* 0x0000000506079221 */
/* 0x001fe20000000000 */
/*0840*/ IADD3 R6, R3.reuse, 0x1, RZ ; /* 0x0000000103067810 */
/* 0x040fe40007ffe0ff */
/*0850*/ LEA.HI R3, R3, R3, RZ, 0x1 ; /* 0x0000000303037211 */
/* 0x000fc400078f08ff */
/*0860*/ @!P1 STS [R2.X4], R7 ; /* 0x0000000702009388 */
/* 0x0001e20000004800 */
/*0870*/ ISETP.GT.U32.AND P1, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fe40003f24070 */
/*0880*/ SHF.R.S32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fd60000011403 */
/*0890*/ @P1 BRA 0x7e0 ; /* 0xffffff4000001947 */
/* 0x001fea000383ffff */
/*08a0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*08b0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*08c0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*08d0*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fca00078e0003 */
/*08e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*08f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0900*/ LOP3.LUT P1, RZ, R14, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff0eff7812 */
/* 0x000fda000782c0ff */
/*0910*/ @!P1 IMAD.MOV.U32 R8, RZ, RZ, R14 ; /* 0x000000ffff089224 */
/* 0x000fe200078e000e */
/*0920*/ @!P1 BRA 0xa30 ; /* 0x0000010000009947 */
/* 0x000fea0003800000 */
/*0930*/ FSETP.GEU.FTZ.AND P1, PT, R14, RZ, PT ; /* 0x000000ff0e00720b */
/* 0x000fda0003f3e000 */
/*0940*/ @!P1 MOV R8, 0x7fffffff ; /* 0x7fffffff00089802 */
/* 0x000fe20000000f00 */
/*0950*/ @!P1 BRA 0xa30 ; /* 0x000000d000009947 */
/* 0x000fea0003800000 */
/*0960*/ FSETP.GTU.FTZ.AND P1, PT, |R14|, +INF , PT ; /* 0x7f8000000e00780b */
/* 0x000fda0003f3c200 */
/*0970*/ @P1 FADD.FTZ R8, R14, 1 ; /* 0x3f8000000e081421 */
/* 0x000fe20000010000 */
/*0980*/ @P1 BRA 0xa30 ; /* 0x000000a000001947 */
/* 0x000fea0003800000 */
/*0990*/ FSETP.NEU.FTZ.AND P1, PT, |R14|, +INF , PT ; /* 0x7f8000000e00780b */
/* 0x000fda0003f3d200 */
/*09a0*/ @P1 FFMA R10, R14, 1.84467440737095516160e+19, RZ ; /* 0x5f8000000e0a1823 */
/* 0x000fc800000000ff */
/*09b0*/ @P1 MUFU.RSQ R11, R10 ; /* 0x0000000a000b1308 */
/* 0x000e240000001400 */
/*09c0*/ @P1 FMUL.FTZ R13, R10, R11 ; /* 0x0000000b0a0d1220 */
/* 0x001fe40000410000 */
/*09d0*/ @P1 FMUL.FTZ R11, R11, 0.5 ; /* 0x3f0000000b0b1820 */
/* 0x000fe40000410000 */
/*09e0*/ @P1 FADD.FTZ R8, -R13, -RZ ; /* 0x800000ff0d081221 */
/* 0x000fc80000010100 */
/*09f0*/ @P1 FFMA R12, R13, R8, R10 ; /* 0x000000080d0c1223 */
/* 0x000fe2000000000a */
/*0a00*/ @!P1 MOV R8, R14 ; /* 0x0000000e00089202 */
/* 0x000fc60000000f00 */
/*0a10*/ @P1 FFMA R11, R12, R11, R13 ; /* 0x0000000b0c0b1223 */
/* 0x000fc8000000000d */
/*0a20*/ @P1 FMUL.FTZ R8, R11, 2.3283064365386962891e-10 ; /* 0x2f8000000b081820 */
/* 0x000fe40000410000 */
/*0a30*/ HFMA2.MMA R11, -RZ, RZ, 0, 0 ; /* 0x00000000ff0b7435 */
/* 0x000fe200000001ff */
/*0a40*/ MOV R10, R15 ; /* 0x0000000f000a7202 */
/* 0x000fca0000000f00 */
/*0a50*/ RET.REL.NODEC R10 0x0 ; /* 0xfffff5a00a007950 */
/* 0x000fea0003c3ffff */
/*0a60*/ BRA 0xa60; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8calcAreaPiPfS0_
.globl _Z8calcAreaPiPfS0_
.p2align 8
.type _Z8calcAreaPiPfS0_,@function
_Z8calcAreaPiPfS0_:
s_load_b64 s[2:3], s[0:1], 0x0
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_load_b32 s3, s[2:3], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_3
s_load_b64 s[6:7], s[0:1], 0x8
v_cvt_f32_u32_e32 v1, s4
v_cvt_f32_u32_e32 v2, v0
v_mov_b32_e32 v3, -1.0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_add_f32 v1, v1, v1 :: v_dual_mul_f32 v2, 0x3d800000, v2
v_fmamk_f32 v1, v1, 0x3d000000, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_dual_fmamk_f32 v2, v2, 0x3b800000, v1 :: v_dual_mov_b32 v1, 0
s_waitcnt lgkmcnt(0)
s_load_b32 s5, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
v_fmac_f32_e64 v2, s5, -0.5
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_add_f32_e32 v2, s5, v2
s_add_i32 s3, s3, -1
s_cmp_eq_u32 s3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v3, -v2, v2, 1.0
v_mul_f32_e32 v4, 0x4f800000, v3
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v3, v3, v4, vcc_lo
v_sqrt_f32_e32 v4, v3
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v5, -1, v4
v_add_nc_u32_e32 v6, 1, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v7, -v5, v4, v3
v_fma_f32 v8, -v6, v4, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ge_f32_e64 s2, 0, v7
v_cndmask_b32_e64 v4, v4, v5, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_f32_e64 s2, 0, v8
v_cndmask_b32_e64 v4, v4, v6, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v5, 0x37800000, v4
v_cndmask_b32_e32 v4, v4, v5, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v3, 0x260
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v3, v4, v3, vcc_lo
v_fmac_f32_e32 v1, s5, v3
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v1, 0
.LBB0_4:
s_set_inst_prefetch_distance 0x2
s_load_b32 s2, s[0:1], 0x24
v_lshlrev_b32_e32 v2, 2, v0
ds_store_b32 v2, v1
s_waitcnt lgkmcnt(0)
v_cmp_lt_u16_e64 s3, s2, 2
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_9
s_and_b32 s2, 0xffff, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_lshr_b32 s2, s2, 1
s_branch .LBB0_7
.p2align 6
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_gt_u32 s2, 1
s_mov_b32 s2, s3
s_cbranch_scc0 .LBB0_9
.LBB0_7:
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_6
v_add_lshl_u32 v1, s2, v0, 2
ds_load_b32 v1, v1
ds_load_b32 v3, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v1, v1, v3
ds_store_b32 v2, v1
s_branch .LBB0_6
.LBB0_9:
s_mov_b32 s5, 0
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_11
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_lshl_b64 s[2:3], s[4:5], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_11:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8calcAreaPiPfS0_
.amdhsa_group_segment_fixed_size 1024
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8calcAreaPiPfS0_, .Lfunc_end0-_Z8calcAreaPiPfS0_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1024
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8calcAreaPiPfS0_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8calcAreaPiPfS0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00162233_00000000-6_pi_area_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z8calcAreaPiPfS0_PiPfS0_
.type _Z32__device_stub__Z8calcAreaPiPfS0_PiPfS0_, @function
_Z32__device_stub__Z8calcAreaPiPfS0_PiPfS0_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8calcAreaPiPfS0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z8calcAreaPiPfS0_PiPfS0_, .-_Z32__device_stub__Z8calcAreaPiPfS0_PiPfS0_
.globl _Z8calcAreaPiPfS0_
.type _Z8calcAreaPiPfS0_, @function
_Z8calcAreaPiPfS0_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z8calcAreaPiPfS0_PiPfS0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8calcAreaPiPfS0_, .-_Z8calcAreaPiPfS0_
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "pi"
.LC3:
.string "\n==\n==\t%20s = %15.10f\n"
.LC4:
.string "total rectangles"
.LC5:
.string "==\t%20s = %15d\n"
.LC6:
.string "CUDA threads"
.LC7:
.string "==\t%20s = %15d\n==\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $216, %rsp
.cfi_def_cfa_offset 240
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
movl $10000000, %eax
cmpl $2, %edi
je .L19
.L12:
leal 8191(%rax), %ebp
testl %eax, %eax
cmovns %eax, %ebp
sarl $13, %ebp
movl %ebp, 8(%rsp)
sall $13, %ebp
pxor %xmm1, %xmm1
cvtsi2ssl %ebp, %xmm1
movss .LC1(%rip), %xmm0
divss %xmm1, %xmm0
movss %xmm0, 12(%rsp)
leaq 16(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $128, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $32, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L13:
leaq 64(%rsp), %rbx
movl $2, %ecx
movl $128, %edx
movq 32(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rax
leaq 192(%rsp), %rdx
pxor %xmm0, %xmm0
.L14:
addss (%rax), %xmm0
addq $4, %rax
cmpq %rdx, %rax
jne .L14
addss %xmm0, %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC2(%rip), %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %ebp, %ecx
leaq .LC4(%rip), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8192, %ecx
leaq .LC6(%rip), %rdx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L21
movl $0, %eax
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
jmp .L12
.L20:
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z32__device_stub__Z8calcAreaPiPfS0_PiPfS0_
jmp .L13
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z8calcAreaPiPfS0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z8calcAreaPiPfS0_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "pi_area_cuda.hip"
.globl _Z23__device_stub__calcAreaPiPfS0_ # -- Begin function _Z23__device_stub__calcAreaPiPfS0_
.p2align 4, 0x90
.type _Z23__device_stub__calcAreaPiPfS0_,@function
_Z23__device_stub__calcAreaPiPfS0_: # @_Z23__device_stub__calcAreaPiPfS0_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8calcAreaPiPfS0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z23__device_stub__calcAreaPiPfS0_, .Lfunc_end0-_Z23__device_stub__calcAreaPiPfS0_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x40000000 # float 2
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $240, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -16
movl $1220, %ebx # imm = 0x4C4
cmpl $2, %edi
jne .LBB1_2
# %bb.1:
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
leal 8191(%rax), %ebx
testl %eax, %eax
cmovnsl %eax, %ebx
sarl $13, %ebx
.LBB1_2:
movl %ebx, 12(%rsp)
shll $13, %ebx
cvtsi2ss %ebx, %xmm0
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movss %xmm1, 8(%rsp)
leaq 32(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $128, %esi
callq hipMalloc
movq 32(%rsp), %rdi
leaq 12(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
leaq 8(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967328, %rdi # imm = 0x100000020
leaq 224(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z8calcAreaPiPfS0_, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 16(%rsp), %rsi
leaq 112(%rsp), %rdi
movl $128, %edx
movl $2, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
addss 112(%rsp,%rax,4), %xmm0
incq %rax
cmpq $32, %rax
jne .LBB1_5
# %bb.6:
addss %xmm0, %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movl $.L.str.1, %esi
movb $1, %al
callq printf
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl %ebx, %edx
xorl %eax, %eax
callq printf
movl $.L.str.4, %edi
movl $.L.str.5, %esi
movl $8192, %edx # imm = 0x2000
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $240, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8calcAreaPiPfS0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8calcAreaPiPfS0_,@object # @_Z8calcAreaPiPfS0_
.section .rodata,"a",@progbits
.globl _Z8calcAreaPiPfS0_
.p2align 3, 0x0
_Z8calcAreaPiPfS0_:
.quad _Z23__device_stub__calcAreaPiPfS0_
.size _Z8calcAreaPiPfS0_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n==\n==\t%20s = %15.10f\n"
.size .L.str, 23
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "pi"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "==\t%20s = %15d\n"
.size .L.str.2, 16
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "total rectangles"
.size .L.str.3, 17
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "==\t%20s = %15d\n==\n\n"
.size .L.str.4, 20
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "CUDA threads"
.size .L.str.5, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8calcAreaPiPfS0_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__calcAreaPiPfS0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8calcAreaPiPfS0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | struct ProgramGPUColorRGB
{
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
__syncthreads();
indices[(threadIdx.x)]++;
} | code for sm_80
Function : FindPixel
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fee0000010000 */
/*0050*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x180] ; /* 0x0000600002027625 */
/* 0x001fca00078e0003 */
/*0060*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0070*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x004fca0007ffe0ff */
/*0080*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0090*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00a0*/ BRA 0xa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | struct ProgramGPUColorRGB
{
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
__syncthreads();
indices[(threadIdx.x)]++;
} | .file "tmpxft_000a236f_00000000-6_CUDAFYSOURCETEMP.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2032:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2032:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi
.type _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi, @function
_Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi:
.LFB2054:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 32(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
movq 208(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq FindPixel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi, .-_Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi
.globl FindPixel
.type FindPixel, @function
FindPixel:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size FindPixel, .-FindPixel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "FindPixel"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq FindPixel(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | struct ProgramGPUColorRGB
{
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
__syncthreads();
indices[(threadIdx.x)]++;
} | #include <hip/hip_runtime.h>
struct ProgramGPUColorRGB
{
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
__syncthreads();
indices[(threadIdx.x)]++;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
struct ProgramGPUColorRGB
{
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
__syncthreads();
indices[(threadIdx.x)]++;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected FindPixel
.globl FindPixel
.p2align 8
.type FindPixel,@function
FindPixel:
s_load_b64 s[0:1], s[0:1], 0x20
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
global_load_b32 v1, v0, s[0:1]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel FindPixel
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 60
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size FindPixel, .Lfunc_end0-FindPixel
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .offset: 56
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 60
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: FindPixel
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: FindPixel.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
struct ProgramGPUColorRGB
{
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
__syncthreads();
indices[(threadIdx.x)]++;
} | .text
.file "CUDAFYSOURCETEMP.hip"
.globl __device_stub__FindPixel # -- Begin function __device_stub__FindPixel
.p2align 4, 0x90
.type __device_stub__FindPixel,@function
__device_stub__FindPixel: # @__device_stub__FindPixel
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 80(%rsp)
movl %ecx, 16(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $FindPixel, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size __device_stub__FindPixel, .Lfunc_end0-__device_stub__FindPixel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $FindPixel, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type FindPixel,@object # @FindPixel
.section .rodata,"a",@progbits
.globl FindPixel
.p2align 3, 0x0
FindPixel:
.quad __device_stub__FindPixel
.size FindPixel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "FindPixel"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__FindPixel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym FindPixel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : FindPixel
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fee0000010000 */
/*0050*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x180] ; /* 0x0000600002027625 */
/* 0x001fca00078e0003 */
/*0060*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0070*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x004fca0007ffe0ff */
/*0080*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0090*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00a0*/ BRA 0xa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected FindPixel
.globl FindPixel
.p2align 8
.type FindPixel,@function
FindPixel:
s_load_b64 s[0:1], s[0:1], 0x20
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
global_load_b32 v1, v0, s[0:1]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel FindPixel
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 60
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size FindPixel, .Lfunc_end0-FindPixel
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .offset: 56
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 60
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: FindPixel
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: FindPixel.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000a236f_00000000-6_CUDAFYSOURCETEMP.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2032:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2032:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi
.type _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi, @function
_Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi:
.LFB2054:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 32(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
movq 208(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq FindPixel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi, .-_Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi
.globl FindPixel
.type FindPixel, @function
FindPixel:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z58__device_stub__Z9FindPixelP18ProgramGPUColorRGBiS0_iPiiPfiP18ProgramGPUColorRGBiS0_iPiiPfi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size FindPixel, .-FindPixel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "FindPixel"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq FindPixel(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "CUDAFYSOURCETEMP.hip"
.globl __device_stub__FindPixel # -- Begin function __device_stub__FindPixel
.p2align 4, 0x90
.type __device_stub__FindPixel,@function
__device_stub__FindPixel: # @__device_stub__FindPixel
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 80(%rsp)
movl %ecx, 16(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $FindPixel, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size __device_stub__FindPixel, .Lfunc_end0-__device_stub__FindPixel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $FindPixel, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type FindPixel,@object # @FindPixel
.section .rodata,"a",@progbits
.globl FindPixel
.p2align 3, 0x0
FindPixel:
.quad __device_stub__FindPixel
.size FindPixel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "FindPixel"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__FindPixel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym FindPixel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
__global__ void GPuEuler(float *y, float t_0, float y_0 ,int N, float delta) {
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if (myID <N) {
y[myID] = y_0;
for(int i=0;i<myID;i++){
float j_del = delta * i+ t_0;
y[myID] =y[myID]+ delta * (9*j_del*j_del-4*j_del+5);
}
}
}
// Section 1.b driver: for step sizes delta = 10^-i (i = 1..4), time a GPU
// Euler pass over t in [0, 10] with y_0 = 4 and print the elapsed GPU time.
// The events bracket the device allocation plus the kernel launch, matching
// the original timing scheme.
int main(int argc, char const *argv[])
{
printf("seccion 1.b\n");
int hilos1b = 256,n1b,bloque1b;   // threads/block, element count, block count
float delta_t1b,tiempoGPU1b;
float *dev_e1b,*hst_y;
cudaEvent_t start1b, end1b;
for(int i=1;i<5;i++) {
delta_t1b=powf(10,-i);            // step size 10^-i
n1b=10/delta_t1b +1;              // samples covering [0, 10]
hst_y = (float*) malloc(n1b*sizeof(float));
if (hst_y == NULL) {              // fix: malloc result was not checked
fprintf(stderr, "malloc failed\n");
return 1;
}
bloque1b = ceil((float) n1b /hilos1b);   // ceil-div so every element gets a thread
cudaEventCreate(&start1b);
cudaEventCreate(&end1b);
cudaEventRecord(start1b,0);
if (cudaMalloc( (void**) &dev_e1b, n1b*sizeof(float)) != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed\n");  // fix: device allocation was not checked
free(hst_y);
return 1;
}
GPuEuler<<<bloque1b,hilos1b>>>(dev_e1b,0,4,n1b,delta_t1b);
cudaError_t err = cudaGetLastError();    // fix: catch launch-configuration errors
if (err != cudaSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
cudaFree(dev_e1b);
free(hst_y);
return 1;
}
cudaEventRecord(end1b,0);
cudaEventSynchronize(end1b);             // blocks until the kernel is done
cudaEventElapsedTime(&tiempoGPU1b,start1b,end1b);
cudaMemcpy(hst_y,dev_e1b,n1b*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(dev_e1b);
free(hst_y);
cudaEventDestroy(start1b);
cudaEventDestroy(end1b);
printf("%f\n",tiempoGPU1b);
}
return 0;
}
Function : _Z8GPuEulerPfffif
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R5, R0, c[0x0][0x0], R5 ; /* 0x0000000000057a24 */
/* 0x001fca00078e0205 */
/*0040*/ ISETP.GE.AND P0, PT, R5, c[0x0][0x170], PT ; /* 0x00005c0005007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0070*/ ISETP.GE.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */
/* 0x000fe20003f06270 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ MOV R7, c[0x0][0x16c] ; /* 0x00005b0000077a02 */
/* 0x000fce0000000f00 */
/*00a0*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*00b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e2000c101904 */
/*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00d0*/ IADD3 R0, R5.reuse, -0x1, RZ ; /* 0xffffffff05007810 */
/* 0x040fe20007ffe0ff */
/*00e0*/ BSSY B0, 0x3c0 ; /* 0x000002d000007945 */
/* 0x000fe20003800000 */
/*00f0*/ LOP3.LUT R4, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305047812 */
/* 0x000fe400078ec0ff */
/*0100*/ ISETP.GE.U32.AND P1, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f26070 */
/*0110*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*0120*/ MOV R7, c[0x0][0x16c] ; /* 0x00005b0000077a02 */
/* 0x001fe40000000f00 */
/*0130*/ MOV R0, RZ ; /* 0x000000ff00007202 */
/* 0x000fce0000000f00 */
/*0140*/ @!P1 BRA 0x3b0 ; /* 0x0000026000009947 */
/* 0x000fea0003800000 */
/*0150*/ HFMA2.MMA R0, -RZ, RZ, 0, 0 ; /* 0x00000000ff007435 */
/* 0x000fe200000001ff */
/*0160*/ IADD3 R5, R5, -R4, RZ ; /* 0x8000000405057210 */
/* 0x000fd20007ffe0ff */
/*0170*/ IADD3 R10, R0.reuse, 0x1, RZ ; /* 0x00000001000a7810 */
/* 0x040fe20007ffe0ff */
/*0180*/ I2F R6, R0 ; /* 0x0000000000067306 */
/* 0x0000620000201400 */
/*0190*/ IADD3 R13, R0.reuse, 0x2, RZ ; /* 0x00000002000d7810 */
/* 0x040fe40007ffe0ff */
/*01a0*/ IADD3 R15, R0.reuse, 0x3, RZ ; /* 0x00000003000f7810 */
/* 0x040fe40007ffe0ff */
/*01b0*/ MOV R16, c[0x0][0x174] ; /* 0x00005d0000107a02 */
/* 0x000fe40000000f00 */
/*01c0*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fe20007ffe0ff */
/*01d0*/ I2F R10, R10 ; /* 0x0000000a000a7306 */
/* 0x000ea20000201400 */
/*01e0*/ IADD3 R0, R0, 0x4, RZ ; /* 0x0000000400007810 */
/* 0x001fc40007ffe0ff */
/*01f0*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fca0003f25270 */
/*0200*/ I2F R13, R13 ; /* 0x0000000d000d7306 */
/* 0x000e220000201400 */
/*0210*/ FFMA R6, R6, R16, c[0x0][0x168] ; /* 0x00005a0006067623 */
/* 0x002fc80000000010 */
/*0220*/ FMUL R9, R6.reuse, 9 ; /* 0x4110000006097820 */
/* 0x040fe40000400000 */
/*0230*/ FMUL R8, R6, 4 ; /* 0x4080000006087820 */
/* 0x000fe20000400000 */
/*0240*/ I2F R15, R15 ; /* 0x0000000f000f7306 */
/* 0x000e620000201400 */
/*0250*/ FFMA R11, R10, R16, c[0x0][0x168] ; /* 0x00005a000a0b7623 */
/* 0x004fe40000000010 */
/*0260*/ FFMA R8, R6, R9, -R8 ; /* 0x0000000906087223 */
/* 0x000fe40000000808 */
/*0270*/ FMUL R12, R11.reuse, 9 ; /* 0x411000000b0c7820 */
/* 0x040fe40000400000 */
/*0280*/ FMUL R10, R11, 4 ; /* 0x408000000b0a7820 */
/* 0x000fc40000400000 */
/*0290*/ FFMA R14, R13, R16, c[0x0][0x168] ; /* 0x00005a000d0e7623 */
/* 0x001fe40000000010 */
/*02a0*/ FADD R8, R8, 5 ; /* 0x40a0000008087421 */
/* 0x000fe40000000000 */
/*02b0*/ FFMA R10, R11, R12, -R10 ; /* 0x0000000c0b0a7223 */
/* 0x000fe4000000080a */
/*02c0*/ FMUL R9, R14.reuse, 9 ; /* 0x411000000e097820 */
/* 0x040fe40000400000 */
/*02d0*/ FFMA R13, R15, R16, c[0x0][0x168] ; /* 0x00005a000f0d7623 */
/* 0x002fe40000000010 */
/*02e0*/ FMUL R6, R14, 4 ; /* 0x408000000e067820 */
/* 0x000fc40000400000 */
/*02f0*/ FFMA R8, R8, c[0x0][0x174], R7 ; /* 0x00005d0008087a23 */
/* 0x000fe40000000007 */
/*0300*/ FMUL R12, R13.reuse, 9 ; /* 0x411000000d0c7820 */
/* 0x040fe40000400000 */
/*0310*/ FMUL R11, R13.reuse, 4 ; /* 0x408000000d0b7820 */
/* 0x040fe40000400000 */
/*0320*/ FADD R7, R10, 5 ; /* 0x40a000000a077421 */
/* 0x000fe40000000000 */
/*0330*/ FFMA R6, R14, R9, -R6 ; /* 0x000000090e067223 */
/* 0x000fe40000000806 */
/*0340*/ FFMA R11, R13, R12, -R11 ; /* 0x0000000c0d0b7223 */
/* 0x000fc4000000080b */
/*0350*/ FFMA R7, R7, c[0x0][0x174], R8 ; /* 0x00005d0007077a23 */
/* 0x000fe40000000008 */
/*0360*/ FADD R6, R6, 5 ; /* 0x40a0000006067421 */
/* 0x000fe40000000000 */
/*0370*/ FADD R8, R11, 5 ; /* 0x40a000000b087421 */
/* 0x000fe40000000000 */
/*0380*/ FFMA R7, R6, c[0x0][0x174], R7 ; /* 0x00005d0006077a23 */
/* 0x000fc80000000007 */
/*0390*/ FFMA R7, R8, c[0x0][0x174], R7 ; /* 0x00005d0008077a23 */
/* 0x000fe20000000007 */
/*03a0*/ @P1 BRA 0x170 ; /* 0xfffffdc000001947 */
/* 0x000fea000383ffff */
/*03b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03c0*/ BSSY B0, 0x4b0 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*03d0*/ @!P0 BRA 0x4a0 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*03e0*/ MOV R8, c[0x0][0x174] ; /* 0x00005d0000087a02 */
/* 0x000fe40000000f00 */
/*03f0*/ I2F R5, R0 ; /* 0x0000000000057306 */
/* 0x0000620000201400 */
/*0400*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fc80007ffe0ff */
/*0410*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*0420*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x001fe20007ffe0ff */
/*0430*/ FFMA R6, R5, R8, c[0x0][0x168] ; /* 0x00005a0005067623 */
/* 0x002fc80000000008 */
/*0440*/ FMUL R9, R6.reuse, 9 ; /* 0x4110000006097820 */
/* 0x040fe40000400000 */
/*0450*/ FMUL R5, R6, -4 ; /* 0xc080000006057820 */
/* 0x000fc80000400000 */
/*0460*/ FFMA R5, R6, R9, R5 ; /* 0x0000000906057223 */
/* 0x000fc80000000005 */
/*0470*/ FADD R6, R5, 5 ; /* 0x40a0000005067421 */
/* 0x000fc80000000000 */
/*0480*/ FFMA R7, R6, c[0x0][0x174], R7 ; /* 0x00005d0006077a23 */
/* 0x000fe20000000007 */
/*0490*/ @P0 BRA 0x3f0 ; /* 0xffffff5000000947 */
/* 0x000fea000383ffff */
/*04a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*04c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04d0*/ BRA 0x4d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Euler-style accumulation of delta * (9*t^2 - 4*t + 5) for one output slot.
// One thread per element; thread myID performs myID accumulation steps starting
// from y_0. Grid/block layout: 1D; caller must launch at least N total threads.
__global__ void GPuEuler(float *y, float t_0, float y_0 ,int N, float delta) {
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if (myID <N) {   // guard: the grid rarely divides N evenly
// Fix: the original did a global-memory read-modify-write of y[myID] on
// every loop iteration. Accumulate in a register and store once at the
// end; the sequence of additions is unchanged, so results are identical.
float acc = y_0;
for(int i=0;i<myID;i++){
float j_del = delta * i+ t_0;   // t_i = t_0 + i*delta
acc = acc + delta * (9*j_del*j_del-4*j_del+5);
}
y[myID] = acc;
}
}
// Section 1.b driver: for step sizes delta = 10^-i (i = 1..4), time a GPU
// Euler pass over t in [0, 10] with y_0 = 4 and print the elapsed GPU time.
// The events bracket the device allocation plus the kernel launch, matching
// the original timing scheme.
int main(int argc, char const *argv[])
{
printf("seccion 1.b\n");
int hilos1b = 256,n1b,bloque1b;   // threads/block, element count, block count
float delta_t1b,tiempoGPU1b;
float *dev_e1b,*hst_y;
cudaEvent_t start1b, end1b;
for(int i=1;i<5;i++) {
delta_t1b=powf(10,-i);            // step size 10^-i
n1b=10/delta_t1b +1;              // samples covering [0, 10]
hst_y = (float*) malloc(n1b*sizeof(float));
if (hst_y == NULL) {              // fix: malloc result was not checked
fprintf(stderr, "malloc failed\n");
return 1;
}
bloque1b = ceil((float) n1b /hilos1b);   // ceil-div so every element gets a thread
cudaEventCreate(&start1b);
cudaEventCreate(&end1b);
cudaEventRecord(start1b,0);
if (cudaMalloc( (void**) &dev_e1b, n1b*sizeof(float)) != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed\n");  // fix: device allocation was not checked
free(hst_y);
return 1;
}
GPuEuler<<<bloque1b,hilos1b>>>(dev_e1b,0,4,n1b,delta_t1b);
cudaError_t err = cudaGetLastError();    // fix: catch launch-configuration errors
if (err != cudaSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
cudaFree(dev_e1b);
free(hst_y);
return 1;
}
cudaEventRecord(end1b,0);
cudaEventSynchronize(end1b);             // blocks until the kernel is done
cudaEventElapsedTime(&tiempoGPU1b,start1b,end1b);
cudaMemcpy(hst_y,dev_e1b,n1b*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(dev_e1b);
free(hst_y);
cudaEventDestroy(start1b);
cudaEventDestroy(end1b);
printf("%f\n",tiempoGPU1b);
}
return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z8GPuEulerPfffifPfffif
.type _Z31__device_stub__Z8GPuEulerPfffifPfffif, @function
_Z31__device_stub__Z8GPuEulerPfffifPfffif:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movl %esi, 12(%rsp)
movss %xmm2, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8GPuEulerPfffif(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z8GPuEulerPfffifPfffif, .-_Z31__device_stub__Z8GPuEulerPfffifPfffif
.globl _Z8GPuEulerPfffif
.type _Z8GPuEulerPfffif, @function
_Z8GPuEulerPfffif:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z8GPuEulerPfffifPfffif
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8GPuEulerPfffif, .-_Z8GPuEulerPfffif
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "seccion 1.b\n"
.LC8:
.string "%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $-1, %r12d
jmp .L14
.L12:
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $256, 60(%rsp)
movl $1, 64(%rsp)
cvttss2sil 8(%rsp), %eax
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq 32(%rsp), %rdi
call cudaEventDestroy@PLT
movq 40(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 20(%rsp), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
subl $1, %r12d
cmpl $-5, %r12d
je .L19
.L14:
pxor %xmm1, %xmm1
cvtsi2ssl %r12d, %xmm1
movss .LC1(%rip), %xmm0
call powf@PLT
movss %xmm0, 12(%rsp)
movss .LC1(%rip), %xmm4
divss %xmm0, %xmm4
movaps %xmm4, %xmm0
addss .LC2(%rip), %xmm0
cvttss2sil %xmm0, %r13d
movslq %r13d, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
pxor %xmm0, %xmm0
cvtsi2ssl %r13d, %xmm0
mulss .LC3(%rip), %xmm0
movss %xmm0, 8(%rsp)
movss .LC9(%rip), %xmm1
andps %xmm0, %xmm1
movss .LC4(%rip), %xmm2
ucomiss %xmm1, %xmm2
jbe .L12
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
movaps %xmm0, %xmm2
cmpnless %xmm1, %xmm2
movss .LC2(%rip), %xmm3
andps %xmm3, %xmm2
addss %xmm2, %xmm1
movss .LC9(%rip), %xmm2
andnps %xmm0, %xmm2
orps %xmm2, %xmm1
movss %xmm1, 8(%rsp)
jmp .L12
.L18:
movss 12(%rsp), %xmm2
movl %r13d, %esi
movss .LC6(%rip), %xmm1
pxor %xmm0, %xmm0
movq 24(%rsp), %rdi
call _Z31__device_stub__Z8GPuEulerPfffifPfffif
jmp .L13
.L19:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z8GPuEulerPfffif"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z8GPuEulerPfffif(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1092616192
.align 4
.LC2:
.long 1065353216
.align 4
.LC3:
.long 998244352
.align 4
.LC4:
.long 1258291200
.align 4
.LC6:
.long 1082130432
.align 4
.LC9:
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Euler-style accumulation of delta * (9*t^2 - 4*t + 5) for one output slot.
// One thread per element; thread myID performs myID accumulation steps starting
// from y_0. Grid/block layout: 1D; caller must launch at least N total threads.
__global__ void GPuEuler(float *y, float t_0, float y_0 ,int N, float delta) {
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if (myID <N) {   // guard: the grid rarely divides N evenly
// Fix: the original did a global-memory read-modify-write of y[myID] on
// every loop iteration. Accumulate in a register and store once at the
// end; the sequence of additions is unchanged, so results are identical.
float acc = y_0;
for(int i=0;i<myID;i++){
float j_del = delta * i+ t_0;   // t_i = t_0 + i*delta
acc = acc + delta * (9*j_del*j_del-4*j_del+5);
}
y[myID] = acc;
}
}
// Section 1.b driver: for step sizes delta = 10^-i (i = 1..4), time a GPU
// Euler pass over t in [0, 10] with y_0 = 4 and print the elapsed GPU time.
// The events bracket the device allocation plus the kernel launch, matching
// the original timing scheme.
int main(int argc, char const *argv[])
{
printf("seccion 1.b\n");
int hilos1b = 256,n1b,bloque1b;   // threads/block, element count, block count
float delta_t1b,tiempoGPU1b;
float *dev_e1b,*hst_y;
cudaEvent_t start1b, end1b;
for(int i=1;i<5;i++) {
delta_t1b=powf(10,-i);            // step size 10^-i
n1b=10/delta_t1b +1;              // samples covering [0, 10]
hst_y = (float*) malloc(n1b*sizeof(float));
if (hst_y == NULL) {              // fix: malloc result was not checked
fprintf(stderr, "malloc failed\n");
return 1;
}
bloque1b = ceil((float) n1b /hilos1b);   // ceil-div so every element gets a thread
cudaEventCreate(&start1b);
cudaEventCreate(&end1b);
cudaEventRecord(start1b,0);
if (cudaMalloc( (void**) &dev_e1b, n1b*sizeof(float)) != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed\n");  // fix: device allocation was not checked
free(hst_y);
return 1;
}
GPuEuler<<<bloque1b,hilos1b>>>(dev_e1b,0,4,n1b,delta_t1b);
cudaError_t err = cudaGetLastError();    // fix: catch launch-configuration errors
if (err != cudaSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
cudaFree(dev_e1b);
free(hst_y);
return 1;
}
cudaEventRecord(end1b,0);
cudaEventSynchronize(end1b);             // blocks until the kernel is done
cudaEventElapsedTime(&tiempoGPU1b,start1b,end1b);
cudaMemcpy(hst_y,dev_e1b,n1b*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(dev_e1b);
free(hst_y);
cudaEventDestroy(start1b);
cudaEventDestroy(end1b);
printf("%f\n",tiempoGPU1b);
}
return 0;
}
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Euler-style accumulation of delta * (9*t^2 - 4*t + 5) for one output slot.
// One thread per element; thread myID performs myID accumulation steps starting
// from y_0. Grid/block layout: 1D; caller must launch at least N total threads.
__global__ void GPuEuler(float *y, float t_0, float y_0 ,int N, float delta) {
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if (myID <N) {   // guard: the grid rarely divides N evenly
// Fix: the original did a global-memory read-modify-write of y[myID] on
// every loop iteration. Accumulate in a register and store once at the
// end; the sequence of additions is unchanged, so results are identical.
float acc = y_0;
for(int i=0;i<myID;i++){
float j_del = delta * i+ t_0;   // t_i = t_0 + i*delta
acc = acc + delta * (9*j_del*j_del-4*j_del+5);
}
y[myID] = acc;
}
}
// Section 1.b driver: for step sizes delta = 10^-i (i = 1..4), time a GPU
// Euler pass over t in [0, 10] with y_0 = 4 and print the elapsed GPU time.
// The events bracket the device allocation plus the kernel launch, matching
// the original timing scheme.
int main(int argc, char const *argv[])
{
printf("seccion 1.b\n");
int hilos1b = 256,n1b,bloque1b;   // threads/block, element count, block count
float delta_t1b,tiempoGPU1b;
float *dev_e1b,*hst_y;
hipEvent_t start1b, end1b;
for(int i=1;i<5;i++) {
delta_t1b=powf(10,-i);            // step size 10^-i
n1b=10/delta_t1b +1;              // samples covering [0, 10]
hst_y = (float*) malloc(n1b*sizeof(float));
if (hst_y == NULL) {              // fix: malloc result was not checked
fprintf(stderr, "malloc failed\n");
return 1;
}
bloque1b = ceil((float) n1b /hilos1b);   // ceil-div so every element gets a thread
hipEventCreate(&start1b);
hipEventCreate(&end1b);
hipEventRecord(start1b,0);
if (hipMalloc( (void**) &dev_e1b, n1b*sizeof(float)) != hipSuccess) {
fprintf(stderr, "hipMalloc failed\n");   // fix: device allocation was not checked
free(hst_y);
return 1;
}
GPuEuler<<<bloque1b,hilos1b>>>(dev_e1b,0,4,n1b,delta_t1b);
hipError_t err = hipGetLastError();      // fix: catch launch-configuration errors
if (err != hipSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));
hipFree(dev_e1b);
free(hst_y);
return 1;
}
hipEventRecord(end1b,0);
hipEventSynchronize(end1b);              // blocks until the kernel is done
hipEventElapsedTime(&tiempoGPU1b,start1b,end1b);
hipMemcpy(hst_y,dev_e1b,n1b*sizeof(float),hipMemcpyDeviceToHost);
hipFree(dev_e1b);
free(hst_y);
hipEventDestroy(start1b);
hipEventDestroy(end1b);
printf("%f\n",tiempoGPU1b);
}
return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Euler-style accumulation of delta * (9*t^2 - 4*t + 5) for one output slot.
// One thread per element; thread myID performs myID accumulation steps starting
// from y_0. Grid/block layout: 1D; caller must launch at least N total threads.
__global__ void GPuEuler(float *y, float t_0, float y_0 ,int N, float delta) {
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if (myID <N) {   // guard: the grid rarely divides N evenly
// Fix: the original did a global-memory read-modify-write of y[myID] on
// every loop iteration. Accumulate in a register and store once at the
// end; the sequence of additions is unchanged, so results are identical.
float acc = y_0;
for(int i=0;i<myID;i++){
float j_del = delta * i+ t_0;   // t_i = t_0 + i*delta
acc = acc + delta * (9*j_del*j_del-4*j_del+5);
}
y[myID] = acc;
}
}
// Section 1.b driver: for step sizes delta = 10^-i (i = 1..4), time a GPU
// Euler pass over t in [0, 10] with y_0 = 4 and print the elapsed GPU time.
// The events bracket the device allocation plus the kernel launch, matching
// the original timing scheme.
int main(int argc, char const *argv[])
{
printf("seccion 1.b\n");
int hilos1b = 256,n1b,bloque1b;   // threads/block, element count, block count
float delta_t1b,tiempoGPU1b;
float *dev_e1b,*hst_y;
hipEvent_t start1b, end1b;
for(int i=1;i<5;i++) {
delta_t1b=powf(10,-i);            // step size 10^-i
n1b=10/delta_t1b +1;              // samples covering [0, 10]
hst_y = (float*) malloc(n1b*sizeof(float));
if (hst_y == NULL) {              // fix: malloc result was not checked
fprintf(stderr, "malloc failed\n");
return 1;
}
bloque1b = ceil((float) n1b /hilos1b);   // ceil-div so every element gets a thread
hipEventCreate(&start1b);
hipEventCreate(&end1b);
hipEventRecord(start1b,0);
if (hipMalloc( (void**) &dev_e1b, n1b*sizeof(float)) != hipSuccess) {
fprintf(stderr, "hipMalloc failed\n");   // fix: device allocation was not checked
free(hst_y);
return 1;
}
GPuEuler<<<bloque1b,hilos1b>>>(dev_e1b,0,4,n1b,delta_t1b);
hipError_t err = hipGetLastError();      // fix: catch launch-configuration errors
if (err != hipSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));
hipFree(dev_e1b);
free(hst_y);
return 1;
}
hipEventRecord(end1b,0);
hipEventSynchronize(end1b);              // blocks until the kernel is done
hipEventElapsedTime(&tiempoGPU1b,start1b,end1b);
hipMemcpy(hst_y,dev_e1b,n1b*sizeof(float),hipMemcpyDeviceToHost);
hipFree(dev_e1b);
free(hst_y);
hipEventDestroy(start1b);
hipEventDestroy(end1b);
printf("%f\n",tiempoGPU1b);
}
return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8GPuEulerPfffif
.globl _Z8GPuEulerPfffif
.p2align 8
.type _Z8GPuEulerPfffif,@function
_Z8GPuEulerPfffif:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_5
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s4, s[0:1], 0xc
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
v_mov_b32_e32 v0, s4
v_cmp_lt_i32_e32 vcc_lo, 0, v1
s_mov_b32 s2, 0
global_store_b32 v[2:3], v0, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_5
global_load_b32 v0, v[2:3], off
s_clause 0x1
s_load_b32 s3, s[0:1], 0x8
s_load_b32 s0, s[0:1], 0x14
s_mov_b32 s1, 0
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cvt_f32_i32_e32 v4, s1
s_add_i32 s1, s1, 1
v_cmp_eq_u32_e32 vcc_lo, s1, v1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f32 v4, v4, s0, s3
s_or_b32 s2, vcc_lo, s2
v_mul_f32_e32 v5, 0x41100000, v4
v_mul_f32_e32 v6, -4.0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v6, v4, v5
v_add_f32_e32 v4, 0x40a00000, v6
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v0, s0, v4
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s2
global_store_b32 v[2:3], v0, off
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8GPuEulerPfffif
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8GPuEulerPfffif, .Lfunc_end0-_Z8GPuEulerPfffif
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8GPuEulerPfffif
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8GPuEulerPfffif.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Euler-style accumulation of delta * (9*t^2 - 4*t + 5) for one output slot.
// One thread per element; thread myID performs myID accumulation steps starting
// from y_0. Grid/block layout: 1D; caller must launch at least N total threads.
__global__ void GPuEuler(float *y, float t_0, float y_0 ,int N, float delta) {
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if (myID <N) {   // guard: the grid rarely divides N evenly
// Fix: the original did a global-memory read-modify-write of y[myID] on
// every loop iteration. Accumulate in a register and store once at the
// end; the sequence of additions is unchanged, so results are identical.
float acc = y_0;
for(int i=0;i<myID;i++){
float j_del = delta * i+ t_0;   // t_i = t_0 + i*delta
acc = acc + delta * (9*j_del*j_del-4*j_del+5);
}
y[myID] = acc;
}
}
// Section 1.b driver: for step sizes delta = 10^-i (i = 1..4), time a GPU
// Euler pass over t in [0, 10] with y_0 = 4 and print the elapsed GPU time.
// The events bracket the device allocation plus the kernel launch, matching
// the original timing scheme.
int main(int argc, char const *argv[])
{
printf("seccion 1.b\n");
int hilos1b = 256,n1b,bloque1b;   // threads/block, element count, block count
float delta_t1b,tiempoGPU1b;
float *dev_e1b,*hst_y;
hipEvent_t start1b, end1b;
for(int i=1;i<5;i++) {
delta_t1b=powf(10,-i);            // step size 10^-i
n1b=10/delta_t1b +1;              // samples covering [0, 10]
hst_y = (float*) malloc(n1b*sizeof(float));
if (hst_y == NULL) {              // fix: malloc result was not checked
fprintf(stderr, "malloc failed\n");
return 1;
}
bloque1b = ceil((float) n1b /hilos1b);   // ceil-div so every element gets a thread
hipEventCreate(&start1b);
hipEventCreate(&end1b);
hipEventRecord(start1b,0);
if (hipMalloc( (void**) &dev_e1b, n1b*sizeof(float)) != hipSuccess) {
fprintf(stderr, "hipMalloc failed\n");   // fix: device allocation was not checked
free(hst_y);
return 1;
}
GPuEuler<<<bloque1b,hilos1b>>>(dev_e1b,0,4,n1b,delta_t1b);
hipError_t err = hipGetLastError();      // fix: catch launch-configuration errors
if (err != hipSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));
hipFree(dev_e1b);
free(hst_y);
return 1;
}
hipEventRecord(end1b,0);
hipEventSynchronize(end1b);              // blocks until the kernel is done
hipEventElapsedTime(&tiempoGPU1b,start1b,end1b);
hipMemcpy(hst_y,dev_e1b,n1b*sizeof(float),hipMemcpyDeviceToHost);
hipFree(dev_e1b);
free(hst_y);
hipEventDestroy(start1b);
hipEventDestroy(end1b);
printf("%f\n",tiempoGPU1b);
}
return 0;
}
.file "pregunta1b.hip"
.globl _Z23__device_stub__GPuEulerPfffif # -- Begin function _Z23__device_stub__GPuEulerPfffif
.p2align 4, 0x90
.type _Z23__device_stub__GPuEulerPfffif,@function
_Z23__device_stub__GPuEulerPfffif: # @_Z23__device_stub__GPuEulerPfffif
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movl %esi, 12(%rsp)
movss %xmm2, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8GPuEulerPfffif, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z23__device_stub__GPuEulerPfffif, .Lfunc_end0-_Z23__device_stub__GPuEulerPfffif
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x41200000 # float 10
.LCPI1_1:
.long 0x3f800000 # float 1
.LCPI1_2:
.long 0x3b800000 # float 0.00390625
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967296, %rbx # imm = 0x100000000
movl $.Lstr, %edi
callq puts@PLT
movl $-1, %r15d
leaq 256(%rbx), %r12
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_3: # in Loop: Header=BB1_1 Depth=1
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 36(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movq %r13, %rdi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq %r13, %rdi
callq free
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
decl %r15d
cmpl $-5, %r15d
je .LBB1_4
.LBB1_1: # =>This Inner Loop Header: Depth=1
cvtsi2ss %r15d, %xmm1
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
callq powf
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
movss %xmm0, 32(%rsp) # 4-byte Spill
divss %xmm0, %xmm1
addss .LCPI1_1(%rip), %xmm1
movaps %xmm1, 160(%rsp) # 16-byte Spill
cvttss2si %xmm1, %ebp
movslq %ebp, %rbx
shlq $2, %rbx
movq %rbx, %rdi
callq malloc
movq %rax, %r13
cvttps2dq 160(%rsp), %xmm0 # 16-byte Folded Reload
cvtdq2ps %xmm0, %xmm0
mulss .LCPI1_2(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r14d
leaq 16(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r14
movq %r14, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_3
# %bb.2: # in Loop: Header=BB1_1 Depth=1
movq 24(%rsp), %rax
movq %rax, 104(%rsp)
movl $0, 52(%rsp)
movl $1082130432, 48(%rsp) # imm = 0x40800000
movl %ebp, 44(%rsp)
movss 32(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 40(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 52(%rsp), %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rax
movq %rax, 128(%rsp)
leaq 44(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
movl $_Z8GPuEulerPfffif, %edi
leaq 112(%rsp), %r9
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_3
.LBB1_4:
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8GPuEulerPfffif, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8GPuEulerPfffif,@object # @_Z8GPuEulerPfffif
.section .rodata,"a",@progbits
.globl _Z8GPuEulerPfffif
.p2align 3, 0x0
_Z8GPuEulerPfffif:
.quad _Z23__device_stub__GPuEulerPfffif
.size _Z8GPuEulerPfffif, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%f\n"
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8GPuEulerPfffif"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "seccion 1.b"
.size .Lstr, 12
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__GPuEulerPfffif
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8GPuEulerPfffif
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8GPuEulerPfffif
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R5, R0, c[0x0][0x0], R5 ; /* 0x0000000000057a24 */
/* 0x001fca00078e0205 */
/*0040*/ ISETP.GE.AND P0, PT, R5, c[0x0][0x170], PT ; /* 0x00005c0005007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0070*/ ISETP.GE.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */
/* 0x000fe20003f06270 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ MOV R7, c[0x0][0x16c] ; /* 0x00005b0000077a02 */
/* 0x000fce0000000f00 */
/*00a0*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*00b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e2000c101904 */
/*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00d0*/ IADD3 R0, R5.reuse, -0x1, RZ ; /* 0xffffffff05007810 */
/* 0x040fe20007ffe0ff */
/*00e0*/ BSSY B0, 0x3c0 ; /* 0x000002d000007945 */
/* 0x000fe20003800000 */
/*00f0*/ LOP3.LUT R4, R5, 0x3, RZ, 0xc0, !PT ; /* 0x0000000305047812 */
/* 0x000fe400078ec0ff */
/*0100*/ ISETP.GE.U32.AND P1, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f26070 */
/*0110*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*0120*/ MOV R7, c[0x0][0x16c] ; /* 0x00005b0000077a02 */
/* 0x001fe40000000f00 */
/*0130*/ MOV R0, RZ ; /* 0x000000ff00007202 */
/* 0x000fce0000000f00 */
/*0140*/ @!P1 BRA 0x3b0 ; /* 0x0000026000009947 */
/* 0x000fea0003800000 */
/*0150*/ HFMA2.MMA R0, -RZ, RZ, 0, 0 ; /* 0x00000000ff007435 */
/* 0x000fe200000001ff */
/*0160*/ IADD3 R5, R5, -R4, RZ ; /* 0x8000000405057210 */
/* 0x000fd20007ffe0ff */
/*0170*/ IADD3 R10, R0.reuse, 0x1, RZ ; /* 0x00000001000a7810 */
/* 0x040fe20007ffe0ff */
/*0180*/ I2F R6, R0 ; /* 0x0000000000067306 */
/* 0x0000620000201400 */
/*0190*/ IADD3 R13, R0.reuse, 0x2, RZ ; /* 0x00000002000d7810 */
/* 0x040fe40007ffe0ff */
/*01a0*/ IADD3 R15, R0.reuse, 0x3, RZ ; /* 0x00000003000f7810 */
/* 0x040fe40007ffe0ff */
/*01b0*/ MOV R16, c[0x0][0x174] ; /* 0x00005d0000107a02 */
/* 0x000fe40000000f00 */
/*01c0*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fe20007ffe0ff */
/*01d0*/ I2F R10, R10 ; /* 0x0000000a000a7306 */
/* 0x000ea20000201400 */
/*01e0*/ IADD3 R0, R0, 0x4, RZ ; /* 0x0000000400007810 */
/* 0x001fc40007ffe0ff */
/*01f0*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fca0003f25270 */
/*0200*/ I2F R13, R13 ; /* 0x0000000d000d7306 */
/* 0x000e220000201400 */
/*0210*/ FFMA R6, R6, R16, c[0x0][0x168] ; /* 0x00005a0006067623 */
/* 0x002fc80000000010 */
/*0220*/ FMUL R9, R6.reuse, 9 ; /* 0x4110000006097820 */
/* 0x040fe40000400000 */
/*0230*/ FMUL R8, R6, 4 ; /* 0x4080000006087820 */
/* 0x000fe20000400000 */
/*0240*/ I2F R15, R15 ; /* 0x0000000f000f7306 */
/* 0x000e620000201400 */
/*0250*/ FFMA R11, R10, R16, c[0x0][0x168] ; /* 0x00005a000a0b7623 */
/* 0x004fe40000000010 */
/*0260*/ FFMA R8, R6, R9, -R8 ; /* 0x0000000906087223 */
/* 0x000fe40000000808 */
/*0270*/ FMUL R12, R11.reuse, 9 ; /* 0x411000000b0c7820 */
/* 0x040fe40000400000 */
/*0280*/ FMUL R10, R11, 4 ; /* 0x408000000b0a7820 */
/* 0x000fc40000400000 */
/*0290*/ FFMA R14, R13, R16, c[0x0][0x168] ; /* 0x00005a000d0e7623 */
/* 0x001fe40000000010 */
/*02a0*/ FADD R8, R8, 5 ; /* 0x40a0000008087421 */
/* 0x000fe40000000000 */
/*02b0*/ FFMA R10, R11, R12, -R10 ; /* 0x0000000c0b0a7223 */
/* 0x000fe4000000080a */
/*02c0*/ FMUL R9, R14.reuse, 9 ; /* 0x411000000e097820 */
/* 0x040fe40000400000 */
/*02d0*/ FFMA R13, R15, R16, c[0x0][0x168] ; /* 0x00005a000f0d7623 */
/* 0x002fe40000000010 */
/*02e0*/ FMUL R6, R14, 4 ; /* 0x408000000e067820 */
/* 0x000fc40000400000 */
/*02f0*/ FFMA R8, R8, c[0x0][0x174], R7 ; /* 0x00005d0008087a23 */
/* 0x000fe40000000007 */
/*0300*/ FMUL R12, R13.reuse, 9 ; /* 0x411000000d0c7820 */
/* 0x040fe40000400000 */
/*0310*/ FMUL R11, R13.reuse, 4 ; /* 0x408000000d0b7820 */
/* 0x040fe40000400000 */
/*0320*/ FADD R7, R10, 5 ; /* 0x40a000000a077421 */
/* 0x000fe40000000000 */
/*0330*/ FFMA R6, R14, R9, -R6 ; /* 0x000000090e067223 */
/* 0x000fe40000000806 */
/*0340*/ FFMA R11, R13, R12, -R11 ; /* 0x0000000c0d0b7223 */
/* 0x000fc4000000080b */
/*0350*/ FFMA R7, R7, c[0x0][0x174], R8 ; /* 0x00005d0007077a23 */
/* 0x000fe40000000008 */
/*0360*/ FADD R6, R6, 5 ; /* 0x40a0000006067421 */
/* 0x000fe40000000000 */
/*0370*/ FADD R8, R11, 5 ; /* 0x40a000000b087421 */
/* 0x000fe40000000000 */
/*0380*/ FFMA R7, R6, c[0x0][0x174], R7 ; /* 0x00005d0006077a23 */
/* 0x000fc80000000007 */
/*0390*/ FFMA R7, R8, c[0x0][0x174], R7 ; /* 0x00005d0008077a23 */
/* 0x000fe20000000007 */
/*03a0*/ @P1 BRA 0x170 ; /* 0xfffffdc000001947 */
/* 0x000fea000383ffff */
/*03b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03c0*/ BSSY B0, 0x4b0 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*03d0*/ @!P0 BRA 0x4a0 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*03e0*/ MOV R8, c[0x0][0x174] ; /* 0x00005d0000087a02 */
/* 0x000fe40000000f00 */
/*03f0*/ I2F R5, R0 ; /* 0x0000000000057306 */
/* 0x0000620000201400 */
/*0400*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fc80007ffe0ff */
/*0410*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*0420*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x001fe20007ffe0ff */
/*0430*/ FFMA R6, R5, R8, c[0x0][0x168] ; /* 0x00005a0005067623 */
/* 0x002fc80000000008 */
/*0440*/ FMUL R9, R6.reuse, 9 ; /* 0x4110000006097820 */
/* 0x040fe40000400000 */
/*0450*/ FMUL R5, R6, -4 ; /* 0xc080000006057820 */
/* 0x000fc80000400000 */
/*0460*/ FFMA R5, R6, R9, R5 ; /* 0x0000000906057223 */
/* 0x000fc80000000005 */
/*0470*/ FADD R6, R5, 5 ; /* 0x40a0000005067421 */
/* 0x000fc80000000000 */
/*0480*/ FFMA R7, R6, c[0x0][0x174], R7 ; /* 0x00005d0006077a23 */
/* 0x000fe20000000007 */
/*0490*/ @P0 BRA 0x3f0 ; /* 0xffffff5000000947 */
/* 0x000fea000383ffff */
/*04a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*04c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04d0*/ BRA 0x4d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8GPuEulerPfffif
.globl _Z8GPuEulerPfffif
.p2align 8
.type _Z8GPuEulerPfffif,@function
_Z8GPuEulerPfffif:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_5
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s4, s[0:1], 0xc
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
v_mov_b32_e32 v0, s4
v_cmp_lt_i32_e32 vcc_lo, 0, v1
s_mov_b32 s2, 0
global_store_b32 v[2:3], v0, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_5
global_load_b32 v0, v[2:3], off
s_clause 0x1
s_load_b32 s3, s[0:1], 0x8
s_load_b32 s0, s[0:1], 0x14
s_mov_b32 s1, 0
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cvt_f32_i32_e32 v4, s1
s_add_i32 s1, s1, 1
v_cmp_eq_u32_e32 vcc_lo, s1, v1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f32 v4, v4, s0, s3
s_or_b32 s2, vcc_lo, s2
v_mul_f32_e32 v5, 0x41100000, v4
v_mul_f32_e32 v6, -4.0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v6, v4, v5
v_add_f32_e32 v4, 0x40a00000, v6
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v0, s0, v4
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s2
global_store_b32 v[2:3], v0, off
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8GPuEulerPfffif
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8GPuEulerPfffif, .Lfunc_end0-_Z8GPuEulerPfffif
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8GPuEulerPfffif
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8GPuEulerPfffif.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001b53e4_00000000-6_pregunta1b.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z8GPuEulerPfffifPfffif
.type _Z31__device_stub__Z8GPuEulerPfffifPfffif, @function
_Z31__device_stub__Z8GPuEulerPfffifPfffif:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movl %esi, 12(%rsp)
movss %xmm2, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8GPuEulerPfffif(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z8GPuEulerPfffifPfffif, .-_Z31__device_stub__Z8GPuEulerPfffifPfffif
.globl _Z8GPuEulerPfffif
.type _Z8GPuEulerPfffif, @function
_Z8GPuEulerPfffif:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z8GPuEulerPfffifPfffif
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8GPuEulerPfffif, .-_Z8GPuEulerPfffif
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "seccion 1.b\n"
.LC8:
.string "%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $-1, %r12d
jmp .L14
.L12:
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $256, 60(%rsp)
movl $1, 64(%rsp)
cvttss2sil 8(%rsp), %eax
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq 32(%rsp), %rdi
call cudaEventDestroy@PLT
movq 40(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 20(%rsp), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
subl $1, %r12d
cmpl $-5, %r12d
je .L19
.L14:
pxor %xmm1, %xmm1
cvtsi2ssl %r12d, %xmm1
movss .LC1(%rip), %xmm0
call powf@PLT
movss %xmm0, 12(%rsp)
movss .LC1(%rip), %xmm4
divss %xmm0, %xmm4
movaps %xmm4, %xmm0
addss .LC2(%rip), %xmm0
cvttss2sil %xmm0, %r13d
movslq %r13d, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
pxor %xmm0, %xmm0
cvtsi2ssl %r13d, %xmm0
mulss .LC3(%rip), %xmm0
movss %xmm0, 8(%rsp)
movss .LC9(%rip), %xmm1
andps %xmm0, %xmm1
movss .LC4(%rip), %xmm2
ucomiss %xmm1, %xmm2
jbe .L12
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
movaps %xmm0, %xmm2
cmpnless %xmm1, %xmm2
movss .LC2(%rip), %xmm3
andps %xmm3, %xmm2
addss %xmm2, %xmm1
movss .LC9(%rip), %xmm2
andnps %xmm0, %xmm2
orps %xmm2, %xmm1
movss %xmm1, 8(%rsp)
jmp .L12
.L18:
movss 12(%rsp), %xmm2
movl %r13d, %esi
movss .LC6(%rip), %xmm1
pxor %xmm0, %xmm0
movq 24(%rsp), %rdi
call _Z31__device_stub__Z8GPuEulerPfffifPfffif
jmp .L13
.L19:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z8GPuEulerPfffif"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z8GPuEulerPfffif(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1092616192
.align 4
.LC2:
.long 1065353216
.align 4
.LC3:
.long 998244352
.align 4
.LC4:
.long 1258291200
.align 4
.LC6:
.long 1082130432
.align 4
.LC9:
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "pregunta1b.hip"
.globl _Z23__device_stub__GPuEulerPfffif # -- Begin function _Z23__device_stub__GPuEulerPfffif
.p2align 4, 0x90
.type _Z23__device_stub__GPuEulerPfffif,@function
_Z23__device_stub__GPuEulerPfffif: # @_Z23__device_stub__GPuEulerPfffif
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movl %esi, 12(%rsp)
movss %xmm2, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8GPuEulerPfffif, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z23__device_stub__GPuEulerPfffif, .Lfunc_end0-_Z23__device_stub__GPuEulerPfffif
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x41200000 # float 10
.LCPI1_1:
.long 0x3f800000 # float 1
.LCPI1_2:
.long 0x3b800000 # float 0.00390625
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967296, %rbx # imm = 0x100000000
movl $.Lstr, %edi
callq puts@PLT
movl $-1, %r15d
leaq 256(%rbx), %r12
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_3: # in Loop: Header=BB1_1 Depth=1
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 36(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movq %r13, %rdi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq %r13, %rdi
callq free
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
decl %r15d
cmpl $-5, %r15d
je .LBB1_4
.LBB1_1: # =>This Inner Loop Header: Depth=1
cvtsi2ss %r15d, %xmm1
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
callq powf
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
movss %xmm0, 32(%rsp) # 4-byte Spill
divss %xmm0, %xmm1
addss .LCPI1_1(%rip), %xmm1
movaps %xmm1, 160(%rsp) # 16-byte Spill
cvttss2si %xmm1, %ebp
movslq %ebp, %rbx
shlq $2, %rbx
movq %rbx, %rdi
callq malloc
movq %rax, %r13
cvttps2dq 160(%rsp), %xmm0 # 16-byte Folded Reload
cvtdq2ps %xmm0, %xmm0
mulss .LCPI1_2(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r14d
leaq 16(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r14
movq %r14, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_3
# %bb.2: # in Loop: Header=BB1_1 Depth=1
movq 24(%rsp), %rax
movq %rax, 104(%rsp)
movl $0, 52(%rsp)
movl $1082130432, 48(%rsp) # imm = 0x40800000
movl %ebp, 44(%rsp)
movss 32(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 40(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 52(%rsp), %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rax
movq %rax, 128(%rsp)
leaq 44(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
movl $_Z8GPuEulerPfffif, %edi
leaq 112(%rsp), %r9
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_3
.LBB1_4:
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8GPuEulerPfffif, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8GPuEulerPfffif,@object # @_Z8GPuEulerPfffif
.section .rodata,"a",@progbits
.globl _Z8GPuEulerPfffif
.p2align 3, 0x0
_Z8GPuEulerPfffif:
.quad _Z23__device_stub__GPuEulerPfffif
.size _Z8GPuEulerPfffif, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%f\n"
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8GPuEulerPfffif"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "seccion 1.b"
.size .Lstr, 12
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__GPuEulerPfffif
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8GPuEulerPfffif
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
// Cuda supports printf in kernels for
// hardware with compute compatibility >= 2.0
__global__ void helloworld()
{
// CUDA runtime uses device overloading or printf in kernels
printf("Hello world!\n");
}
int main(void)
{
helloworld<<<1,1>>>();
return 0;
} | code for sm_80
Function : _Z10helloworldv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0060*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */
/* 0x000fe40000000f00 */
/*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */
/* 0x000fe40000000f00 */
/*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
// Cuda supports printf in kernels for
// hardware with compute compatibility >= 2.0
__global__ void helloworld()
{
// CUDA runtime uses device overloading or printf in kernels
printf("Hello world!\n");
}
int main(void)
{
helloworld<<<1,1>>>();
return 0;
} | .file "tmpxft_0014b04c_00000000-6_helloworld.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z10helloworldvv
.type _Z29__device_stub__Z10helloworldvv, @function
_Z29__device_stub__Z10helloworldvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10helloworldv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z29__device_stub__Z10helloworldvv, .-_Z29__device_stub__Z10helloworldvv
.globl _Z10helloworldv
.type _Z10helloworldv, @function
_Z10helloworldv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10helloworldvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10helloworldv, .-_Z10helloworldv
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z29__device_stub__Z10helloworldvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10helloworldv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10helloworldv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
// Cuda supports printf in kernels for
// hardware with compute compatibility >= 2.0
__global__ void helloworld()
{
// CUDA runtime uses device overloading or printf in kernels
printf("Hello world!\n");
}
int main(void)
{
helloworld<<<1,1>>>();
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
// Cuda supports printf in kernels for
// hardware with compute compatibility >= 2.0
__global__ void helloworld()
{
// CUDA runtime uses device overloading or printf in kernels
printf("Hello world!\n");
}
int main(void)
{
helloworld<<<1,1>>>();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
// Cuda supports printf in kernels for
// hardware with compute compatibility >= 2.0
__global__ void helloworld()
{
// CUDA runtime uses device overloading or printf in kernels
printf("Hello world!\n");
}
int main(void)
{
helloworld<<<1,1>>>();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10helloworldv
.globl _Z10helloworldv
.p2align 8
.type _Z10helloworldv,@function
_Z10helloworldv:
s_load_b64 s[2:3], s[0:1], 0x50
v_mbcnt_lo_u32_b32 v20, -1, 0
v_mov_b32_e32 v6, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20
v_readfirstlane_b32 s0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v4
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_6
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
s_waitcnt lgkmcnt(0)
global_load_b64 v[8:9], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[5:6], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v2, v2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v3, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
global_load_b64 v[6:7], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[6:7], v[8:9]
s_cbranch_execz .LBB0_5
s_mov_b32 s5, 0
.p2align 6
.LBB0_3:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[10:11], v0, s[2:3]
v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v7, v2, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11]
v_mov_b32_e32 v1, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2]
v_mov_b32_e32 v6, v2
global_load_b64 v[6:7], v[5:6], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s5
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v5, 0
v_readfirstlane_b32 s4, v6
v_readfirstlane_b32 s5, v7
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b64 v[8:9], v5, s[2:3] offset:40
global_load_b128 v[0:3], v5, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v8
v_readfirstlane_b32 s7, v9
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v10, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[10:11], v[6:9], off offset:8
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_lshlrev_b64 v[4:5], 6, v[4:5]
s_waitcnt vmcnt(0)
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
v_mov_b32_e32 v3, 0
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, v2, v4
v_mov_b32_e32 v2, 33
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v4, v3
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8
v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10
v_mov_b32_e32 v11, s11
s_clause 0x3
global_store_b128 v[6:7], v[2:5], off
global_store_b128 v[6:7], v[8:11], off offset:16
global_store_b128 v[6:7], v[8:11], off offset:32
global_store_b128 v[6:7], v[8:11], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4
v_mov_b32_e32 v12, s5
s_clause 0x1
global_load_b64 v[13:14], v10, s[2:3] offset:32 glc
global_load_b64 v[2:3], v10, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[8:9], v[13:14], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[13:14]
s_cbranch_execz .LBB0_12
s_mov_b32 s9, 0
.LBB0_11:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[8:9], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_11
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_14
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_16
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_20
.p2align 6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_19
s_sleep 1
s_cbranch_execnz .LBB0_20
s_branch .LBB0_22
.p2align 6
.LBB0_19:
s_branch .LBB0_22
.LBB0_20:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_17
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_17
.LBB0_22:
global_load_b64 v[22:23], v[6:7], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_26
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_26
s_mov_b32 s0, 0
.LBB0_25:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_25
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s1
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, .str@rel32@lo+4
s_addc_u32 s5, s5, .str@rel32@hi+12
s_mov_b32 s0, -1
s_cmp_lg_u64 s[4:5], 0
s_cbranch_scc0 .LBB0_105
s_waitcnt vmcnt(0)
v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22
v_mov_b32_e32 v25, 0
s_mov_b64 s[6:7], 14
s_branch .LBB0_29
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s1
s_sub_u32 s6, s6, s8
s_subb_u32 s7, s7, s9
s_add_u32 s4, s4, s8
s_addc_u32 s5, s5, s9
s_cmp_lg_u64 s[6:7], 0
s_cbranch_scc0 .LBB0_104
.LBB0_29:
v_cmp_lt_u64_e64 s0, s[6:7], 56
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, exec_lo
s_cselect_b32 s8, s6, 56
s_cselect_b32 s9, s7, 0
s_cmp_gt_u32 s8, 7
s_mov_b32 s0, -1
s_cbranch_scc1 .LBB0_34
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v3, 0
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB0_33
s_lshl_b64 s[0:1], s[8:9], 3
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[4:5]
.LBB0_32:
global_load_u8 v4, v25, s[12:13]
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v4
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[4:5], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s0, s10
v_or_b32_e32 v2, v4, v2
v_or_b32_e32 v3, v5, v3
s_cbranch_scc1 .LBB0_32
.LBB0_33:
s_mov_b32 s0, 0
s_mov_b32 s15, 0
.LBB0_34:
s_and_not1_b32 vcc_lo, exec_lo, s0
s_mov_b64 s[0:1], s[4:5]
s_cbranch_vccnz .LBB0_36
global_load_b64 v[2:3], v25, s[4:5]
s_add_i32 s15, s8, -8
s_add_u32 s0, s4, 8
s_addc_u32 s1, s5, 0
.LBB0_36:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_41
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_40
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_39:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v6, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v4, v6, v4
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, v7, v5
s_cbranch_scc1 .LBB0_39
.LBB0_40:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_42
s_branch .LBB0_43
.LBB0_41:
.LBB0_42:
global_load_b64 v[4:5], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_43:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_48
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v7, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_47
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_46:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v8, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v6, v8, v6
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v7, v9, v7
s_cbranch_scc1 .LBB0_46
.LBB0_47:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_49
s_branch .LBB0_50
.LBB0_48:
.LBB0_49:
global_load_b64 v[6:7], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_50:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_55
v_mov_b32_e32 v8, 0
v_mov_b32_e32 v9, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_54
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_53:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v10, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v8, v10, v8
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v9, v11, v9
s_cbranch_scc1 .LBB0_53
.LBB0_54:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_56
s_branch .LBB0_57
.LBB0_55:
.LBB0_56:
global_load_b64 v[8:9], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_57:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_62
v_mov_b32_e32 v10, 0
v_mov_b32_e32 v11, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_61
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_60:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v12, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v10, v12, v10
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v11, v13, v11
s_cbranch_scc1 .LBB0_60
.LBB0_61:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_63
s_branch .LBB0_64
.LBB0_62:
.LBB0_63:
global_load_b64 v[10:11], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_64:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_69
v_mov_b32_e32 v12, 0
v_mov_b32_e32 v13, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_68
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_67:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v14, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[14:15], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v12, v14, v12
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v13, v15, v13
s_cbranch_scc1 .LBB0_67
.LBB0_68:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_70
s_branch .LBB0_71
.LBB0_69:
.LBB0_70:
global_load_b64 v[12:13], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_71:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_76
v_mov_b32_e32 v14, 0
v_mov_b32_e32 v15, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_75
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[0:1]
.LBB0_74:
global_load_u8 v16, v25, s[12:13]
s_add_i32 s14, s14, -1
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v16
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[16:17], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s14, 0
v_or_b32_e32 v14, v16, v14
v_or_b32_e32 v15, v17, v15
s_cbranch_scc1 .LBB0_74
.LBB0_75:
s_cbranch_execz .LBB0_77
s_branch .LBB0_78
.LBB0_76:
.LBB0_77:
global_load_b64 v[14:15], v25, s[0:1]
.LBB0_78:
v_mov_b32_e32 v24, v20
v_mov_b32_e32 v26, 0
v_mov_b32_e32 v27, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s0, v24
v_cmp_eq_u32_e64 s0, s0, v24
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_84
global_load_b64 v[18:19], v25, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[26:27], v25, s[2:3]
s_mov_b32 s10, exec_lo
s_waitcnt vmcnt(1)
v_and_b32_e32 v17, v17, v19
v_and_b32_e32 v16, v16, v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v17, v17, 24
v_mul_hi_u32 v21, v16, 24
v_mul_lo_u32 v16, v16, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v17, v21, v17
s_waitcnt vmcnt(0)
v_add_co_u32 v16, vcc_lo, v26, v16
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo
global_load_b64 v[16:17], v[16:17], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[26:27], v[18:19]
s_cbranch_execz .LBB0_83
s_mov_b32 s11, 0
.p2align 6
.LBB0_81:
s_sleep 1
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[28:29], v25, s[2:3]
v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v16, v16, v18
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19
v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17]
global_load_b64 v[16:17], v[26:27], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19]
s_or_b32 s11, vcc_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_81
s_or_b32 exec_lo, exec_lo, s11
.LBB0_83:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s10
.LBB0_84:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
s_clause 0x1
global_load_b64 v[28:29], v25, s[2:3] offset:40
global_load_b128 v[16:19], v25, s[2:3]
v_readfirstlane_b32 s10, v26
v_readfirstlane_b32 s11, v27
s_mov_b32 s14, exec_lo
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s12, v28
v_readfirstlane_b32 s13, v29
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[12:13], s[10:11], s[12:13]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_86
v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0
s_mul_i32 s14, s13, 24
s_mul_hi_u32 s15, s12, 24
v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1
s_add_i32 s15, s15, s14
s_mul_i32 s14, s12, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v30, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo
global_store_b128 v[30:31], v[26:29], off offset:8
.LBB0_86:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56
v_or_b32_e32 v21, 2, v0
s_lshl_b64 s[14:15], s[12:13], 12
v_lshlrev_b64 v[26:27], 6, v[24:25]
s_lshl_b32 s1, s8, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s1, s1, 28
v_cndmask_b32_e32 v0, v21, v0, vcc_lo
s_waitcnt vmcnt(0)
v_add_co_u32 v18, vcc_lo, v18, s14
v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo
s_and_b32 s1, s1, 0x1e0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v18, vcc_lo, v18, v26
v_and_or_b32 v0, v0, 0xffffff1f, s1
v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo
s_clause 0x3
global_store_b128 v[18:19], v[0:3], off
global_store_b128 v[18:19], v[4:7], off offset:16
global_store_b128 v[18:19], v[8:11], off offset:32
global_store_b128 v[18:19], v[12:15], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_94
s_clause 0x1
global_load_b64 v[8:9], v25, s[2:3] offset:32 glc
global_load_b64 v[0:1], v25, s[2:3] offset:40
v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v0
v_readfirstlane_b32 s15, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[14:15], s[14:15], s[10:11]
s_mul_i32 s15, s15, 24
s_mul_hi_u32 s16, s14, 24
s_mul_i32 s14, s14, 24
s_add_i32 s16, s16, s15
v_add_co_u32 v4, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo
s_mov_b32 s14, exec_lo
global_store_b64 v[4:5], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v25, v[6:9], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[2:3], v[8:9]
s_cbranch_execz .LBB0_90
s_mov_b32 s15, 0
.LBB0_89:
v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execnz .LBB0_89
.LBB0_90:
s_or_b32 exec_lo, exec_lo, s14
global_load_b64 v[0:1], v25, s[2:3] offset:16
s_mov_b32 s15, exec_lo
s_mov_b32 s14, exec_lo
v_mbcnt_lo_u32_b32 v2, s15, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_92
s_bcnt1_i32_b32 s15, s15
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[0:1], v[2:3], off offset:8
.LBB0_92:
s_or_b32 exec_lo, exec_lo, s14
s_waitcnt vmcnt(0)
global_load_b64 v[2:3], v[0:1], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
s_cbranch_vccnz .LBB0_94
global_load_b32 v24, v[0:1], off offset:24
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v24
s_waitcnt_vscnt null, 0x0
global_store_b64 v[2:3], v[24:25], off
s_and_b32 m0, s14, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_94:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s13, 24
s_mul_hi_u32 s13, s12, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s13, s1
s_mul_i32 s1, s12, 24
v_add_co_u32 v0, vcc_lo, v16, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_98
.p2align 6
.LBB0_95:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_97
s_sleep 1
s_cbranch_execnz .LBB0_98
s_branch .LBB0_100
.p2align 6
.LBB0_97:
s_branch .LBB0_100
.LBB0_98:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_95
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_95
.LBB0_100:
global_load_b64 v[0:1], v[18:19], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_28
s_clause 0x2
global_load_b64 v[4:5], v25, s[2:3] offset:40
global_load_b64 v[8:9], v25, s[2:3] offset:24 glc
global_load_b64 v[6:7], v25, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v10, vcc_lo, v4, 1
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v10, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10
v_and_b32_e32 v5, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v4, v2, v4
v_mul_hi_u32 v10, v4, 24
v_mul_lo_u32 v4, v4, 24
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_u32 v6, vcc_lo, v6, v4
v_mov_b32_e32 v4, v8
v_mul_lo_u32 v5, v5, 24
v_add_nc_u32_e32 v5, v10, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v5, v9
global_store_b64 v[6:7], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_28
s_mov_b32 s0, 0
.LBB0_103:
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5]
v_dual_mov_b32 v4, v8 :: v_dual_mov_b32 v5, v9
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_103
s_branch .LBB0_28
.LBB0_104:
s_mov_b32 s0, 0
.LBB0_105:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_132
v_readfirstlane_b32 s0, v20
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v20
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_112
s_waitcnt vmcnt(0)
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
global_load_b64 v[6:7], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[3:4], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v6
v_and_b32_e32 v2, v2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v5, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v3, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo
global_load_b64 v[4:5], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[4:5], v[6:7]
s_cbranch_execz .LBB0_111
s_mov_b32 s5, 0
.p2align 6
.LBB0_109:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[8:9], v0, s[2:3]
v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v1, v1, v6
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7
v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2]
global_load_b64 v[4:5], v[3:4], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_109
s_or_b32 exec_lo, exec_lo, s5
.LBB0_111:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_112:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v21, 0
v_readfirstlane_b32 s4, v4
v_readfirstlane_b32 s5, v5
s_mov_b32 s8, exec_lo
s_clause 0x1
global_load_b64 v[6:7], v21, s[2:3] offset:40
global_load_b128 v[0:3], v21, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v6
v_readfirstlane_b32 s7, v7
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_114
v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[8:9], v[4:7], off offset:8
.LBB0_114:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_and_or_b32 v22, v22, 0xffffff1d, 34
s_waitcnt vmcnt(0)
v_add_co_u32 v4, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo
v_lshlrev_b64 v[2:3], 6, v[20:21]
s_mov_b32 s8, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_u32 v8, vcc_lo, v4, v2
v_mov_b32_e32 v6, 0
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo
v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11
v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10
s_delay_alu instid0(VALU_DEP_4)
v_mov_b32_e32 v7, v6
s_clause 0x4
global_store_b64 v[8:9], v[22:23], off
global_store_b128 v[8:9], v[2:5], off offset:8
global_store_b128 v[8:9], v[2:5], off offset:24
global_store_b128 v[8:9], v[2:5], off offset:40
global_store_b64 v[8:9], v[6:7], off offset:56
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_122
v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4
v_mov_b32_e32 v10, s5
s_clause 0x1
global_load_b64 v[11:12], v8, s[2:3] offset:32 glc
global_load_b64 v[2:3], v8, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v6, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[6:7], v[11:12], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[11:12]
s_cbranch_execz .LBB0_118
s_mov_b32 s9, 0
.LBB0_117:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_117
.LBB0_118:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_120
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_120:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_122
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_122:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_126
.p2align 6
.LBB0_123:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_125
s_sleep 1
s_cbranch_execnz .LBB0_126
s_branch .LBB0_128
.p2align 6
.LBB0_125:
s_branch .LBB0_128
.LBB0_126:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_123
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_123
.LBB0_128:
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_132
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_132
s_mov_b32 s0, 0
.LBB0_131:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_131
.LBB0_132:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10helloworldv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 32
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10helloworldv, .Lfunc_end0-_Z10helloworldv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type .str,@object
.section .rodata.str1.1,"aMS",@progbits,1
.str:
.asciz "Hello world!\n"
.size .str, 14
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
- .offset: 80
.size: 8
.value_kind: hidden_hostcall_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10helloworldv
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z10helloworldv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 32
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
// Cuda supports printf in kernels for
// hardware with compute compatibility >= 2.0
__global__ void helloworld()
{
// CUDA runtime uses device overloading or printf in kernels
printf("Hello world!\n");
}
int main(void)
{
helloworld<<<1,1>>>();
return 0;
} | .text
.file "helloworld.hip"
.globl _Z25__device_stub__helloworldv # -- Begin function _Z25__device_stub__helloworldv
.p2align 4, 0x90
.type _Z25__device_stub__helloworldv,@function
_Z25__device_stub__helloworldv: # @_Z25__device_stub__helloworldv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10helloworldv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z25__device_stub__helloworldv, .Lfunc_end0-_Z25__device_stub__helloworldv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10helloworldv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10helloworldv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10helloworldv,@object # @_Z10helloworldv
.section .rodata,"a",@progbits
.globl _Z10helloworldv
.p2align 3, 0x0
_Z10helloworldv:
.quad _Z25__device_stub__helloworldv
.size _Z10helloworldv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10helloworldv"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__helloworldv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10helloworldv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10helloworldv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0060*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */
/* 0x000fe40000000f00 */
/*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */
/* 0x000fe40000000f00 */
/*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10helloworldv
.globl _Z10helloworldv
.p2align 8
.type _Z10helloworldv,@function
_Z10helloworldv:
s_load_b64 s[2:3], s[0:1], 0x50
v_mbcnt_lo_u32_b32 v20, -1, 0
v_mov_b32_e32 v6, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20
v_readfirstlane_b32 s0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v4
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_6
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
s_waitcnt lgkmcnt(0)
global_load_b64 v[8:9], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[5:6], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v2, v2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v3, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
global_load_b64 v[6:7], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[6:7], v[8:9]
s_cbranch_execz .LBB0_5
s_mov_b32 s5, 0
.p2align 6
.LBB0_3:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[10:11], v0, s[2:3]
v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v7, v2, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11]
v_mov_b32_e32 v1, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2]
v_mov_b32_e32 v6, v2
global_load_b64 v[6:7], v[5:6], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s5
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v5, 0
v_readfirstlane_b32 s4, v6
v_readfirstlane_b32 s5, v7
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b64 v[8:9], v5, s[2:3] offset:40
global_load_b128 v[0:3], v5, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v8
v_readfirstlane_b32 s7, v9
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v10, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[10:11], v[6:9], off offset:8
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_lshlrev_b64 v[4:5], 6, v[4:5]
s_waitcnt vmcnt(0)
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
v_mov_b32_e32 v3, 0
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, v2, v4
v_mov_b32_e32 v2, 33
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v4, v3
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8
v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10
v_mov_b32_e32 v11, s11
s_clause 0x3
global_store_b128 v[6:7], v[2:5], off
global_store_b128 v[6:7], v[8:11], off offset:16
global_store_b128 v[6:7], v[8:11], off offset:32
global_store_b128 v[6:7], v[8:11], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4
v_mov_b32_e32 v12, s5
s_clause 0x1
global_load_b64 v[13:14], v10, s[2:3] offset:32 glc
global_load_b64 v[2:3], v10, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[8:9], v[13:14], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[13:14]
s_cbranch_execz .LBB0_12
s_mov_b32 s9, 0
.LBB0_11:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[8:9], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_11
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_14
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_16
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_20
.p2align 6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_19
s_sleep 1
s_cbranch_execnz .LBB0_20
s_branch .LBB0_22
.p2align 6
.LBB0_19:
s_branch .LBB0_22
.LBB0_20:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_17
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_17
.LBB0_22:
global_load_b64 v[22:23], v[6:7], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_26
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_26
s_mov_b32 s0, 0
.LBB0_25:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_25
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s1
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, .str@rel32@lo+4
s_addc_u32 s5, s5, .str@rel32@hi+12
s_mov_b32 s0, -1
s_cmp_lg_u64 s[4:5], 0
s_cbranch_scc0 .LBB0_105
s_waitcnt vmcnt(0)
v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22
v_mov_b32_e32 v25, 0
s_mov_b64 s[6:7], 14
s_branch .LBB0_29
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s1
s_sub_u32 s6, s6, s8
s_subb_u32 s7, s7, s9
s_add_u32 s4, s4, s8
s_addc_u32 s5, s5, s9
s_cmp_lg_u64 s[6:7], 0
s_cbranch_scc0 .LBB0_104
.LBB0_29:
v_cmp_lt_u64_e64 s0, s[6:7], 56
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, exec_lo
s_cselect_b32 s8, s6, 56
s_cselect_b32 s9, s7, 0
s_cmp_gt_u32 s8, 7
s_mov_b32 s0, -1
s_cbranch_scc1 .LBB0_34
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v3, 0
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB0_33
s_lshl_b64 s[0:1], s[8:9], 3
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[4:5]
.LBB0_32:
global_load_u8 v4, v25, s[12:13]
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v4
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[4:5], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s0, s10
v_or_b32_e32 v2, v4, v2
v_or_b32_e32 v3, v5, v3
s_cbranch_scc1 .LBB0_32
.LBB0_33:
s_mov_b32 s0, 0
s_mov_b32 s15, 0
.LBB0_34:
s_and_not1_b32 vcc_lo, exec_lo, s0
s_mov_b64 s[0:1], s[4:5]
s_cbranch_vccnz .LBB0_36
global_load_b64 v[2:3], v25, s[4:5]
s_add_i32 s15, s8, -8
s_add_u32 s0, s4, 8
s_addc_u32 s1, s5, 0
.LBB0_36:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_41
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_40
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_39:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v6, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v4, v6, v4
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, v7, v5
s_cbranch_scc1 .LBB0_39
.LBB0_40:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_42
s_branch .LBB0_43
.LBB0_41:
.LBB0_42:
global_load_b64 v[4:5], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_43:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_48
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v7, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_47
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_46:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v8, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v6, v8, v6
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v7, v9, v7
s_cbranch_scc1 .LBB0_46
.LBB0_47:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_49
s_branch .LBB0_50
.LBB0_48:
.LBB0_49:
global_load_b64 v[6:7], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_50:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_55
v_mov_b32_e32 v8, 0
v_mov_b32_e32 v9, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_54
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_53:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v10, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v8, v10, v8
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v9, v11, v9
s_cbranch_scc1 .LBB0_53
.LBB0_54:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_56
s_branch .LBB0_57
.LBB0_55:
.LBB0_56:
global_load_b64 v[8:9], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_57:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_62
v_mov_b32_e32 v10, 0
v_mov_b32_e32 v11, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_61
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_60:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v12, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v10, v12, v10
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v11, v13, v11
s_cbranch_scc1 .LBB0_60
.LBB0_61:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_63
s_branch .LBB0_64
.LBB0_62:
.LBB0_63:
global_load_b64 v[10:11], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_64:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_69
v_mov_b32_e32 v12, 0
v_mov_b32_e32 v13, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_68
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_67:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v14, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[14:15], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v12, v14, v12
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v13, v15, v13
s_cbranch_scc1 .LBB0_67
.LBB0_68:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_70
s_branch .LBB0_71
.LBB0_69:
.LBB0_70:
global_load_b64 v[12:13], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_71:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_76
v_mov_b32_e32 v14, 0
v_mov_b32_e32 v15, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_75
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[0:1]
.LBB0_74:
global_load_u8 v16, v25, s[12:13]
s_add_i32 s14, s14, -1
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v16
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[16:17], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s14, 0
v_or_b32_e32 v14, v16, v14
v_or_b32_e32 v15, v17, v15
s_cbranch_scc1 .LBB0_74
.LBB0_75:
s_cbranch_execz .LBB0_77
s_branch .LBB0_78
.LBB0_76:
.LBB0_77:
global_load_b64 v[14:15], v25, s[0:1]
.LBB0_78:
v_mov_b32_e32 v24, v20
v_mov_b32_e32 v26, 0
v_mov_b32_e32 v27, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s0, v24
v_cmp_eq_u32_e64 s0, s0, v24
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_84
global_load_b64 v[18:19], v25, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[26:27], v25, s[2:3]
s_mov_b32 s10, exec_lo
s_waitcnt vmcnt(1)
v_and_b32_e32 v17, v17, v19
v_and_b32_e32 v16, v16, v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v17, v17, 24
v_mul_hi_u32 v21, v16, 24
v_mul_lo_u32 v16, v16, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v17, v21, v17
s_waitcnt vmcnt(0)
v_add_co_u32 v16, vcc_lo, v26, v16
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo
global_load_b64 v[16:17], v[16:17], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[26:27], v[18:19]
s_cbranch_execz .LBB0_83
s_mov_b32 s11, 0
.p2align 6
.LBB0_81:
s_sleep 1
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[28:29], v25, s[2:3]
v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v16, v16, v18
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19
v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17]
global_load_b64 v[16:17], v[26:27], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19]
s_or_b32 s11, vcc_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_81
s_or_b32 exec_lo, exec_lo, s11
.LBB0_83:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s10
.LBB0_84:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
s_clause 0x1
global_load_b64 v[28:29], v25, s[2:3] offset:40
global_load_b128 v[16:19], v25, s[2:3]
v_readfirstlane_b32 s10, v26
v_readfirstlane_b32 s11, v27
s_mov_b32 s14, exec_lo
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s12, v28
v_readfirstlane_b32 s13, v29
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[12:13], s[10:11], s[12:13]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_86
v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0
s_mul_i32 s14, s13, 24
s_mul_hi_u32 s15, s12, 24
v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1
s_add_i32 s15, s15, s14
s_mul_i32 s14, s12, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v30, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo
global_store_b128 v[30:31], v[26:29], off offset:8
.LBB0_86:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56
v_or_b32_e32 v21, 2, v0
s_lshl_b64 s[14:15], s[12:13], 12
v_lshlrev_b64 v[26:27], 6, v[24:25]
s_lshl_b32 s1, s8, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s1, s1, 28
v_cndmask_b32_e32 v0, v21, v0, vcc_lo
s_waitcnt vmcnt(0)
v_add_co_u32 v18, vcc_lo, v18, s14
v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo
s_and_b32 s1, s1, 0x1e0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v18, vcc_lo, v18, v26
v_and_or_b32 v0, v0, 0xffffff1f, s1
v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo
s_clause 0x3
global_store_b128 v[18:19], v[0:3], off
global_store_b128 v[18:19], v[4:7], off offset:16
global_store_b128 v[18:19], v[8:11], off offset:32
global_store_b128 v[18:19], v[12:15], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_94
s_clause 0x1
global_load_b64 v[8:9], v25, s[2:3] offset:32 glc
global_load_b64 v[0:1], v25, s[2:3] offset:40
v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v0
v_readfirstlane_b32 s15, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[14:15], s[14:15], s[10:11]
s_mul_i32 s15, s15, 24
s_mul_hi_u32 s16, s14, 24
s_mul_i32 s14, s14, 24
s_add_i32 s16, s16, s15
v_add_co_u32 v4, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo
s_mov_b32 s14, exec_lo
global_store_b64 v[4:5], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v25, v[6:9], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[2:3], v[8:9]
s_cbranch_execz .LBB0_90
s_mov_b32 s15, 0
.LBB0_89:
v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execnz .LBB0_89
.LBB0_90:
s_or_b32 exec_lo, exec_lo, s14
global_load_b64 v[0:1], v25, s[2:3] offset:16
s_mov_b32 s15, exec_lo
s_mov_b32 s14, exec_lo
v_mbcnt_lo_u32_b32 v2, s15, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_92
s_bcnt1_i32_b32 s15, s15
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[0:1], v[2:3], off offset:8
.LBB0_92:
s_or_b32 exec_lo, exec_lo, s14
s_waitcnt vmcnt(0)
global_load_b64 v[2:3], v[0:1], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
s_cbranch_vccnz .LBB0_94
global_load_b32 v24, v[0:1], off offset:24
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v24
s_waitcnt_vscnt null, 0x0
global_store_b64 v[2:3], v[24:25], off
s_and_b32 m0, s14, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_94:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s13, 24
s_mul_hi_u32 s13, s12, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s13, s1
s_mul_i32 s1, s12, 24
v_add_co_u32 v0, vcc_lo, v16, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_98
.p2align 6
.LBB0_95:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_97
s_sleep 1
s_cbranch_execnz .LBB0_98
s_branch .LBB0_100
.p2align 6
.LBB0_97:
s_branch .LBB0_100
.LBB0_98:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_95
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_95
.LBB0_100:
global_load_b64 v[0:1], v[18:19], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_28
s_clause 0x2
global_load_b64 v[4:5], v25, s[2:3] offset:40
global_load_b64 v[8:9], v25, s[2:3] offset:24 glc
global_load_b64 v[6:7], v25, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v10, vcc_lo, v4, 1
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v10, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10
v_and_b32_e32 v5, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v4, v2, v4
v_mul_hi_u32 v10, v4, 24
v_mul_lo_u32 v4, v4, 24
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_u32 v6, vcc_lo, v6, v4
v_mov_b32_e32 v4, v8
v_mul_lo_u32 v5, v5, 24
v_add_nc_u32_e32 v5, v10, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v5, v9
global_store_b64 v[6:7], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_28
s_mov_b32 s0, 0
.LBB0_103:
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5]
v_dual_mov_b32 v4, v8 :: v_dual_mov_b32 v5, v9
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_103
s_branch .LBB0_28
.LBB0_104:
s_mov_b32 s0, 0
.LBB0_105:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_132
v_readfirstlane_b32 s0, v20
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v20
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_112
s_waitcnt vmcnt(0)
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
global_load_b64 v[6:7], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[3:4], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v6
v_and_b32_e32 v2, v2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v5, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v3, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo
global_load_b64 v[4:5], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[4:5], v[6:7]
s_cbranch_execz .LBB0_111
s_mov_b32 s5, 0
.p2align 6
.LBB0_109:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[8:9], v0, s[2:3]
v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v1, v1, v6
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7
v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2]
global_load_b64 v[4:5], v[3:4], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_109
s_or_b32 exec_lo, exec_lo, s5
.LBB0_111:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_112:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v21, 0
v_readfirstlane_b32 s4, v4
v_readfirstlane_b32 s5, v5
s_mov_b32 s8, exec_lo
s_clause 0x1
global_load_b64 v[6:7], v21, s[2:3] offset:40
global_load_b128 v[0:3], v21, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v6
v_readfirstlane_b32 s7, v7
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_114
v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[8:9], v[4:7], off offset:8
.LBB0_114:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_and_or_b32 v22, v22, 0xffffff1d, 34
s_waitcnt vmcnt(0)
v_add_co_u32 v4, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo
v_lshlrev_b64 v[2:3], 6, v[20:21]
s_mov_b32 s8, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_u32 v8, vcc_lo, v4, v2
v_mov_b32_e32 v6, 0
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo
v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11
v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10
s_delay_alu instid0(VALU_DEP_4)
v_mov_b32_e32 v7, v6
s_clause 0x4
global_store_b64 v[8:9], v[22:23], off
global_store_b128 v[8:9], v[2:5], off offset:8
global_store_b128 v[8:9], v[2:5], off offset:24
global_store_b128 v[8:9], v[2:5], off offset:40
global_store_b64 v[8:9], v[6:7], off offset:56
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_122
v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4
v_mov_b32_e32 v10, s5
s_clause 0x1
global_load_b64 v[11:12], v8, s[2:3] offset:32 glc
global_load_b64 v[2:3], v8, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v6, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[6:7], v[11:12], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[11:12]
s_cbranch_execz .LBB0_118
s_mov_b32 s9, 0
.LBB0_117:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_117
.LBB0_118:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_120
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_120:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_122
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_122:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_126
.p2align 6
.LBB0_123:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_125
s_sleep 1
s_cbranch_execnz .LBB0_126
s_branch .LBB0_128
.p2align 6
.LBB0_125:
s_branch .LBB0_128
.LBB0_126:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_123
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_123
.LBB0_128:
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_132
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_132
s_mov_b32 s0, 0
.LBB0_131:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_131
.LBB0_132:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10helloworldv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 32
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10helloworldv, .Lfunc_end0-_Z10helloworldv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type .str,@object
.section .rodata.str1.1,"aMS",@progbits,1
.str:
.asciz "Hello world!\n"
.size .str, 14
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
- .offset: 80
.size: 8
.value_kind: hidden_hostcall_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10helloworldv
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z10helloworldv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 32
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0014b04c_00000000-6_helloworld.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z10helloworldvv
.type _Z29__device_stub__Z10helloworldvv, @function
_Z29__device_stub__Z10helloworldvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10helloworldv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z29__device_stub__Z10helloworldvv, .-_Z29__device_stub__Z10helloworldvv
.globl _Z10helloworldv
.type _Z10helloworldv, @function
_Z10helloworldv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10helloworldvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10helloworldv, .-_Z10helloworldv
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z29__device_stub__Z10helloworldvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10helloworldv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10helloworldv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "helloworld.hip"
.globl _Z25__device_stub__helloworldv # -- Begin function _Z25__device_stub__helloworldv
.p2align 4, 0x90
.type _Z25__device_stub__helloworldv,@function
_Z25__device_stub__helloworldv: # @_Z25__device_stub__helloworldv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10helloworldv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z25__device_stub__helloworldv, .Lfunc_end0-_Z25__device_stub__helloworldv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10helloworldv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10helloworldv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10helloworldv,@object # @_Z10helloworldv
.section .rodata,"a",@progbits
.globl _Z10helloworldv
.p2align 3, 0x0
_Z10helloworldv:
.quad _Z25__device_stub__helloworldv
.size _Z10helloworldv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10helloworldv"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__helloworldv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10helloworldv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Small CUDA exercise to try to improve efficiency by using two
separate streams to set up a staged copying and execution (when
instead of one large copy, followed by one large kernel
computation, one does it in many small chunks, with copying and
computing done in parallel in two streams, after first chunk was
copied to the device).
For the purpose of this exercise, ignore the copying of the
results from the device to host at the end; only do the staged copy
and execute for the copying of the initial data to the device + the
kernel.
We use cudaMallocHost to allocate arrays on host in pinned memory,
which is both results in faster copying to/from GPU (compared to malloc),
and also a CUDA requirement for copying running concurrently with a kernel.
Make sure that the "Result:" value printed by the code is (almost)
identical in both original and modified versions of the code. If
not, you have a bug!
Hints: You will have to use the following CUDA functions:
- cudaStreamCreate
- cudaMemcpyAsync
- cudaStreamDestroy
- cudaDeviceSynchronize
* You will have to set up a for loop for multiple chunks copying and
kernel execution;
* Number of chunks should be a variable (or macro parameter); for
simplicity, make NMAX dividable by the number of chunks;
* In cudaMemcpyAsync the first two arguments should be "&d_A[ind],
&h_A[ind]", not "d_A, h_A", ind being the starting index for the
current chunk to copy;
* You'll have to pass two more arguments to the kernel -
ind and number of threads per chunk;
* Nblocks will be different - shoul be computed per chunk.
* You'll have to modify the kernel slightly;
At the end, you should get the timings (based on 10 runs, NMAX=1000000,
BLOCK_SIZE=128) similar to this:
NCHUNKS t, ms
- 2.76 - the original (non-staged) version of the code
1 2.72 - result is similar to the non-staged code
2 2.08 - even with only 2 chunks, we already see 33% speedup
4 1.79
5 1.75
10 1.72 - seems to be the best timing, 60% faster than the original code
20 1.87 - as NCHUNKS increases, the results get worse. Why?
100 3.53 - for too many NCHUNKS results can get even worse than in non-staged code
To compile:
nvcc -arch=sm_20 -O2 staged.cu -o staged
The best/average timings:
../best_time.sh ./staged
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Number of times to run the test (for better timings accuracy):
#define NTESTS 100
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Total number of threads (total number of elements to process in the kernel):
#define NMAX 1000000
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The kernel:
// One thread per array element: reads d_A[i], does some deliberately
// CPU-intensive (meaningless) arithmetic, and writes the result to d_B[i].
// Expects a 1D launch covering at least NMAX threads; extra threads exit.
__global__ void MyKernel (double *d_A, double *d_B)
{
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx < NMAX)
    {
      // Load the input once; both powers use the same element.
      double a  = d_A[idx];
      double p1 = pow (a, 2.71);
      double p2 = pow (a, 0.35);
      double s  = 2*p1 + 5*p2;
      d_B[idx] = p1 + p2 + s + p1*p2 + p1/p2 + p2/s;
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Print a CUDA error code and terminate the process with that code.
// No-op when err is 0 (cudaSuccess).  Factors out the error-handling
// boilerplate that was repeated after every CUDA runtime call below;
// the message and exit code are identical to the original inline checks.
static void checkError (int err)
{
  if (err)
    {
      printf ("Error %d\n", err);
      exit (err);
    }
}

int main (int argc,char **argv)
{
  struct timeval tdr0, tdr1, tdr, tdr01;
  double restime, restime0, restime1;
  int devid, devcount, error, Max_gridsize;
  double *h_A, *h_B, *d_A, *d_B;

  /* find number of device in current "context" */
  cudaGetDevice(&devid);

  /* find how many devices are available */
  if (cudaGetDeviceCount(&devcount) || devcount==0)
    {
      printf ("No CUDA devices!\n");
      exit (1);
    }
  else
    {
      cudaDeviceProp deviceProp;
      cudaGetDeviceProperties (&deviceProp, devid);
      printf ("Device count, devid: %d %d\n", devcount, devid);
      printf ("Device: %s\n", deviceProp.name);
      printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
      Max_gridsize = deviceProp.maxGridSize[0];
    }

  // Loop to run the timing test multiple times:
  int kk;
  for (kk=0; kk<NTESTS; kk++)
    {
      // Using cudaMallocHost (instead of malloc) to accelerate data copying;
      // pinned memory is also required for copies that overlap kernels.
      // Initial data array on host:
      checkError (error = cudaMallocHost (&h_A, NMAX*sizeof(double)));
      // Results array on host:
      checkError (error = cudaMallocHost (&h_B, NMAX*sizeof(double)));
      // Allocating arrays on GPU:
      checkError (error = cudaMalloc (&d_A, NMAX*sizeof(double)));
      checkError (error = cudaMalloc (&d_B, NMAX*sizeof(double)));

      // Initializing the input array with uniform [0,1) doubles:
      for (int i=0; i<NMAX; i++)
        h_A[i] = (double)rand()/(double)RAND_MAX;

      // Number of blocks of threads (ceiling division):
      int Nblocks = (NMAX+BLOCK_SIZE-1) / BLOCK_SIZE;
      if (Nblocks > Max_gridsize)
        {
          printf ("Nblocks > Max_gridsize! %d %d\n", Nblocks, Max_gridsize);
          exit (1);
        }

      // Drain any pending GPU work so tdr0 measures only the timed section:
      checkError (error = cudaDeviceSynchronize());
      gettimeofday (&tdr0, NULL);

      //--------------------------------------------------------------------------------
      // Copying the data to device (we time it):
      checkError (error = cudaMemcpy (d_A, h_A, NMAX*sizeof(double), cudaMemcpyHostToDevice));

      // Intermediate timing, to measure timings separately for copying and kernel
      // execution (should be removed in the staged solution code):
      checkError (error = cudaDeviceSynchronize());
      gettimeofday (&tdr01, NULL);

      // The kernel call:
      MyKernel <<<Nblocks, BLOCK_SIZE>>> (d_A, d_B);
      //--------------------------------------------------------------------------------

      checkError (error = cudaDeviceSynchronize());
      gettimeofday (&tdr1, NULL);

      // timeval_subtract normalizes its y argument in place, so always pass a copy:
      tdr = tdr0;
      timeval_subtract (&restime, &tdr1, &tdr);

      // Copying the result back to host (we don't time it):
      checkError (error = cudaMemcpy (h_B, d_B, NMAX*sizeof(double), cudaMemcpyDeviceToHost));
      checkError (error = cudaDeviceSynchronize());

      // Adding up the results, for accuracy/correctness testing:
      double result = 0.0;
      for (int i=0; i<NMAX; i++)
        {
          result += h_B[i];
        }

      tdr = tdr0;
      timeval_subtract (&restime0, &tdr01, &tdr);
      tdr = tdr01;
      timeval_subtract (&restime1, &tdr1, &tdr);
      printf ("Individual timings: %e %e\n", restime0, restime1);
      printf ("Result: %e\n\n", result);
      printf ("Time: %e\n", restime);

      cudaFreeHost (h_A);
      cudaFreeHost (h_B);
      cudaFree (d_A);
      cudaFree (d_B);
    } // kk loop

  return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2074:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2074:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16timeval_subtractPdP7timevalS1_
.type _Z16timeval_subtractPdP7timevalS1_, @function
_Z16timeval_subtractPdP7timevalS1_:
.LFB2070:
.cfi_startproc
endbr64
movq %rdx, %rcx
movq 8(%rsi), %rax
movq 8(%rdx), %r8
cmpq %r8, %rax
jge .L4
movq %r8, %r9
subq %rax, %r9
movabsq $4835703278458516699, %rdx
movq %r9, %rax
imulq %rdx
sarq $18, %rdx
sarq $63, %r9
subq %r9, %rdx
addl $1, %edx
imull $1000000, %edx, %eax
cltq
subq %rax, %r8
movq %r8, 8(%rcx)
movslq %edx, %rdx
addq %rdx, (%rcx)
.L4:
movq 8(%rsi), %rax
movq 8(%rcx), %r8
movq %rax, %rdx
subq %r8, %rdx
cmpq $1000000, %rdx
jle .L5
movq %r8, %r9
subq %rax, %r9
movabsq $4835703278458516699, %rdx
movq %r9, %rax
imulq %rdx
sarq $18, %rdx
sarq $63, %r9
subq %r9, %rdx
imull $1000000, %edx, %eax
cltq
addq %r8, %rax
movq %rax, 8(%rcx)
movslq %edx, %rdx
subq %rdx, (%rcx)
.L5:
movq 8(%rsi), %rax
subq 8(%rcx), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC0(%rip), %xmm0
movq (%rsi), %rax
subq (%rcx), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, (%rdi)
movq (%rcx), %rax
cmpq %rax, (%rsi)
setl %al
movzbl %al, %eax
ret
.cfi_endproc
.LFE2070:
.size _Z16timeval_subtractPdP7timevalS1_, .-_Z16timeval_subtractPdP7timevalS1_
.globl _Z29__device_stub__Z8MyKernelPdS_PdS_
.type _Z29__device_stub__Z8MyKernelPdS_PdS_, @function
_Z29__device_stub__Z8MyKernelPdS_PdS_:
.LFB2096:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L10
.L6:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8MyKernelPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L6
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2096:
.size _Z29__device_stub__Z8MyKernelPdS_PdS_, .-_Z29__device_stub__Z8MyKernelPdS_PdS_
.globl _Z8MyKernelPdS_
.type _Z8MyKernelPdS_, @function
_Z8MyKernelPdS_:
.LFB2097:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z8MyKernelPdS_PdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2097:
.size _Z8MyKernelPdS_, .-_Z8MyKernelPdS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Device count, devid: %d %d\n"
.LC3:
.string "Device: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n"
.section .rodata.str1.1
.LC5:
.string "No CUDA devices!\n"
.LC6:
.string "Error %d\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "Nblocks > Max_gridsize! %d %d\n"
.section .rodata.str1.1
.LC9:
.string "Individual timings: %e %e\n"
.LC10:
.string "Result: %e\n\n"
.LC11:
.string "Time: %e\n"
.text
.globl main
.type main, @function
main:
.LFB2071:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1224, %rsp
.cfi_def_cfa_offset 1280
movq %fs:40, %rax
movq %rax, 1208(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
call cudaGetDevice@PLT
leaq 28(%rsp), %rdi
call cudaGetDeviceCount@PLT
testl %eax, %eax
jne .L15
cmpl $0, 28(%rsp)
je .L15
leaq 176(%rsp), %rbx
movl 24(%rsp), %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movl 24(%rsp), %ecx
movl 28(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 540(%rsp), %ecx
movl 536(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 512(%rsp), %r12d
movl $100, %ebx
leaq .LC9(%rip), %r14
leaq .LC10(%rip), %r13
leaq .LC11(%rip), %r15
jmp .L16
.L15:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L40:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L41:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L42:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L43:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L44:
movl %r12d, %ecx
movl $7813, %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L45:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L46:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L47:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L25:
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L36
leaq 128(%rsp), %rbp
movl $0, %esi
movq %rbp, %rdi
call gettimeofday@PLT
movdqa 112(%rsp), %xmm3
movaps %xmm3, 144(%rsp)
leaq 144(%rsp), %rdx
leaq 32(%rsp), %rdi
movq %rbp, %rsi
call _Z16timeval_subtractPdP7timevalS1_
movl $2, %ecx
movl $8000000, %edx
movq 80(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L37
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L38
movq 64(%rsp), %rax
leaq 8000000(%rax), %rdx
movq $0x000000000, 8(%rsp)
.L29:
movsd 8(%rsp), %xmm2
addsd (%rax), %xmm2
movsd %xmm2, 8(%rsp)
addq $8, %rax
cmpq %rdx, %rax
jne .L29
movdqa 112(%rsp), %xmm4
movaps %xmm4, 144(%rsp)
leaq 144(%rsp), %rbp
leaq 160(%rsp), %rsi
leaq 40(%rsp), %rdi
movq %rbp, %rdx
call _Z16timeval_subtractPdP7timevalS1_
movdqa 160(%rsp), %xmm5
movaps %xmm5, 144(%rsp)
leaq 128(%rsp), %rsi
leaq 48(%rsp), %rdi
movq %rbp, %rdx
call _Z16timeval_subtractPdP7timevalS1_
movsd 48(%rsp), %xmm1
movsd 40(%rsp), %xmm0
movq %r14, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movsd 8(%rsp), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movsd 32(%rsp), %xmm0
movq %r15, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 56(%rsp), %rdi
call cudaFreeHost@PLT
movq 64(%rsp), %rdi
call cudaFreeHost@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
subl $1, %ebx
je .L39
.L16:
leaq 56(%rsp), %rdi
movl $0, %edx
movl $8000000, %esi
call cudaHostAlloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L40
leaq 64(%rsp), %rdi
movl $0, %edx
movl $8000000, %esi
call cudaHostAlloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L41
leaq 72(%rsp), %rdi
movl $8000000, %esi
call cudaMalloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L42
leaq 80(%rsp), %rdi
movl $8000000, %esi
call cudaMalloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L43
movl $0, %ebp
.L20:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC7(%rip), %xmm0
movq 56(%rsp), %rax
movsd %xmm0, (%rax,%rbp)
addq $8, %rbp
cmpq $8000000, %rbp
jne .L20
cmpl $7812, %r12d
jle .L44
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L45
leaq 112(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movl $1, %ecx
movl $8000000, %edx
movq 56(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L46
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L47
leaq 160(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movl $128, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $7813, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 100(%rsp), %rdx
movl $1, %ecx
movq 88(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L25
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call _Z29__device_stub__Z8MyKernelPdS_PdS_
jmp .L25
.L36:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L37:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L38:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L39:
movq 1208(%rsp), %rax
subq %fs:40, %rax
jne .L48
movl $0, %eax
addq $1224, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2071:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z8MyKernelPdS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2099:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z8MyKernelPdS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2099:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1093567616
.align 8
.LC7:
.long -4194304
.long 1105199103
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Small CUDA exercise to try to improve efficiency by using two
separate streams to set up a staged copying and execution (when
instead of one large copy, followed by one large kernel
computation, one does it in many small chunks, with copying and
computing done in parallel in two streams, after first chunk was
copied to the device).
For the purpose of this exercise, ignore the copying of the
results from the device to host at the end; only do the staged copy
and execute for the copying of the initial data to the device + the
kernel.
We use cudaMallocHost to allocate arrays on host in pinned memory,
which is both results in faster copying to/from GPU (compared to malloc),
and also a CUDA requirement for copying running concurrently with a kernel.
Make sure that the "Result:" value printed by the code is (almost)
identical in both original and modified versions of the code. If
not, you have a bug!
Hints: You will have to use the following CUDA functions:
- cudaStreamCreate
- cudaMemcpyAsync
- cudaStreamDestroy
- cudaDeviceSynchronize
* You will have to set up a for loop for multiple chunks copying and
kernel execution;
* Number of chunks should be a variable (or macro parameter); for
simplicity, make NMAX dividable by the number of chunks;
* In cudaMemcpyAsync the first two arguments should be "&d_A[ind],
&h_A[ind]", not "d_A, h_A", ind being the starting index for the
current chunk to copy;
* You'll have to pass two more arguments to the kernel -
ind and number of threads per chunk;
* Nblocks will be different - shoul be computed per chunk.
* You'll have to modify the kernel slightly;
At the end, you should get the timings (based on 10 runs, NMAX=1000000,
BLOCK_SIZE=128) similar to this:
NCHUNKS t, ms
- 2.76 - the original (non-staged) version of the code
1 2.72 - result is similar to the non-staged code
2 2.08 - even with only 2 chunks, we already see 33% speedup
4 1.79
5 1.75
10 1.72 - seems to be the best timing, 60% faster than the original code
20 1.87 - as NCHUNKS increases, the results get worse. Why?
100 3.53 - for too many NCHUNKS results can get even worse than in non-staged code
To compile:
nvcc -arch=sm_20 -O2 staged.cu -o staged
The best/average timings:
../best_time.sh ./staged
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Number of times to run the test (for better timings accuracy):
#define NTESTS 100
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Total number of threads (total number of elements to process in the kernel):
#define NMAX 1000000
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The kernel:
__global__ void MyKernel (double *d_A, double *d_B)
{
double x, y, z;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= NMAX)
return;
// Some meaningless cpu-intensive computation:
x = pow(d_A[i], 2.71);
y = pow(d_A[i], 0.35);
z = 2*x + 5*y;
d_B[i] = x + y + z + x*y + x/y + y/z;
return;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr, tdr01;
double restime, restime0, restime1;
int devid, devcount, error, Max_gridsize;
double *h_A, *h_B, *d_A, *d_B;
/* find number of device in current "context" */
cudaGetDevice(&devid);
/* find how many devices are available */
if (cudaGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
Max_gridsize = deviceProp.maxGridSize[0];
}
// Loop to run the timing test multiple times:
int kk;
for (kk=0; kk<NTESTS; kk++)
{
// Using cudaMallocHost (intead of malloc) to accelerate data copying:
// Initial data array on host:
if (error = cudaMallocHost (&h_A, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
// Results array on host:
if (error = cudaMallocHost (&h_B, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
// ALlocating arrays on GPU:
if (error = cudaMalloc (&d_A, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = cudaMalloc (&d_B, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
// Initializing the input array:
for (int i=0; i<NMAX; i++)
h_A[i] = (double)rand()/(double)RAND_MAX;
// Number of blocks of threads:
int Nblocks = (NMAX+BLOCK_SIZE-1) / BLOCK_SIZE;
if (Nblocks > Max_gridsize)
{
printf ("Nblocks > Max_gridsize! %d %d\n", Nblocks, Max_gridsize);
exit (1);
}
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
//--------------------------------------------------------------------------------
// Copying the data to device (we time it):
if (error = cudaMemcpy (d_A, h_A, NMAX*sizeof(double), cudaMemcpyHostToDevice))
{
printf ("Error %d\n", error);
exit (error);
}
// Intermediate timing, to measure timings separately for copying and kernel execution
// (Should be removed in the solution code)
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr01, NULL);
// The kernel call:
MyKernel <<<Nblocks, BLOCK_SIZE>>> (d_A, d_B);
//--------------------------------------------------------------------------------
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
// Copying the result back to host (we don't time it):
if (error = cudaMemcpy (h_B, d_B, NMAX*sizeof(double), cudaMemcpyDeviceToHost))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
// Adding up the results, for accuracy/correctness testing:
double result = 0.0;
for (int i=0; i<NMAX; i++)
{
result += h_B[i];
}
tdr = tdr0;
timeval_subtract (&restime0, &tdr01, &tdr);
tdr = tdr01;
timeval_subtract (&restime1, &tdr1, &tdr);
printf ("Individual timings: %e %e\n", restime0, restime1);
printf ("Result: %e\n\n", result);
printf ("Time: %e\n", restime);
cudaFreeHost (h_A);
cudaFreeHost (h_B);
cudaFree (d_A);
cudaFree (d_B);
} // kk loop
return 0;
} | /* Small CUDA exercise to try to improve efficiency by using two
separate streams to set up a staged copying and execution (when
instead of one large copy, followed by one large kernel
computation, one does it in many small chunks, with copying and
computing done in parallel in two streams, after first chunk was
copied to the device).
For the purpose of this exercise, ignore the copying of the
results from the device to host at the end; only do the staged copy
and execute for the copying of the initial data to the device + the
kernel.
We use cudaMallocHost to allocate arrays on host in pinned memory,
which is both results in faster copying to/from GPU (compared to malloc),
and also a CUDA requirement for copying running concurrently with a kernel.
Make sure that the "Result:" value printed by the code is (almost)
identical in both original and modified versions of the code. If
not, you have a bug!
Hints: You will have to use the following CUDA functions:
- cudaStreamCreate
- cudaMemcpyAsync
- cudaStreamDestroy
- cudaDeviceSynchronize
* You will have to set up a for loop for multiple chunks copying and
kernel execution;
* Number of chunks should be a variable (or macro parameter); for
simplicity, make NMAX dividable by the number of chunks;
* In cudaMemcpyAsync the first two arguments should be "&d_A[ind],
&h_A[ind]", not "d_A, h_A", ind being the starting index for the
current chunk to copy;
* You'll have to pass two more arguments to the kernel -
ind and number of threads per chunk;
* Nblocks will be different - shoul be computed per chunk.
* You'll have to modify the kernel slightly;
At the end, you should get the timings (based on 10 runs, NMAX=1000000,
BLOCK_SIZE=128) similar to this:
NCHUNKS t, ms
- 2.76 - the original (non-staged) version of the code
1 2.72 - result is similar to the non-staged code
2 2.08 - even with only 2 chunks, we already see 33% speedup
4 1.79
5 1.75
10 1.72 - seems to be the best timing, 60% faster than the original code
20 1.87 - as NCHUNKS increases, the results get worse. Why?
100 3.53 - for too many NCHUNKS results can get even worse than in non-staged code
To compile:
nvcc -arch=sm_20 -O2 staged.cu -o staged
The best/average timings:
../best_time.sh ./staged
*/
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Number of times to run the test (for better timings accuracy):
#define NTESTS 100
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Total number of threads (total number of elements to process in the kernel):
#define NMAX 1000000
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The kernel:
__global__ void MyKernel (double *d_A, double *d_B)
{
double x, y, z;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= NMAX)
return;
// Some meaningless cpu-intensive computation:
x = pow(d_A[i], 2.71);
y = pow(d_A[i], 0.35);
z = 2*x + 5*y;
d_B[i] = x + y + z + x*y + x/y + y/z;
return;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr, tdr01;
double restime, restime0, restime1;
int devid, devcount, error, Max_gridsize;
double *h_A, *h_B, *d_A, *d_B;
/* find number of device in current "context" */
hipGetDevice(&devid);
/* find how many devices are available */
if (hipGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
Max_gridsize = deviceProp.maxGridSize[0];
}
// Loop to run the timing test multiple times:
int kk;
for (kk=0; kk<NTESTS; kk++)
{
// Using cudaMallocHost (intead of malloc) to accelerate data copying:
// Initial data array on host:
if (error = hipHostMalloc (&h_A, NMAX*sizeof(double), hipHostMallocDefault))
{
printf ("Error %d\n", error);
exit (error);
}
// Results array on host:
if (error = hipHostMalloc (&h_B, NMAX*sizeof(double), hipHostMallocDefault))
{
printf ("Error %d\n", error);
exit (error);
}
// ALlocating arrays on GPU:
if (error = hipMalloc (&d_A, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = hipMalloc (&d_B, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
// Initializing the input array:
for (int i=0; i<NMAX; i++)
h_A[i] = (double)rand()/(double)RAND_MAX;
// Number of blocks of threads:
int Nblocks = (NMAX+BLOCK_SIZE-1) / BLOCK_SIZE;
if (Nblocks > Max_gridsize)
{
printf ("Nblocks > Max_gridsize! %d %d\n", Nblocks, Max_gridsize);
exit (1);
}
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
//--------------------------------------------------------------------------------
// Copying the data to device (we time it):
if (error = hipMemcpy (d_A, h_A, NMAX*sizeof(double), hipMemcpyHostToDevice))
{
printf ("Error %d\n", error);
exit (error);
}
// Intermediate timing, to measure timings separately for copying and kernel execution
// (Should be removed in the solution code)
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr01, NULL);
// The kernel call:
MyKernel <<<Nblocks, BLOCK_SIZE>>> (d_A, d_B);
//--------------------------------------------------------------------------------
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
// Copying the result back to host (we don't time it):
if (error = hipMemcpy (h_B, d_B, NMAX*sizeof(double), hipMemcpyDeviceToHost))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
// Adding up the results, for accuracy/correctness testing:
double result = 0.0;
for (int i=0; i<NMAX; i++)
{
result += h_B[i];
}
tdr = tdr0;
timeval_subtract (&restime0, &tdr01, &tdr);
tdr = tdr01;
timeval_subtract (&restime1, &tdr1, &tdr);
printf ("Individual timings: %e %e\n", restime0, restime1);
printf ("Result: %e\n\n", result);
printf ("Time: %e\n", restime);
hipHostFree (h_A);
hipHostFree (h_B);
hipFree (d_A);
hipFree (d_B);
} // kk loop
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* Small CUDA exercise to try to improve efficiency by using two
separate streams to set up a staged copying and execution (when
instead of one large copy, followed by one large kernel
computation, one does it in many small chunks, with copying and
computing done in parallel in two streams, after first chunk was
copied to the device).
For the purpose of this exercise, ignore the copying of the
results from the device to host at the end; only do the staged copy
and execute for the copying of the initial data to the device + the
kernel.
We use cudaMallocHost to allocate arrays on host in pinned memory,
which is both results in faster copying to/from GPU (compared to malloc),
and also a CUDA requirement for copying running concurrently with a kernel.
Make sure that the "Result:" value printed by the code is (almost)
identical in both original and modified versions of the code. If
not, you have a bug!
Hints: You will have to use the following CUDA functions:
- cudaStreamCreate
- cudaMemcpyAsync
- cudaStreamDestroy
- cudaDeviceSynchronize
* You will have to set up a for loop for multiple chunks copying and
kernel execution;
* Number of chunks should be a variable (or macro parameter); for
simplicity, make NMAX dividable by the number of chunks;
* In cudaMemcpyAsync the first two arguments should be "&d_A[ind],
&h_A[ind]", not "d_A, h_A", ind being the starting index for the
current chunk to copy;
* You'll have to pass two more arguments to the kernel -
ind and number of threads per chunk;
* Nblocks will be different - shoul be computed per chunk.
* You'll have to modify the kernel slightly;
At the end, you should get the timings (based on 10 runs, NMAX=1000000,
BLOCK_SIZE=128) similar to this:
NCHUNKS t, ms
- 2.76 - the original (non-staged) version of the code
1 2.72 - result is similar to the non-staged code
2 2.08 - even with only 2 chunks, we already see 33% speedup
4 1.79
5 1.75
10 1.72 - seems to be the best timing, 60% faster than the original code
20 1.87 - as NCHUNKS increases, the results get worse. Why?
100 3.53 - for too many NCHUNKS results can get even worse than in non-staged code
To compile:
nvcc -arch=sm_20 -O2 staged.cu -o staged
The best/average timings:
../best_time.sh ./staged
*/
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Number of times to run the test (for better timings accuracy):
#define NTESTS 100
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Total number of threads (total number of elements to process in the kernel):
#define NMAX 1000000
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
  /* Compute *result = (x - y) in floating-point seconds.
     Returns 1 if the difference is negative, otherwise 0.
     NOTE: normalizes *y in place (the "messes up with y" caveat above),
     so callers must pass a scratch copy if they need y afterwards. */
  struct timeval result0;
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  /* BUG FIX: the borrow amount must be based on (x - y), as in the
     canonical GNU libc "Calculating Elapsed Time" example; the original
     used (y - x), giving a negative nsec and de-normalizing y instead.
     Harmless for already-normalized timevals (tv_usec < 1e6), but wrong
     in general. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
     tv_usec is certainly positive. */
  result0.tv_sec = x->tv_sec - y->tv_sec;
  result0.tv_usec = x->tv_usec - y->tv_usec;
  *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The kernel:
__global__ void MyKernel (double *d_A, double *d_B)
{
  // One element per thread: flat global index over the 1-D grid.
  int idx = blockDim.x * blockIdx.x + threadIdx.x;

  // Guard the grid tail: Nblocks*BLOCK_SIZE may exceed NMAX.
  if (idx < NMAX)
    {
      // Some meaningless cpu-intensive computation, kept expression-for-
      // expression identical to the original so the doubles match bitwise.
      const double px = pow (d_A[idx], 2.71);
      const double py = pow (d_A[idx], 0.35);
      const double pz = 2*px + 5*py;
      d_B[idx] = px + py + pz + px*py + px/py + py/pz;
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Check a HIP status code: print it and terminate on failure, reproducing
// the original inline `if (error = ...) { printf ("Error %d\n", error);
// exit (error); }` handling in one place (same message, same exit code).
static void hip_check (int err)
{
  if (err)
    {
      printf ("Error %d\n", err);
      exit (err);
    }
}

// Driver: allocates pinned host + device buffers, times the H->D copy and
// the kernel separately over NTESTS repetitions, and prints a checksum of
// the results for correctness comparison with the staged version.
int main (int argc,char **argv)
{
  struct timeval tdr0, tdr1, tdr, tdr01;
  double restime, restime0, restime1;
  int devid, devcount, Max_gridsize;
  double *h_A, *h_B, *d_A, *d_B;

  /* find number of device in current "context" */
  hipGetDevice (&devid);

  /* find how many devices are available */
  if (hipGetDeviceCount (&devcount) || devcount == 0)
    {
      printf ("No CUDA devices!\n");
      exit (1);
    }
  else
    {
      hipDeviceProp_t deviceProp;
      hipGetDeviceProperties (&deviceProp, devid);
      printf ("Device count, devid: %d %d\n", devcount, devid);
      printf ("Device: %s\n", deviceProp.name);
      printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
      Max_gridsize = deviceProp.maxGridSize[0];  // 1-D grid-size limit, checked below
    }

  // Loop to run the timing test multiple times:
  for (int kk = 0; kk < NTESTS; kk++)
    {
      // Pinned host memory (instead of malloc) accelerates copying and is a
      // requirement for copies running concurrently with a kernel.
      // Initial data array on host:
      hip_check (hipHostMalloc (&h_A, NMAX*sizeof(double), hipHostMallocDefault));
      // Results array on host:
      hip_check (hipHostMalloc (&h_B, NMAX*sizeof(double), hipHostMallocDefault));

      // Allocating arrays on GPU:
      hip_check (hipMalloc (&d_A, NMAX*sizeof(double)));
      hip_check (hipMalloc (&d_B, NMAX*sizeof(double)));

      // Initializing the input array:
      for (int i = 0; i < NMAX; i++)
        h_A[i] = (double)rand() / (double)RAND_MAX;

      // Number of blocks of threads (ceiling division):
      int Nblocks = (NMAX + BLOCK_SIZE - 1) / BLOCK_SIZE;
      if (Nblocks > Max_gridsize)
        {
          printf ("Nblocks > Max_gridsize! %d %d\n", Nblocks, Max_gridsize);
          exit (1);
        }

      hip_check (hipDeviceSynchronize ());
      gettimeofday (&tdr0, NULL);
      //--------------------------------------------------------------------------------
      // Copying the data to device (we time it):
      hip_check (hipMemcpy (d_A, h_A, NMAX*sizeof(double), hipMemcpyHostToDevice));

      // Intermediate timing, to measure timings separately for copying and
      // kernel execution (should be removed in the staged solution code):
      hip_check (hipDeviceSynchronize ());
      gettimeofday (&tdr01, NULL);

      // The kernel call:
      MyKernel <<<Nblocks, BLOCK_SIZE>>> (d_A, d_B);
      //--------------------------------------------------------------------------------
      hip_check (hipDeviceSynchronize ());
      gettimeofday (&tdr1, NULL);

      tdr = tdr0;  // scratch copy: timeval_subtract mutates its y argument
      timeval_subtract (&restime, &tdr1, &tdr);

      // Copying the result back to host (we don't time it):
      hip_check (hipMemcpy (h_B, d_B, NMAX*sizeof(double), hipMemcpyDeviceToHost));
      hip_check (hipDeviceSynchronize ());

      // Adding up the results, for accuracy/correctness testing:
      double result = 0.0;
      for (int i = 0; i < NMAX; i++)
        {
          result += h_B[i];
        }

      tdr = tdr0;
      timeval_subtract (&restime0, &tdr01, &tdr);   // copy time
      tdr = tdr01;
      timeval_subtract (&restime1, &tdr1, &tdr);    // kernel time

      printf ("Individual timings: %e %e\n", restime0, restime1);
      printf ("Result: %e\n\n", result);
      printf ("Time: %e\n", restime);

      hipHostFree (h_A);
      hipHostFree (h_B);
      hipFree (d_A);
      hipFree (d_B);
    } // kk loop

  return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8MyKernelPdS_
.globl _Z8MyKernelPdS_
.p2align 8
.type _Z8MyKernelPdS_,@function
_Z8MyKernelPdS_:
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e32 0xf4240, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, 0x3fe55555
s_mov_b32 s0, 0x55555555
s_mov_b32 s3, 0x3fba6564
s_mov_b32 s2, 0x968915a9
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
s_mov_b32 s5, 0x3fbdee67
s_mov_b32 s4, 0x4222de17
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_frexp_mant_f64_e64 v[4:5], |v[2:3]|
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_gt_f64_e32 vcc_lo, s[0:1], v[4:5]
v_cndmask_b32_e64 v6, 0, 1, vcc_lo
v_ldexp_f64 v[4:5], v[4:5], v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_f64 v[6:7], v[4:5], 1.0
v_add_f64 v[12:13], v[4:5], -1.0
v_rcp_f64_e32 v[8:9], v[6:7]
v_add_f64 v[14:15], v[6:7], -1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_f64 v[4:5], v[4:5], -v[14:15]
s_waitcnt_depctr 0xfff
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
v_fma_f64 v[8:9], v[10:11], v[8:9], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
v_fma_f64 v[8:9], v[10:11], v[8:9], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[10:11], v[12:13], v[8:9]
v_mul_f64 v[16:17], v[6:7], v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[10:11], v[6:7], -v[16:17]
v_fma_f64 v[4:5], v[10:11], v[4:5], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[6:7], v[16:17], v[4:5]
v_add_f64 v[14:15], v[12:13], -v[6:7]
v_add_f64 v[16:17], v[6:7], -v[16:17]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[12:13], v[12:13], -v[14:15]
v_add_f64 v[4:5], v[16:17], -v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[6:7], v[12:13], -v[6:7]
v_add_f64 v[4:5], v[4:5], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[4:5], v[14:15], v[4:5]
v_mul_f64 v[4:5], v[8:9], v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[6:7], v[10:11], v[4:5]
v_add_f64 v[8:9], v[6:7], -v[10:11]
v_mul_f64 v[10:11], v[6:7], v[6:7]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[4:5], v[4:5], -v[8:9]
v_fma_f64 v[8:9], v[6:7], v[6:7], -v[10:11]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[12:13], v[4:5], v[4:5]
v_fma_f64 v[8:9], v[6:7], v[12:13], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[12:13], v[10:11], v[8:9]
v_fma_f64 v[14:15], v[12:13], s[4:5], s[2:3]
s_mov_b32 s3, 0x3fbe25e4
s_mov_b32 s2, 0x3abe935a
v_add_f64 v[10:11], v[12:13], -v[10:11]
v_mul_f64 v[20:21], v[6:7], v[12:13]
s_mov_b32 s5, 0x3ff71547
s_mov_b32 s4, 0x652b82fe
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_fma_f64 v[14:15], v[12:13], v[14:15], s[2:3]
s_mov_b32 s3, 0x3fc110ef
s_mov_b32 s2, 0x47e6c9c2
v_add_f64 v[8:9], v[8:9], -v[10:11]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[14:15], v[12:13], v[14:15], s[2:3]
s_mov_b32 s3, 0x3fc3b13b
s_mov_b32 s2, 0xcfa74449
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[14:15], v[12:13], v[14:15], s[2:3]
s_mov_b32 s3, 0x3fc745d1
s_mov_b32 s2, 0x71bf3c30
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[14:15], v[12:13], v[14:15], s[2:3]
s_mov_b32 s3, 0x3fcc71c7
s_mov_b32 s2, 0x1c7792ce
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[14:15], v[12:13], v[14:15], s[2:3]
s_mov_b32 s3, 0x3fd24924
s_mov_b32 s2, 0x924920da
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[14:15], v[12:13], v[14:15], s[2:3]
s_mov_b32 s3, 0x3fd99999
s_mov_b32 s2, 0x9999999c
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[14:15], v[12:13], v[14:15], s[2:3]
s_mov_b32 s3, 0x3c7abc9e
s_mov_b32 s2, 0x3b39803f
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[16:17], v[12:13], v[14:15]
v_fma_f64 v[10:11], v[12:13], v[14:15], -v[16:17]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[10:11], v[8:9], v[14:15], v[10:11]
v_add_f64 v[14:15], v[16:17], v[10:11]
s_delay_alu instid0(VALU_DEP_1)
v_add_f64 v[18:19], v[14:15], s[0:1]
v_add_f64 v[16:17], v[14:15], -v[16:17]
s_mov_b32 s1, 0xbfe55555
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_add_f64 v[22:23], v[18:19], s[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_add_f64 v[10:11], v[10:11], -v[16:17]
v_fma_f64 v[16:17], v[12:13], v[6:7], -v[20:21]
s_mov_b32 s1, 0x3c8543b0
s_mov_b32 s0, 0xd5df274d
v_add_f64 v[14:15], v[14:15], -v[22:23]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[10:11], v[10:11], s[0:1]
v_fma_f64 v[12:13], v[12:13], v[4:5], v[16:17]
s_mov_b32 s1, 0x3fe62e42
s_mov_b32 s0, 0xfefa39ef
v_ldexp_f64 v[4:5], v[4:5], 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[10:11], v[10:11], v[14:15]
v_fma_f64 v[8:9], v[8:9], v[6:7], v[12:13]
v_ldexp_f64 v[6:7], v[6:7], 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[12:13], v[18:19], v[10:11]
v_add_f64 v[14:15], v[20:21], v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[16:17], v[18:19], -v[12:13]
v_mul_f64 v[18:19], v[14:15], v[12:13]
v_add_f64 v[20:21], v[14:15], -v[20:21]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[10:11], v[10:11], v[16:17]
v_fma_f64 v[16:17], v[14:15], v[12:13], -v[18:19]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[8:9], v[8:9], -v[20:21]
v_fma_f64 v[10:11], v[14:15], v[10:11], v[16:17]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[8:9], v[8:9], v[12:13], v[10:11]
v_frexp_exp_i32_f64_e32 v12, v[2:3]
v_add_f64 v[10:11], v[18:19], v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_co_ci_u32_e32 v12, vcc_lo, 0, v12, vcc_lo
v_cmp_eq_f64_e32 vcc_lo, 1.0, v[2:3]
v_cvt_f64_i32_e32 v[12:13], v12
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f64 v[14:15], v[6:7], v[10:11]
v_add_f64 v[16:17], v[10:11], -v[18:19]
v_mul_f64 v[18:19], v[12:13], s[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[6:7], v[14:15], -v[6:7]
v_add_f64 v[8:9], v[8:9], -v[16:17]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_fma_f64 v[16:17], v[12:13], s[0:1], -v[18:19]
s_mov_b32 s1, 0xbfe62e42
v_add_f64 v[6:7], v[10:11], -v[6:7]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[4:5], v[4:5], v[8:9]
v_fma_f64 v[8:9], v[12:13], s[2:3], v[16:17]
s_mov_b32 s3, 0xbc7abc9e
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_f64 v[6:7], v[18:19], v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[10:11], v[14:15], v[4:5]
v_add_f64 v[18:19], v[6:7], -v[18:19]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f64 v[12:13], v[6:7], v[10:11]
v_add_f64 v[14:15], v[10:11], -v[14:15]
v_add_f64 v[8:9], v[8:9], -v[18:19]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[16:17], v[12:13], -v[6:7]
v_add_f64 v[4:5], v[4:5], -v[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f64 v[20:21], v[12:13], -v[16:17]
v_add_f64 v[10:11], v[10:11], -v[16:17]
v_add_f64 v[14:15], v[8:9], v[4:5]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[6:7], v[6:7], -v[20:21]
v_add_f64 v[6:7], v[10:11], v[6:7]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[10:11], v[14:15], -v[8:9]
v_add_f64 v[6:7], v[14:15], v[6:7]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f64 v[14:15], v[14:15], -v[10:11]
v_add_f64 v[4:5], v[4:5], -v[10:11]
v_add_f64 v[16:17], v[12:13], v[6:7]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f64 v[8:9], v[8:9], -v[14:15]
v_mov_b32_e32 v14, 0x3ff00000
v_cndmask_b32_e32 v15, 0x3fd66666, v14, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_f64 v[10:11], v[16:17], -v[12:13]
v_add_f64 v[4:5], v[4:5], v[8:9]
v_cndmask_b32_e32 v9, 0x4005ae14, v14, vcc_lo
v_cndmask_b32_e64 v8, 0x7ae147ae, 0, vcc_lo
v_cndmask_b32_e64 v14, 0x66666666, 0, vcc_lo
v_add_f64 v[6:7], v[6:7], -v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_f64 v[6:7], v[16:17], v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_f64 v[10:11], v[6:7], -v[16:17]
v_mul_f64 v[12:13], v[8:9], v[6:7]
v_mul_f64 v[16:17], v[14:15], v[6:7]
v_add_f64 v[4:5], v[4:5], -v[10:11]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fma_f64 v[10:11], v[8:9], v[6:7], -v[12:13]
v_fma_f64 v[6:7], v[14:15], v[6:7], -v[16:17]
v_cmp_class_f64_e64 vcc_lo, v[12:13], 0x204
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fma_f64 v[10:11], v[8:9], v[4:5], v[10:11]
v_fma_f64 v[4:5], v[14:15], v[4:5], v[6:7]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[18:19], v[12:13], v[10:11]
v_add_f64 v[22:23], v[16:17], v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_dual_cndmask_b32 v7, v19, v13 :: v_dual_cndmask_b32 v6, v18, v12
v_cmp_class_f64_e64 vcc_lo, v[16:17], 0x204
v_add_f64 v[12:13], v[18:19], -v[12:13]
v_cndmask_b32_e32 v25, v23, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f64 v[20:21], v[6:7], s[4:5]
v_add_f64 v[10:11], v[10:11], -v[12:13]
v_cndmask_b32_e32 v24, v22, v16, vcc_lo
v_cmp_neq_f64_e64 vcc_lo, 0x7ff00000, |v[6:7]|
v_add_f64 v[16:17], v[22:23], -v[16:17]
v_rndne_f64_e32 v[20:21], v[20:21]
v_cndmask_b32_e32 v11, 0, v11, vcc_lo
v_mul_f64 v[26:27], v[24:25], s[4:5]
s_mov_b32 s5, 0x3e928af3
s_mov_b32 s4, 0xfca7ab0c
v_cndmask_b32_e32 v10, 0, v10, vcc_lo
v_add_f64 v[4:5], v[4:5], -v[16:17]
v_fma_f64 v[28:29], v[20:21], s[0:1], v[6:7]
v_cvt_i32_f64_e32 v36, v[20:21]
v_rndne_f64_e32 v[26:27], v[26:27]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f64 v[28:29], v[20:21], s[2:3], v[28:29]
v_fma_f64 v[30:31], v[26:27], s[0:1], v[24:25]
s_mov_b32 s1, 0x3e5ade15
s_mov_b32 s0, 0x6a5dcb37
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], s[0:1], s[4:5]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[30:31], v[26:27], s[2:3], v[30:31]
s_mov_b32 s3, 0x3ec71dee
s_mov_b32 s2, 0x623fde64
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], v[32:33], s[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_fma_f64 v[34:35], v[30:31], s[0:1], s[4:5]
s_mov_b32 s1, 0x3efa0199
s_mov_b32 s0, 0x7c89e6b0
v_cmp_ngt_f64_e64 s4, 0xc090cc00, v[24:25]
v_cmp_class_f64_e64 s5, v[2:3], 0x204
v_fma_f64 v[32:33], v[28:29], v[32:33], s[0:1]
s_delay_alu instid0(VALU_DEP_4)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[2:3]
s_mov_b32 s3, 0x3f2a01a0
s_mov_b32 s2, 0x14761f6e
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], v[32:33], s[2:3]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[0:1]
s_mov_b32 s1, 0x3f56c16c
s_mov_b32 s0, 0x1852b7b0
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], v[32:33], s[0:1]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[2:3]
s_mov_b32 s3, 0x3f811111
s_mov_b32 s2, 0x11122322
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], v[32:33], s[2:3]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[0:1]
s_mov_b32 s1, 0x3fa55555
s_mov_b32 s0, 0x555502a1
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], v[32:33], s[0:1]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[2:3]
s_mov_b32 s3, 0x3fc55555
s_mov_b32 s2, 0x55555511
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], v[32:33], s[2:3]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[0:1]
s_mov_b32 s1, 0x3fe00000
s_mov_b32 s0, 11
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_fma_f64 v[32:33], v[28:29], v[32:33], s[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[2:3]
v_cmp_nlt_f64_e64 s3, 0x40900000, v[24:25]
v_cmp_neq_f64_e64 s2, 0x7ff00000, |v[24:25]|
v_fma_f64 v[32:33], v[28:29], v[32:33], 1.0
s_delay_alu instid0(VALU_DEP_4)
v_fma_f64 v[34:35], v[30:31], v[34:35], s[0:1]
v_cmp_nlt_f64_e64 s0, 0x40900000, v[6:7]
v_cmp_ngt_f64_e64 s1, 0xc090cc00, v[6:7]
v_trunc_f64_e32 v[6:7], v[8:9]
v_cndmask_b32_e64 v5, 0, v5, s2
v_cndmask_b32_e64 v4, 0, v4, s2
s_and_b32 s2, s4, s3
v_fma_f64 v[20:21], v[28:29], v[32:33], 1.0
v_cvt_i32_f64_e32 v32, v[26:27]
v_fma_f64 v[28:29], v[30:31], v[34:35], 1.0
s_and_b32 vcc_lo, s1, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_ldexp_f64 v[18:19], v[20:21], v36
v_mul_f64 v[20:21], v[8:9], 0.5
v_fma_f64 v[26:27], v[30:31], v[28:29], 1.0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v19, 0x7ff00000, v19, s0
v_trunc_f64_e32 v[12:13], v[20:21]
v_cndmask_b32_e32 v18, 0, v18, vcc_lo
v_cmp_eq_f64_e32 vcc_lo, v[6:7], v[8:9]
v_ldexp_f64 v[22:23], v[26:27], v32
v_mul_f64 v[26:27], v[14:15], 0.5
v_cndmask_b32_e64 v19, 0, v19, s1
v_trunc_f64_e32 v[8:9], v[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_fma_f64 v[6:7], v[18:19], v[10:11], v[18:19]
v_cmp_class_f64_e64 s1, v[18:19], 0x204
v_cmp_neq_f64_e64 s0, v[12:13], v[20:21]
v_cndmask_b32_e64 v12, 0x7ff00000, v23, s3
v_trunc_f64_e32 v[10:11], v[26:27]
v_cndmask_b32_e64 v16, v6, v18, s1
v_cndmask_b32_e64 v6, v7, v19, s1
v_cmp_eq_f64_e64 s1, 0, v[2:3]
v_cndmask_b32_e64 v7, 0, v12, s4
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cndmask_b32_e32 v18, 0, v16, vcc_lo
s_and_b32 s0, vcc_lo, s0
v_cndmask_b32_e64 v13, 0x3ff00000, v3, s0
v_cmp_neq_f64_e64 s3, v[10:11], v[26:27]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
v_bfi_b32 v13, 0x7fffffff, v6, v13
v_cndmask_b32_e64 v6, 0, v22, s2
v_cmp_eq_f64_e64 s2, v[8:9], v[14:15]
v_cndmask_b32_e64 v9, 0, v3, s0
v_cndmask_b32_e32 v17, 0x7ff80000, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_fma_f64 v[4:5], v[6:7], v[4:5], v[6:7]
v_cmp_class_f64_e64 s4, v[6:7], 0x204
v_cmp_gt_f64_e32 vcc_lo, 0, v[2:3]
v_cndmask_b32_e64 v8, 0x7ff00000, 0, s1
s_or_b32 s0, s1, s5
v_cmp_u_f64_e64 s1, v[2:3], v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_bfi_b32 v9, 0x7fffffff, v8, v9
s_and_b32 s3, s2, s3
v_cndmask_b32_e64 v2, 0x3ff00000, v3, s3
v_cndmask_b32_e64 v3, 0, v3, s3
v_cndmask_b32_e64 v5, v5, v7, s4
v_cndmask_b32_e32 v11, v13, v17, vcc_lo
v_cndmask_b32_e64 v4, v4, v6, s4
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_bfi_b32 v3, 0x7fffffff, v8, v3
v_cndmask_b32_e32 v10, v16, v18, vcc_lo
v_bfi_b32 v2, 0x7fffffff, v5, v2
v_cndmask_b32_e64 v9, v11, v9, s0
v_cndmask_b32_e64 v5, 0x7ff80000, v2, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
v_cndmask_b32_e64 v5, 0, v4, s2
v_cndmask_b32_e64 v6, v2, v3, s0
s_delay_alu instid0(VALU_DEP_2)
v_cndmask_b32_e32 v4, v4, v5, vcc_lo
s_or_b32 s0, s1, s0
v_cndmask_b32_e64 v3, v9, 0x7ff80000, s1
v_cndmask_b32_e64 v2, v10, 0, s0
v_cndmask_b32_e64 v5, v6, 0x7ff80000, s1
v_cndmask_b32_e64 v4, v4, 0, s0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mul_f64 v[6:7], v[4:5], 0x40140000
v_div_scale_f64 v[8:9], null, v[4:5], v[4:5], v[2:3]
v_add_f64 v[22:23], v[2:3], v[4:5]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fma_f64 v[6:7], v[2:3], 2.0, v[6:7]
v_rcp_f64_e32 v[10:11], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_div_scale_f64 v[12:13], null, v[6:7], v[6:7], v[4:5]
v_add_f64 v[22:23], v[22:23], v[6:7]
s_waitcnt_depctr 0xfff
v_fma_f64 v[16:17], -v[8:9], v[10:11], 1.0
v_rcp_f64_e32 v[14:15], v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_fma_f64 v[10:11], v[10:11], v[16:17], v[10:11]
s_waitcnt_depctr 0xfff
v_fma_f64 v[16:17], -v[12:13], v[14:15], 1.0
v_fma_f64 v[18:19], -v[8:9], v[10:11], 1.0
v_fma_f64 v[14:15], v[14:15], v[16:17], v[14:15]
v_div_scale_f64 v[16:17], vcc_lo, v[2:3], v[4:5], v[2:3]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fma_f64 v[10:11], v[10:11], v[18:19], v[10:11]
v_fma_f64 v[18:19], -v[12:13], v[14:15], 1.0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_f64 v[20:21], v[16:17], v[10:11]
v_fma_f64 v[14:15], v[14:15], v[18:19], v[14:15]
v_div_scale_f64 v[18:19], s0, v[4:5], v[6:7], v[4:5]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f64 v[8:9], -v[8:9], v[20:21], v[16:17]
v_mul_f64 v[16:17], v[18:19], v[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fmas_f64 v[8:9], v[8:9], v[10:11], v[20:21]
s_mov_b32 vcc_lo, s0
v_fma_f64 v[10:11], -v[12:13], v[16:17], v[18:19]
v_fma_f64 v[12:13], v[2:3], v[4:5], v[22:23]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_div_fixup_f64 v[2:3], v[8:9], v[4:5], v[2:3]
v_div_fmas_f64 v[8:9], v[10:11], v[14:15], v[16:17]
v_add_co_u32 v0, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_f64 v[2:3], v[2:3], v[12:13]
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
v_div_fixup_f64 v[4:5], v[8:9], v[6:7], v[4:5]
s_delay_alu instid0(VALU_DEP_1)
v_add_f64 v[2:3], v[4:5], v[2:3]
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8MyKernelPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 37
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8MyKernelPdS_, .Lfunc_end0-_Z8MyKernelPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8MyKernelPdS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8MyKernelPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 37
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* Small CUDA exercise to try to improve efficiency by using two
separate streams to set up a staged copying and execution (when
instead of one large copy, followed by one large kernel
computation, one does it in many small chunks, with copying and
computing done in parallel in two streams, after first chunk was
copied to the device).
For the purpose of this exercise, ignore the copying of the
results from the device to host at the end; only do the staged copy
and execute for the copying of the initial data to the device + the
kernel.
We use cudaMallocHost to allocate arrays on host in pinned memory,
which is both results in faster copying to/from GPU (compared to malloc),
and also a CUDA requirement for copying running concurrently with a kernel.
Make sure that the "Result:" value printed by the code is (almost)
identical in both original and modified versions of the code. If
not, you have a bug!
Hints: You will have to use the following CUDA functions:
- cudaStreamCreate
- cudaMemcpyAsync
- cudaStreamDestroy
- cudaDeviceSynchronize
* You will have to set up a for loop for multiple chunks copying and
kernel execution;
* Number of chunks should be a variable (or macro parameter); for
simplicity, make NMAX dividable by the number of chunks;
* In cudaMemcpyAsync the first two arguments should be "&d_A[ind],
&h_A[ind]", not "d_A, h_A", ind being the starting index for the
current chunk to copy;
* You'll have to pass two more arguments to the kernel -
ind and number of threads per chunk;
* Nblocks will be different - shoul be computed per chunk.
* You'll have to modify the kernel slightly;
At the end, you should get the timings (based on 10 runs, NMAX=1000000,
BLOCK_SIZE=128) similar to this:
NCHUNKS t, ms
- 2.76 - the original (non-staged) version of the code
1 2.72 - result is similar to the non-staged code
2 2.08 - even with only 2 chunks, we already see 33% speedup
4 1.79
5 1.75
10 1.72 - seems to be the best timing, 60% faster than the original code
20 1.87 - as NCHUNKS increases, the results get worse. Why?
100 3.53 - for too many NCHUNKS results can get even worse than in non-staged code
To compile:
nvcc -arch=sm_20 -O2 staged.cu -o staged
The best/average timings:
../best_time.sh ./staged
*/
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Number of times to run the test (for better timings accuracy):
#define NTESTS 100
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Total number of threads (total number of elements to process in the kernel):
#define NMAX 1000000
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The kernel:
__global__ void MyKernel (double *d_A, double *d_B)
{
double x, y, z;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= NMAX)
return;
// Some meaningless cpu-intensive computation:
x = pow(d_A[i], 2.71);
y = pow(d_A[i], 0.35);
z = 2*x + 5*y;
d_B[i] = x + y + z + x*y + x/y + y/z;
return;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr, tdr01;
double restime, restime0, restime1;
int devid, devcount, error, Max_gridsize;
double *h_A, *h_B, *d_A, *d_B;
/* find number of device in current "context" */
hipGetDevice(&devid);
/* find how many devices are available */
if (hipGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
Max_gridsize = deviceProp.maxGridSize[0];
}
// Loop to run the timing test multiple times:
int kk;
for (kk=0; kk<NTESTS; kk++)
{
// Using cudaMallocHost (intead of malloc) to accelerate data copying:
// Initial data array on host:
if (error = hipHostMalloc (&h_A, NMAX*sizeof(double), hipHostMallocDefault))
{
printf ("Error %d\n", error);
exit (error);
}
// Results array on host:
if (error = hipHostMalloc (&h_B, NMAX*sizeof(double), hipHostMallocDefault))
{
printf ("Error %d\n", error);
exit (error);
}
// ALlocating arrays on GPU:
if (error = hipMalloc (&d_A, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = hipMalloc (&d_B, NMAX*sizeof(double)))
{
printf ("Error %d\n", error);
exit (error);
}
// Initializing the input array:
for (int i=0; i<NMAX; i++)
h_A[i] = (double)rand()/(double)RAND_MAX;
// Number of blocks of threads:
int Nblocks = (NMAX+BLOCK_SIZE-1) / BLOCK_SIZE;
if (Nblocks > Max_gridsize)
{
printf ("Nblocks > Max_gridsize! %d %d\n", Nblocks, Max_gridsize);
exit (1);
}
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
//--------------------------------------------------------------------------------
// Copying the data to device (we time it):
if (error = hipMemcpy (d_A, h_A, NMAX*sizeof(double), hipMemcpyHostToDevice))
{
printf ("Error %d\n", error);
exit (error);
}
// Intermediate timing, to measure timings separately for copying and kernel execution
// (Should be removed in the solution code)
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr01, NULL);
// The kernel call:
MyKernel <<<Nblocks, BLOCK_SIZE>>> (d_A, d_B);
//--------------------------------------------------------------------------------
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
// Copying the result back to host (we don't time it):
if (error = hipMemcpy (h_B, d_B, NMAX*sizeof(double), hipMemcpyDeviceToHost))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
// Adding up the results, for accuracy/correctness testing:
double result = 0.0;
for (int i=0; i<NMAX; i++)
{
result += h_B[i];
}
tdr = tdr0;
timeval_subtract (&restime0, &tdr01, &tdr);
tdr = tdr01;
timeval_subtract (&restime1, &tdr1, &tdr);
printf ("Individual timings: %e %e\n", restime0, restime1);
printf ("Result: %e\n\n", result);
printf ("Time: %e\n", restime);
hipHostFree (h_A);
hipHostFree (h_B);
hipFree (d_A);
hipFree (d_B);
} // kk loop
return 0;
} | .text
.file "staged.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z16timeval_subtractPdP7timevalS1_
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z16timeval_subtractPdP7timevalS1_
.p2align 4, 0x90
.type _Z16timeval_subtractPdP7timevalS1_,@function
_Z16timeval_subtractPdP7timevalS1_: # @_Z16timeval_subtractPdP7timevalS1_
.cfi_startproc
# %bb.0:
movq %rdx, %rcx
movq 8(%rdx), %r9
movq %r9, %rax
movabsq $4835703278458516699, %r8 # imm = 0x431BDE82D7B634DB
subq 8(%rsi), %rax
jle .LBB0_2
# %bb.1:
imulq %r8
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %edx # imm = 0xF4240
movslq %edx, %rdx
subq %rdx, %r9
movq %r9, 8(%rcx)
cltq
addq %rax, (%rcx)
.LBB0_2:
movq 8(%rsi), %rdx
movq 8(%rcx), %r9
movq %rdx, %rax
subq %r9, %rax
cmpq $1000001, %rax # imm = 0xF4241
jl .LBB0_4
# %bb.3:
movq %r9, %rax
subq %rdx, %rax
imulq %r8
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movabsq $4294967296000000, %rax # imm = 0xF424000000000
imulq %rdx, %rax
sarq $32, %rax
addq %r9, %rax
movq %rax, 8(%rcx)
movslq %edx, %rax
subq %rax, (%rcx)
.LBB0_4:
movq 8(%rsi), %rax
subq 8(%rcx), %rax
cvtsi2sd %rax, %xmm0
movq (%rsi), %rdx
divsd .LCPI0_0(%rip), %xmm0
xorl %eax, %eax
subq (%rcx), %rdx
cvtsi2sd %rdx, %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, (%rdi)
setl %al
retq
.Lfunc_end0:
.size _Z16timeval_subtractPdP7timevalS1_, .Lfunc_end0-_Z16timeval_subtractPdP7timevalS1_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__MyKernelPdS_ # -- Begin function _Z23__device_stub__MyKernelPdS_
.p2align 4, 0x90
.type _Z23__device_stub__MyKernelPdS_,@function
_Z23__device_stub__MyKernelPdS_: # @_Z23__device_stub__MyKernelPdS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8MyKernelPdS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z23__device_stub__MyKernelPdS_, .Lfunc_end1-_Z23__device_stub__MyKernelPdS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x41dfffffffc00000 # double 2147483647
.LCPI2_1:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1672, %rsp # imm = 0x688
.cfi_def_cfa_offset 1728
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 20(%rsp), %rdi
callq hipGetDevice
leaq 16(%rsp), %rdi
callq hipGetDeviceCount
testl %eax, %eax
jne .LBB2_36
# %bb.1:
cmpl $0, 16(%rsp)
je .LBB2_36
# %bb.2:
movabsq $4294967424, %rbx # imm = 0x100000080
movl 20(%rsp), %esi
leaq 192(%rsp), %r14
movq %r14, %rdi
callq hipGetDevicePropertiesR0600
movl 16(%rsp), %esi
movl 20(%rsp), %edx
xorl %r15d, %r15d
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl 552(%rsp), %esi
movl 556(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 528(%rsp), %eax
movl %eax, 12(%rsp) # 4-byte Spill
leaq 7685(%rbx), %rax
movq %rax, 72(%rsp) # 8-byte Spill
jmp .LBB2_3
.p2align 4, 0x90
.LBB2_33: # in Loop: Header=BB2_3 Depth=1
movsd %xmm1, 64(%rsp) # 8-byte Spill
movq %rcx, %rax
subq %r14, %rax
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movslq %edx, %rax
movabsq $4294967296000000, %r8 # imm = 0xF424000000000
imulq %r8, %rdx
sarq $32, %rdx
addq %rdx, %rcx
subq %rax, %rdi
.LBB2_34: # %_Z16timeval_subtractPdP7timevalS1_.exit66
# in Loop: Header=BB2_3 Depth=1
subq %r13, %rsi
xorps %xmm1, %xmm1
cvtsi2sd %rsi, %xmm1
addsd %xmm1, %xmm0
subq %rdi, %rbp
subq %rcx, %r14
cvtsi2sd %r14, %xmm2
divsd .LCPI2_1(%rip), %xmm2
xorps %xmm1, %xmm1
cvtsi2sd %rbp, %xmm1
addsd %xmm2, %xmm1
movl $.L.str.6, %edi
movb $2, %al
callq printf
movl $.L.str.7, %edi
movsd 64(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movl $.L.str.8, %edi
movsd 24(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movq 56(%rsp), %rdi
callq hipHostFree
movq 48(%rsp), %rdi
callq hipHostFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
incl %r15d
cmpl $100, %r15d
je .LBB2_35
.LBB2_3: # =>This Loop Header: Depth=1
# Child Loop BB2_8 Depth 2
# Child Loop BB2_23 Depth 2
movl $8000000, %esi # imm = 0x7A1200
leaq 56(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
testl %eax, %eax
jne .LBB2_37
# %bb.4: # in Loop: Header=BB2_3 Depth=1
movl $8000000, %esi # imm = 0x7A1200
leaq 48(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
testl %eax, %eax
jne .LBB2_38
# %bb.5: # in Loop: Header=BB2_3 Depth=1
movl $8000000, %esi # imm = 0x7A1200
leaq 40(%rsp), %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB2_38
# %bb.6: # in Loop: Header=BB2_3 Depth=1
movl $8000000, %esi # imm = 0x7A1200
leaq 32(%rsp), %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB2_38
# %bb.7: # %.preheader95.preheader
# in Loop: Header=BB2_3 Depth=1
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_8: # %.preheader95
# Parent Loop BB2_3 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI2_0(%rip), %xmm0
movq 56(%rsp), %rax
movsd %xmm0, (%rax,%rbx,8)
incq %rbx
cmpq $1000000, %rbx # imm = 0xF4240
jne .LBB2_8
# %bb.9: # in Loop: Header=BB2_3 Depth=1
cmpl $7813, 12(%rsp) # 4-byte Folded Reload
# imm = 0x1E85
jl .LBB2_39
# %bb.10: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.11: # in Loop: Header=BB2_3 Depth=1
leaq 176(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 40(%rsp), %rdi
movq 56(%rsp), %rsi
movl $8000000, %edx # imm = 0x7A1200
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_38
# %bb.12: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.13: # in Loop: Header=BB2_3 Depth=1
leaq 144(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 72(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movabsq $4294967424, %rdx # imm = 0x100000080
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_15
# %bb.14: # in Loop: Header=BB2_3 Depth=1
movq 40(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
leaq 136(%rsp), %rax
movq %rax, 192(%rsp)
leaq 128(%rsp), %rax
movq %rax, 200(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
movl $_Z8MyKernelPdS_, %edi
leaq 192(%rsp), %r9
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_15: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.16: # in Loop: Header=BB2_3 Depth=1
leaq 160(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 176(%rsp), %r13
movq 184(%rsp), %r12
movq 168(%rsp), %r14
movq %r12, %rax
movq %r13, %rbx
movq %r12, %rcx
subq %r14, %rax
jle .LBB2_18
# %bb.17: # in Loop: Header=BB2_3 Depth=1
movabsq $4835703278458516699, %rcx # imm = 0x431BDE82D7B634DB
imulq %rcx
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %ecx # imm = 0xF4240
movslq %ecx, %rdx
movq %r12, %rcx
subq %rdx, %rcx
movslq %eax, %rbx
addq %r13, %rbx
.LBB2_18: # in Loop: Header=BB2_3 Depth=1
movq %r14, %rax
subq %rcx, %rax
cmpq $1000001, %rax # imm = 0xF4241
jl .LBB2_20
# %bb.19: # in Loop: Header=BB2_3 Depth=1
movq %rcx, %rax
subq %r14, %rax
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movslq %edx, %rax
movabsq $4294967296000000, %rsi # imm = 0xF424000000000
imulq %rsi, %rdx
sarq $32, %rdx
addq %rdx, %rcx
subq %rax, %rbx
.LBB2_20: # %_Z16timeval_subtractPdP7timevalS1_.exit
# in Loop: Header=BB2_3 Depth=1
movq %r14, %rax
subq %rcx, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movq 160(%rsp), %rbp
divsd .LCPI2_1(%rip), %xmm0
movsd %xmm0, 24(%rsp) # 8-byte Spill
movq 48(%rsp), %rdi
movq 32(%rsp), %rsi
movl $8000000, %edx # imm = 0x7A1200
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_38
# %bb.21: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.22: # %.preheader
# in Loop: Header=BB2_3 Depth=1
movq %rbp, %rax
subq %rbx, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movsd 24(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movapd %xmm1, %xmm0
xorpd %xmm1, %xmm1
movq 48(%rsp), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_23: # Parent Loop BB2_3 Depth=1
# => This Inner Loop Header: Depth=2
addsd (%rax,%rcx,8), %xmm1
incq %rcx
cmpq $1000000, %rcx # imm = 0xF4240
jne .LBB2_23
# %bb.24: # in Loop: Header=BB2_3 Depth=1
movq 152(%rsp), %rcx
movq %r12, %rax
subq %rcx, %rax
jle .LBB2_26
# %bb.25: # in Loop: Header=BB2_3 Depth=1
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %edx # imm = 0xF4240
movslq %edx, %rdx
subq %rdx, %r12
cltq
addq %rax, %r13
.LBB2_26: # in Loop: Header=BB2_3 Depth=1
movq %rcx, %rax
subq %r12, %rax
cmpq $1000001, %rax # imm = 0xF4241
movsd %xmm0, 24(%rsp) # 8-byte Spill
jl .LBB2_28
# %bb.27: # in Loop: Header=BB2_3 Depth=1
movq %r12, %rax
subq %rcx, %rax
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movslq %edx, %rax
movabsq $4294967296000000, %rsi # imm = 0xF424000000000
imulq %rsi, %rdx
sarq $32, %rdx
addq %rdx, %r12
subq %rax, %r13
.LBB2_28: # %_Z16timeval_subtractPdP7timevalS1_.exit63
# in Loop: Header=BB2_3 Depth=1
movq %rcx, %rax
subq %r12, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movq 144(%rsp), %rsi
divsd .LCPI2_1(%rip), %xmm0
movq %rcx, %rax
subq %r14, %rax
jle .LBB2_29
# %bb.30: # in Loop: Header=BB2_3 Depth=1
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %edx # imm = 0xF4240
movslq %edx, %rdx
subq %rdx, %rcx
movslq %eax, %rdi
addq %rsi, %rdi
jmp .LBB2_31
.p2align 4, 0x90
.LBB2_29: # in Loop: Header=BB2_3 Depth=1
movq %rsi, %rdi
.LBB2_31: # in Loop: Header=BB2_3 Depth=1
movq %r14, %rax
subq %rcx, %rax
cmpq $1000001, %rax # imm = 0xF4241
jge .LBB2_33
# %bb.32: # in Loop: Header=BB2_3 Depth=1
movsd %xmm1, 64(%rsp) # 8-byte Spill
jmp .LBB2_34
.LBB2_35:
xorl %eax, %eax
addq $1672, %rsp # imm = 0x688
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_38:
.cfi_def_cfa_offset 1728
movl $.L.str.4, %edi
movl %eax, %esi
movl %eax, %ebx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq exit
.LBB2_39:
movl $.L.str.5, %edi
movl $7813, %esi # imm = 0x1E85
movl 12(%rsp), %edx # 4-byte Reload
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.LBB2_37:
movl %eax, %r14d
movl $.L.str.4, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
movl %r14d, %edi
callq exit
.LBB2_36:
movl $.Lstr, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8MyKernelPdS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8MyKernelPdS_,@object # @_Z8MyKernelPdS_
.section .rodata,"a",@progbits
.globl _Z8MyKernelPdS_
.p2align 3, 0x0
_Z8MyKernelPdS_:
.quad _Z23__device_stub__MyKernelPdS_
.size _Z8MyKernelPdS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Device count, devid: %d %d\n"
.size .L.str.1, 28
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Device: %s\n"
.size .L.str.2, 12
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n"
.size .L.str.3, 48
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Error %d\n"
.size .L.str.4, 10
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Nblocks > Max_gridsize! %d %d\n"
.size .L.str.5, 33
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Individual timings: %e %e\n"
.size .L.str.6, 27
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Result: %e\n\n"
.size .L.str.7, 13
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Time: %e\n"
.size .L.str.8, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8MyKernelPdS_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "No CUDA devices!"
.size .Lstr, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__MyKernelPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8MyKernelPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f5ee8_00000000-6_staged.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2074:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2074:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16timeval_subtractPdP7timevalS1_
.type _Z16timeval_subtractPdP7timevalS1_, @function
_Z16timeval_subtractPdP7timevalS1_:
.LFB2070:
.cfi_startproc
endbr64
movq %rdx, %rcx
movq 8(%rsi), %rax
movq 8(%rdx), %r8
cmpq %r8, %rax
jge .L4
movq %r8, %r9
subq %rax, %r9
movabsq $4835703278458516699, %rdx
movq %r9, %rax
imulq %rdx
sarq $18, %rdx
sarq $63, %r9
subq %r9, %rdx
addl $1, %edx
imull $1000000, %edx, %eax
cltq
subq %rax, %r8
movq %r8, 8(%rcx)
movslq %edx, %rdx
addq %rdx, (%rcx)
.L4:
movq 8(%rsi), %rax
movq 8(%rcx), %r8
movq %rax, %rdx
subq %r8, %rdx
cmpq $1000000, %rdx
jle .L5
movq %r8, %r9
subq %rax, %r9
movabsq $4835703278458516699, %rdx
movq %r9, %rax
imulq %rdx
sarq $18, %rdx
sarq $63, %r9
subq %r9, %rdx
imull $1000000, %edx, %eax
cltq
addq %r8, %rax
movq %rax, 8(%rcx)
movslq %edx, %rdx
subq %rdx, (%rcx)
.L5:
movq 8(%rsi), %rax
subq 8(%rcx), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC0(%rip), %xmm0
movq (%rsi), %rax
subq (%rcx), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, (%rdi)
movq (%rcx), %rax
cmpq %rax, (%rsi)
setl %al
movzbl %al, %eax
ret
.cfi_endproc
.LFE2070:
.size _Z16timeval_subtractPdP7timevalS1_, .-_Z16timeval_subtractPdP7timevalS1_
.globl _Z29__device_stub__Z8MyKernelPdS_PdS_
.type _Z29__device_stub__Z8MyKernelPdS_PdS_, @function
_Z29__device_stub__Z8MyKernelPdS_PdS_:
.LFB2096:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L10
.L6:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8MyKernelPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L6
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2096:
.size _Z29__device_stub__Z8MyKernelPdS_PdS_, .-_Z29__device_stub__Z8MyKernelPdS_PdS_
.globl _Z8MyKernelPdS_
.type _Z8MyKernelPdS_, @function
_Z8MyKernelPdS_:
.LFB2097:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z8MyKernelPdS_PdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2097:
.size _Z8MyKernelPdS_, .-_Z8MyKernelPdS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Device count, devid: %d %d\n"
.LC3:
.string "Device: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n"
.section .rodata.str1.1
.LC5:
.string "No CUDA devices!\n"
.LC6:
.string "Error %d\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "Nblocks > Max_gridsize! %d %d\n"
.section .rodata.str1.1
.LC9:
.string "Individual timings: %e %e\n"
.LC10:
.string "Result: %e\n\n"
.LC11:
.string "Time: %e\n"
.text
.globl main
.type main, @function
main:
.LFB2071:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1224, %rsp
.cfi_def_cfa_offset 1280
movq %fs:40, %rax
movq %rax, 1208(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
call cudaGetDevice@PLT
leaq 28(%rsp), %rdi
call cudaGetDeviceCount@PLT
testl %eax, %eax
jne .L15
cmpl $0, 28(%rsp)
je .L15
leaq 176(%rsp), %rbx
movl 24(%rsp), %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movl 24(%rsp), %ecx
movl 28(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 540(%rsp), %ecx
movl 536(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 512(%rsp), %r12d
movl $100, %ebx
leaq .LC9(%rip), %r14
leaq .LC10(%rip), %r13
leaq .LC11(%rip), %r15
jmp .L16
.L15:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L40:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L41:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L42:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L43:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L44:
movl %r12d, %ecx
movl $7813, %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L45:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L46:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L47:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L25:
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L36
leaq 128(%rsp), %rbp
movl $0, %esi
movq %rbp, %rdi
call gettimeofday@PLT
movdqa 112(%rsp), %xmm3
movaps %xmm3, 144(%rsp)
leaq 144(%rsp), %rdx
leaq 32(%rsp), %rdi
movq %rbp, %rsi
call _Z16timeval_subtractPdP7timevalS1_
movl $2, %ecx
movl $8000000, %edx
movq 80(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L37
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L38
movq 64(%rsp), %rax
leaq 8000000(%rax), %rdx
movq $0x000000000, 8(%rsp)
.L29:
movsd 8(%rsp), %xmm2
addsd (%rax), %xmm2
movsd %xmm2, 8(%rsp)
addq $8, %rax
cmpq %rdx, %rax
jne .L29
movdqa 112(%rsp), %xmm4
movaps %xmm4, 144(%rsp)
leaq 144(%rsp), %rbp
leaq 160(%rsp), %rsi
leaq 40(%rsp), %rdi
movq %rbp, %rdx
call _Z16timeval_subtractPdP7timevalS1_
movdqa 160(%rsp), %xmm5
movaps %xmm5, 144(%rsp)
leaq 128(%rsp), %rsi
leaq 48(%rsp), %rdi
movq %rbp, %rdx
call _Z16timeval_subtractPdP7timevalS1_
movsd 48(%rsp), %xmm1
movsd 40(%rsp), %xmm0
movq %r14, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movsd 8(%rsp), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movsd 32(%rsp), %xmm0
movq %r15, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 56(%rsp), %rdi
call cudaFreeHost@PLT
movq 64(%rsp), %rdi
call cudaFreeHost@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
subl $1, %ebx
je .L39
.L16:
leaq 56(%rsp), %rdi
movl $0, %edx
movl $8000000, %esi
call cudaHostAlloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L40
leaq 64(%rsp), %rdi
movl $0, %edx
movl $8000000, %esi
call cudaHostAlloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L41
leaq 72(%rsp), %rdi
movl $8000000, %esi
call cudaMalloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L42
leaq 80(%rsp), %rdi
movl $8000000, %esi
call cudaMalloc@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L43
movl $0, %ebp
.L20:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC7(%rip), %xmm0
movq 56(%rsp), %rax
movsd %xmm0, (%rax,%rbp)
addq $8, %rbp
cmpq $8000000, %rbp
jne .L20
cmpl $7812, %r12d
jle .L44
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L45
leaq 112(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movl $1, %ecx
movl $8000000, %edx
movq 56(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L46
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L47
leaq 160(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movl $128, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $7813, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 100(%rsp), %rdx
movl $1, %ecx
movq 88(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L25
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call _Z29__device_stub__Z8MyKernelPdS_PdS_
jmp .L25
.L36:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L37:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L38:
movl %eax, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L39:
movq 1208(%rsp), %rax
subq %fs:40, %rax
jne .L48
movl $0, %eax
addq $1224, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2071:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z8MyKernelPdS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2099:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z8MyKernelPdS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2099:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1093567616
.align 8
.LC7:
.long -4194304
.long 1105199103
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "staged.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z16timeval_subtractPdP7timevalS1_
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z16timeval_subtractPdP7timevalS1_
.p2align 4, 0x90
.type _Z16timeval_subtractPdP7timevalS1_,@function
_Z16timeval_subtractPdP7timevalS1_: # @_Z16timeval_subtractPdP7timevalS1_
.cfi_startproc
# %bb.0:
movq %rdx, %rcx
movq 8(%rdx), %r9
movq %r9, %rax
movabsq $4835703278458516699, %r8 # imm = 0x431BDE82D7B634DB
subq 8(%rsi), %rax
jle .LBB0_2
# %bb.1:
imulq %r8
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %edx # imm = 0xF4240
movslq %edx, %rdx
subq %rdx, %r9
movq %r9, 8(%rcx)
cltq
addq %rax, (%rcx)
.LBB0_2:
movq 8(%rsi), %rdx
movq 8(%rcx), %r9
movq %rdx, %rax
subq %r9, %rax
cmpq $1000001, %rax # imm = 0xF4241
jl .LBB0_4
# %bb.3:
movq %r9, %rax
subq %rdx, %rax
imulq %r8
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movabsq $4294967296000000, %rax # imm = 0xF424000000000
imulq %rdx, %rax
sarq $32, %rax
addq %r9, %rax
movq %rax, 8(%rcx)
movslq %edx, %rax
subq %rax, (%rcx)
.LBB0_4:
movq 8(%rsi), %rax
subq 8(%rcx), %rax
cvtsi2sd %rax, %xmm0
movq (%rsi), %rdx
divsd .LCPI0_0(%rip), %xmm0
xorl %eax, %eax
subq (%rcx), %rdx
cvtsi2sd %rdx, %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, (%rdi)
setl %al
retq
.Lfunc_end0:
.size _Z16timeval_subtractPdP7timevalS1_, .Lfunc_end0-_Z16timeval_subtractPdP7timevalS1_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__MyKernelPdS_ # -- Begin function _Z23__device_stub__MyKernelPdS_
.p2align 4, 0x90
.type _Z23__device_stub__MyKernelPdS_,@function
_Z23__device_stub__MyKernelPdS_: # @_Z23__device_stub__MyKernelPdS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8MyKernelPdS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z23__device_stub__MyKernelPdS_, .Lfunc_end1-_Z23__device_stub__MyKernelPdS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x41dfffffffc00000 # double 2147483647
.LCPI2_1:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1672, %rsp # imm = 0x688
.cfi_def_cfa_offset 1728
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 20(%rsp), %rdi
callq hipGetDevice
leaq 16(%rsp), %rdi
callq hipGetDeviceCount
testl %eax, %eax
jne .LBB2_36
# %bb.1:
cmpl $0, 16(%rsp)
je .LBB2_36
# %bb.2:
movabsq $4294967424, %rbx # imm = 0x100000080
movl 20(%rsp), %esi
leaq 192(%rsp), %r14
movq %r14, %rdi
callq hipGetDevicePropertiesR0600
movl 16(%rsp), %esi
movl 20(%rsp), %edx
xorl %r15d, %r15d
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl 552(%rsp), %esi
movl 556(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 528(%rsp), %eax
movl %eax, 12(%rsp) # 4-byte Spill
leaq 7685(%rbx), %rax
movq %rax, 72(%rsp) # 8-byte Spill
jmp .LBB2_3
.p2align 4, 0x90
.LBB2_33: # in Loop: Header=BB2_3 Depth=1
movsd %xmm1, 64(%rsp) # 8-byte Spill
movq %rcx, %rax
subq %r14, %rax
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movslq %edx, %rax
movabsq $4294967296000000, %r8 # imm = 0xF424000000000
imulq %r8, %rdx
sarq $32, %rdx
addq %rdx, %rcx
subq %rax, %rdi
.LBB2_34: # %_Z16timeval_subtractPdP7timevalS1_.exit66
# in Loop: Header=BB2_3 Depth=1
subq %r13, %rsi
xorps %xmm1, %xmm1
cvtsi2sd %rsi, %xmm1
addsd %xmm1, %xmm0
subq %rdi, %rbp
subq %rcx, %r14
cvtsi2sd %r14, %xmm2
divsd .LCPI2_1(%rip), %xmm2
xorps %xmm1, %xmm1
cvtsi2sd %rbp, %xmm1
addsd %xmm2, %xmm1
movl $.L.str.6, %edi
movb $2, %al
callq printf
movl $.L.str.7, %edi
movsd 64(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movl $.L.str.8, %edi
movsd 24(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movq 56(%rsp), %rdi
callq hipHostFree
movq 48(%rsp), %rdi
callq hipHostFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
incl %r15d
cmpl $100, %r15d
je .LBB2_35
.LBB2_3: # =>This Loop Header: Depth=1
# Child Loop BB2_8 Depth 2
# Child Loop BB2_23 Depth 2
movl $8000000, %esi # imm = 0x7A1200
leaq 56(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
testl %eax, %eax
jne .LBB2_37
# %bb.4: # in Loop: Header=BB2_3 Depth=1
movl $8000000, %esi # imm = 0x7A1200
leaq 48(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
testl %eax, %eax
jne .LBB2_38
# %bb.5: # in Loop: Header=BB2_3 Depth=1
movl $8000000, %esi # imm = 0x7A1200
leaq 40(%rsp), %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB2_38
# %bb.6: # in Loop: Header=BB2_3 Depth=1
movl $8000000, %esi # imm = 0x7A1200
leaq 32(%rsp), %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB2_38
# %bb.7: # %.preheader95.preheader
# in Loop: Header=BB2_3 Depth=1
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_8: # %.preheader95
# Parent Loop BB2_3 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI2_0(%rip), %xmm0
movq 56(%rsp), %rax
movsd %xmm0, (%rax,%rbx,8)
incq %rbx
cmpq $1000000, %rbx # imm = 0xF4240
jne .LBB2_8
# %bb.9: # in Loop: Header=BB2_3 Depth=1
cmpl $7813, 12(%rsp) # 4-byte Folded Reload
# imm = 0x1E85
jl .LBB2_39
# %bb.10: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.11: # in Loop: Header=BB2_3 Depth=1
leaq 176(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 40(%rsp), %rdi
movq 56(%rsp), %rsi
movl $8000000, %edx # imm = 0x7A1200
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_38
# %bb.12: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.13: # in Loop: Header=BB2_3 Depth=1
leaq 144(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 72(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movabsq $4294967424, %rdx # imm = 0x100000080
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_15
# %bb.14: # in Loop: Header=BB2_3 Depth=1
movq 40(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
leaq 136(%rsp), %rax
movq %rax, 192(%rsp)
leaq 128(%rsp), %rax
movq %rax, 200(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
movl $_Z8MyKernelPdS_, %edi
leaq 192(%rsp), %r9
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_15: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.16: # in Loop: Header=BB2_3 Depth=1
leaq 160(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 176(%rsp), %r13
movq 184(%rsp), %r12
movq 168(%rsp), %r14
movq %r12, %rax
movq %r13, %rbx
movq %r12, %rcx
subq %r14, %rax
jle .LBB2_18
# %bb.17: # in Loop: Header=BB2_3 Depth=1
movabsq $4835703278458516699, %rcx # imm = 0x431BDE82D7B634DB
imulq %rcx
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %ecx # imm = 0xF4240
movslq %ecx, %rdx
movq %r12, %rcx
subq %rdx, %rcx
movslq %eax, %rbx
addq %r13, %rbx
.LBB2_18: # in Loop: Header=BB2_3 Depth=1
movq %r14, %rax
subq %rcx, %rax
cmpq $1000001, %rax # imm = 0xF4241
jl .LBB2_20
# %bb.19: # in Loop: Header=BB2_3 Depth=1
movq %rcx, %rax
subq %r14, %rax
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movslq %edx, %rax
movabsq $4294967296000000, %rsi # imm = 0xF424000000000
imulq %rsi, %rdx
sarq $32, %rdx
addq %rdx, %rcx
subq %rax, %rbx
.LBB2_20: # %_Z16timeval_subtractPdP7timevalS1_.exit
# in Loop: Header=BB2_3 Depth=1
movq %r14, %rax
subq %rcx, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movq 160(%rsp), %rbp
divsd .LCPI2_1(%rip), %xmm0
movsd %xmm0, 24(%rsp) # 8-byte Spill
movq 48(%rsp), %rdi
movq 32(%rsp), %rsi
movl $8000000, %edx # imm = 0x7A1200
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_38
# %bb.21: # in Loop: Header=BB2_3 Depth=1
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_38
# %bb.22: # %.preheader
# in Loop: Header=BB2_3 Depth=1
movq %rbp, %rax
subq %rbx, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movsd 24(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movapd %xmm1, %xmm0
xorpd %xmm1, %xmm1
movq 48(%rsp), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_23: # Parent Loop BB2_3 Depth=1
# => This Inner Loop Header: Depth=2
addsd (%rax,%rcx,8), %xmm1
incq %rcx
cmpq $1000000, %rcx # imm = 0xF4240
jne .LBB2_23
# %bb.24: # in Loop: Header=BB2_3 Depth=1
movq 152(%rsp), %rcx
movq %r12, %rax
subq %rcx, %rax
jle .LBB2_26
# %bb.25: # in Loop: Header=BB2_3 Depth=1
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %edx # imm = 0xF4240
movslq %edx, %rdx
subq %rdx, %r12
cltq
addq %rax, %r13
.LBB2_26: # in Loop: Header=BB2_3 Depth=1
movq %rcx, %rax
subq %r12, %rax
cmpq $1000001, %rax # imm = 0xF4241
movsd %xmm0, 24(%rsp) # 8-byte Spill
jl .LBB2_28
# %bb.27: # in Loop: Header=BB2_3 Depth=1
movq %r12, %rax
subq %rcx, %rax
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $63, %rax
sarq $18, %rdx
addq %rax, %rdx
movslq %edx, %rax
movabsq $4294967296000000, %rsi # imm = 0xF424000000000
imulq %rsi, %rdx
sarq $32, %rdx
addq %rdx, %r12
subq %rax, %r13
.LBB2_28: # %_Z16timeval_subtractPdP7timevalS1_.exit63
# in Loop: Header=BB2_3 Depth=1
movq %rcx, %rax
subq %r12, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movq 144(%rsp), %rsi
divsd .LCPI2_1(%rip), %xmm0
movq %rcx, %rax
subq %r14, %rax
jle .LBB2_29
# %bb.30: # in Loop: Header=BB2_3 Depth=1
movabsq $4835703278458516699, %rdx # imm = 0x431BDE82D7B634DB
imulq %rdx
movq %rdx, %rax
shrq $18, %rax
shrq $63, %rdx
addl %edx, %eax
incl %eax
imull $1000000, %eax, %edx # imm = 0xF4240
movslq %edx, %rdx
subq %rdx, %rcx
movslq %eax, %rdi
addq %rsi, %rdi
jmp .LBB2_31
.p2align 4, 0x90
.LBB2_29: # in Loop: Header=BB2_3 Depth=1
movq %rsi, %rdi
.LBB2_31: # in Loop: Header=BB2_3 Depth=1
movq %r14, %rax
subq %rcx, %rax
cmpq $1000001, %rax # imm = 0xF4241
jge .LBB2_33
# %bb.32: # in Loop: Header=BB2_3 Depth=1
movsd %xmm1, 64(%rsp) # 8-byte Spill
jmp .LBB2_34
.LBB2_35:
xorl %eax, %eax
addq $1672, %rsp # imm = 0x688
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_38:
.cfi_def_cfa_offset 1728
movl $.L.str.4, %edi
movl %eax, %esi
movl %eax, %ebx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq exit
.LBB2_39:
movl $.L.str.5, %edi
movl $7813, %esi # imm = 0x1E85
movl 12(%rsp), %edx # 4-byte Reload
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.LBB2_37:
movl %eax, %r14d
movl $.L.str.4, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
movl %r14d, %edi
callq exit
.LBB2_36:
movl $.Lstr, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8MyKernelPdS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8MyKernelPdS_,@object # @_Z8MyKernelPdS_
.section .rodata,"a",@progbits
.globl _Z8MyKernelPdS_
.p2align 3, 0x0
_Z8MyKernelPdS_:
.quad _Z23__device_stub__MyKernelPdS_
.size _Z8MyKernelPdS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Device count, devid: %d %d\n"
.size .L.str.1, 28
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Device: %s\n"
.size .L.str.2, 12
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n"
.size .L.str.3, 48
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Error %d\n"
.size .L.str.4, 10
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Nblocks > Max_gridsize! %d %d\n"
.size .L.str.5, 33
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Individual timings: %e %e\n"
.size .L.str.6, 27
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Result: %e\n\n"
.size .L.str.7, 13
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Time: %e\n"
.size .L.str.8, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8MyKernelPdS_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "No CUDA devices!"
.size .Lstr, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__MyKernelPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8MyKernelPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#define N 10000
float *a_cpu, *b_cpu, *c_cpu;
float *a_gpu, *b_gpu, *c_gpu;
__global__ void add(float *a_gpu, float *b_gpu, float *c_gpu, int n){
for(int i = 0; i < n; i++){
c_gpu[i] = a_gpu[i] + b_gpu[i];
}
}
int main() {
a_cpu = (float*)malloc(sizeof(float)*N);
b_cpu = (float*)malloc(sizeof(float)*N);
c_cpu = (float*)malloc(sizeof(float)*N);
cudaMalloc((float**)&a_gpu, sizeof(float) * N);
cudaMalloc((float**)&b_gpu, sizeof(float) * N);
cudaMalloc((float**)&c_gpu, sizeof(float) * N);
for (int i = 1; i <= N; i++){
a_cpu[i] = -i;
b_cpu[i] = i*2;
c_cpu[i] = a_cpu[i] + b_cpu[i];
}
cudaMemcpy(a_gpu, a_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(b_gpu, b_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(c_gpu, c_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
add<<<1,10>>>(a_gpu, b_gpu, c_gpu, N);
cudaMemcpy(c_cpu, c_gpu, sizeof(float) * N, cudaMemcpyDeviceToHost);
for (int i = 1; i <= N; i++){
printf("%.0f\n", c_cpu[i]);
}
cudaFree(a_gpu);
cudaFree(b_gpu);
cudaFree(c_gpu);
free(a_cpu);
free(b_cpu);
free(c_cpu);
getchar();
return 0;
} | code for sm_80
Function : _Z3addPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ IADD3 R2, R0.reuse, -0x1, RZ ; /* 0xffffffff00027810 */
/* 0x040fe20007ffe0ff */
/*0050*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0060*/ LOP3.LUT R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */
/* 0x000fe200078ec0ff */
/*0070*/ ULDC.64 UR12, c[0x0][0x118] ; /* 0x00004600000c7ab9 */
/* 0x000fe20000000a00 */
/*0080*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fda0003f06070 */
/*0090*/ @!P0 BRA 0xb80 ; /* 0x00000ae000008947 */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R8, -R0, c[0x0][0x178], RZ ; /* 0x00005e0000087a10 */
/* 0x000fe20007ffe1ff */
/*00b0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*00c0*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */
/* 0x000fe20000000f00 */
/*00d0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fe200078e00ff */
/*00e0*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f04270 */
/*00f0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */
/* 0x000fe200078e00ff */
/*0100*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */
/* 0x000fe20000000f00 */
/*0110*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff077624 */
/* 0x000fe200078e00ff */
/*0120*/ MOV R6, c[0x0][0x160] ; /* 0x0000580000067a02 */
/* 0x000fd20000000f00 */
/*0130*/ @!P0 BRA 0x990 ; /* 0x0000085000008947 */
/* 0x000fea0003800000 */
/*0140*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*0150*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0160*/ @!P1 BRA 0x670 ; /* 0x0000050000009947 */
/* 0x000fea0003800000 */
/*0170*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0180*/ LDG.E R9, [R2.64] ; /* 0x0000000c02097981 */
/* 0x000ea8000c1e1900 */
/*0190*/ LDG.E R10, [R6.64] ; /* 0x0000000c060a7981 */
/* 0x000ea4000c1e1900 */
/*01a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x004fca0000000000 */
/*01b0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e8000c10190c */
/*01c0*/ LDG.E R10, [R2.64+0x4] ; /* 0x0000040c020a7981 */
/* 0x000ea8000c1e1900 */
/*01d0*/ LDG.E R11, [R6.64+0x4] ; /* 0x0000040c060b7981 */
/* 0x000ea4000c1e1900 */
/*01e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x004fca0000000000 */
/*01f0*/ STG.E [R4.64+0x4], R11 ; /* 0x0000040b04007986 */
/* 0x0003e8000c10190c */
/*0200*/ LDG.E R10, [R2.64+0x8] ; /* 0x0000080c020a7981 */
/* 0x000ea8000c1e1900 */
/*0210*/ LDG.E R13, [R6.64+0x8] ; /* 0x0000080c060d7981 */
/* 0x000ea4000c1e1900 */
/*0220*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x004fca0000000000 */
/*0230*/ STG.E [R4.64+0x8], R13 ; /* 0x0000080d04007986 */
/* 0x0005e8000c10190c */
/*0240*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c0c020a7981 */
/* 0x000ee8000c1e1900 */
/*0250*/ LDG.E R15, [R6.64+0xc] ; /* 0x00000c0c060f7981 */
/* 0x000ee4000c1e1900 */
/*0260*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x008fca0000000000 */
/*0270*/ STG.E [R4.64+0xc], R15 ; /* 0x00000c0f04007986 */
/* 0x0007e8000c10190c */
/*0280*/ LDG.E R9, [R2.64+0x10] ; /* 0x0000100c02097981 */
/* 0x001f28000c1e1900 */
/*0290*/ LDG.E R10, [R6.64+0x10] ; /* 0x0000100c060a7981 */
/* 0x000f24000c1e1900 */
/*02a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*02b0*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */
/* 0x0001e8000c10190c */
/*02c0*/ LDG.E R10, [R2.64+0x14] ; /* 0x0000140c020a7981 */
/* 0x000f28000c1e1900 */
/*02d0*/ LDG.E R11, [R6.64+0x14] ; /* 0x0000140c060b7981 */
/* 0x002f24000c1e1900 */
/*02e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*02f0*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */
/* 0x0003e8000c10190c */
/*0300*/ LDG.E R10, [R2.64+0x18] ; /* 0x0000180c020a7981 */
/* 0x000f28000c1e1900 */
/*0310*/ LDG.E R13, [R6.64+0x18] ; /* 0x0000180c060d7981 */
/* 0x004f24000c1e1900 */
/*0320*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0330*/ STG.E [R4.64+0x18], R13 ; /* 0x0000180d04007986 */
/* 0x0005e8000c10190c */
/*0340*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c0c020a7981 */
/* 0x000f28000c1e1900 */
/*0350*/ LDG.E R15, [R6.64+0x1c] ; /* 0x00001c0c060f7981 */
/* 0x008f24000c1e1900 */
/*0360*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x010fca0000000000 */
/*0370*/ STG.E [R4.64+0x1c], R15 ; /* 0x00001c0f04007986 */
/* 0x0007e8000c10190c */
/*0380*/ LDG.E R9, [R2.64+0x20] ; /* 0x0000200c02097981 */
/* 0x001f28000c1e1900 */
/*0390*/ LDG.E R10, [R6.64+0x20] ; /* 0x0000200c060a7981 */
/* 0x000f24000c1e1900 */
/*03a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*03b0*/ STG.E [R4.64+0x20], R9 ; /* 0x0000200904007986 */
/* 0x0001e8000c10190c */
/*03c0*/ LDG.E R10, [R2.64+0x24] ; /* 0x0000240c020a7981 */
/* 0x000f28000c1e1900 */
/*03d0*/ LDG.E R11, [R6.64+0x24] ; /* 0x0000240c060b7981 */
/* 0x002f24000c1e1900 */
/*03e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*03f0*/ STG.E [R4.64+0x24], R11 ; /* 0x0000240b04007986 */
/* 0x0003e8000c10190c */
/*0400*/ LDG.E R10, [R2.64+0x28] ; /* 0x0000280c020a7981 */
/* 0x000f28000c1e1900 */
/*0410*/ LDG.E R13, [R6.64+0x28] ; /* 0x0000280c060d7981 */
/* 0x004f24000c1e1900 */
/*0420*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0430*/ STG.E [R4.64+0x28], R13 ; /* 0x0000280d04007986 */
/* 0x0005e8000c10190c */
/*0440*/ LDG.E R10, [R2.64+0x2c] ; /* 0x00002c0c020a7981 */
/* 0x000f28000c1e1900 */
/*0450*/ LDG.E R15, [R6.64+0x2c] ; /* 0x00002c0c060f7981 */
/* 0x008f24000c1e1900 */
/*0460*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x010fca0000000000 */
/*0470*/ STG.E [R4.64+0x2c], R15 ; /* 0x00002c0f04007986 */
/* 0x0007e8000c10190c */
/*0480*/ LDG.E R9, [R2.64+0x30] ; /* 0x0000300c02097981 */
/* 0x001f28000c1e1900 */
/*0490*/ LDG.E R10, [R6.64+0x30] ; /* 0x0000300c060a7981 */
/* 0x000f24000c1e1900 */
/*04a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*04b0*/ STG.E [R4.64+0x30], R9 ; /* 0x0000300904007986 */
/* 0x000fe8000c10190c */
/*04c0*/ LDG.E R10, [R2.64+0x34] ; /* 0x0000340c020a7981 */
/* 0x000f28000c1e1900 */
/*04d0*/ LDG.E R11, [R6.64+0x34] ; /* 0x0000340c060b7981 */
/* 0x002f24000c1e1900 */
/*04e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*04f0*/ STG.E [R4.64+0x34], R11 ; /* 0x0000340b04007986 */
/* 0x0001e8000c10190c */
/*0500*/ LDG.E R10, [R2.64+0x38] ; /* 0x0000380c020a7981 */
/* 0x000f28000c1e1900 */
/*0510*/ LDG.E R13, [R6.64+0x38] ; /* 0x0000380c060d7981 */
/* 0x004f22000c1e1900 */
/*0520*/ IADD3 R12, P1, R2, 0x40, RZ ; /* 0x00000040020c7810 */
/* 0x000fe40007f3e0ff */
/*0530*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fe20007ffe0ff */
/*0540*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0550*/ STG.E [R4.64+0x38], R13 ; /* 0x0000380d04007986 */
/* 0x000fe8000c10190c */
/*0560*/ LDG.E R10, [R2.64+0x3c] ; /* 0x00003c0c020a7981 */
/* 0x0002a8000c1e1900 */
/*0570*/ LDG.E R15, [R6.64+0x3c] ; /* 0x00003c0c060f7981 */
/* 0x0086a2000c1e1900 */
/*0580*/ IADD3.X R11, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff0b7210 */
/* 0x001fe20000ffe4ff */
/*0590*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */
/* 0x000fe2000fffe03f */
/*05a0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fc40003f24270 */
/*05b0*/ IADD3 R9, P3, R4, 0x40, RZ ; /* 0x0000004004097810 */
/* 0x000fe20007f7e0ff */
/*05c0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000c */
/*05d0*/ IADD3 R14, P2, R6, 0x40, RZ ; /* 0x00000040060e7810 */
/* 0x000fe40007f5e0ff */
/*05e0*/ MOV R3, R11 ; /* 0x0000000b00037202 */
/* 0x000fc60000000f00 */
/*05f0*/ IMAD.X R7, RZ, RZ, R7, P2 ; /* 0x000000ffff077224 */
/* 0x008fe400010e0607 */
/*0600*/ IMAD.MOV.U32 R6, RZ, RZ, R14 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000e */
/*0610*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x004fe20000000000 */
/*0620*/ IADD3.X R10, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff0a7210 */
/* 0x000fc80001ffe4ff */
/*0630*/ STG.E [R4.64+0x3c], R15 ; /* 0x00003c0f04007986 */
/* 0x0001e4000c10190c */
/*0640*/ MOV R4, R9 ; /* 0x0000000900047202 */
/* 0x001fe20000000f00 */
/*0650*/ IMAD.MOV.U32 R5, RZ, RZ, R10 ; /* 0x000000ffff057224 */
/* 0x000fe200078e000a */
/*0660*/ @P1 BRA 0x180 ; /* 0xfffffb1000001947 */
/* 0x000fea000383ffff */
/*0670*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*0680*/ @!P1 BRA 0x970 ; /* 0x000002e000009947 */
/* 0x000fea0003800000 */
/*0690*/ LDG.E R9, [R2.64] ; /* 0x0000000c02097981 */
/* 0x000ea8000c1e1900 */
/*06a0*/ LDG.E R10, [R6.64] ; /* 0x0000000c060a7981 */
/* 0x000ea4000c1e1900 */
/*06b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x004fca0000000000 */
/*06c0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e8000c10190c */
/*06d0*/ LDG.E R10, [R2.64+0x4] ; /* 0x0000040c020a7981 */
/* 0x000ea8000c1e1900 */
/*06e0*/ LDG.E R11, [R6.64+0x4] ; /* 0x0000040c060b7981 */
/* 0x000ea4000c1e1900 */
/*06f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x004fca0000000000 */
/*0700*/ STG.E [R4.64+0x4], R11 ; /* 0x0000040b04007986 */
/* 0x0003e8000c10190c */
/*0710*/ LDG.E R10, [R2.64+0x8] ; /* 0x0000080c020a7981 */
/* 0x000ea8000c1e1900 */
/*0720*/ LDG.E R13, [R6.64+0x8] ; /* 0x0000080c060d7981 */
/* 0x000ea4000c1e1900 */
/*0730*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x004fca0000000000 */
/*0740*/ STG.E [R4.64+0x8], R13 ; /* 0x0000080d04007986 */
/* 0x0005e8000c10190c */
/*0750*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c0c020a7981 */
/* 0x000ee8000c1e1900 */
/*0760*/ LDG.E R15, [R6.64+0xc] ; /* 0x00000c0c060f7981 */
/* 0x000ee4000c1e1900 */
/*0770*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x008fca0000000000 */
/*0780*/ STG.E [R4.64+0xc], R15 ; /* 0x00000c0f04007986 */
/* 0x0007e8000c10190c */
/*0790*/ LDG.E R9, [R2.64+0x10] ; /* 0x0000100c02097981 */
/* 0x001f28000c1e1900 */
/*07a0*/ LDG.E R10, [R6.64+0x10] ; /* 0x0000100c060a7981 */
/* 0x000f24000c1e1900 */
/*07b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*07c0*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */
/* 0x000fe8000c10190c */
/*07d0*/ LDG.E R10, [R2.64+0x14] ; /* 0x0000140c020a7981 */
/* 0x000f28000c1e1900 */
/*07e0*/ LDG.E R11, [R6.64+0x14] ; /* 0x0000140c060b7981 */
/* 0x002f24000c1e1900 */
/*07f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*0800*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */
/* 0x0001e8000c10190c */
/*0810*/ LDG.E R10, [R2.64+0x18] ; /* 0x0000180c020a7981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R13, [R6.64+0x18] ; /* 0x0000180c060d7981 */
/* 0x004f24000c1e1900 */
/*0830*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0840*/ STG.E [R4.64+0x18], R13 ; /* 0x0000180d04007986 */
/* 0x000fe8000c10190c */
/*0850*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c0c020a7981 */
/* 0x0002a8000c1e1900 */
/*0860*/ LDG.E R15, [R6.64+0x1c] ; /* 0x00001c0c060f7981 */
/* 0x0086a2000c1e1900 */
/*0870*/ IADD3 R11, P2, R2, 0x20, RZ ; /* 0x00000020020b7810 */
/* 0x001fe20007f5e0ff */
/*0880*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */
/* 0x000fe2000fffe03f */
/*0890*/ IADD3 R9, P3, R4, 0x20, RZ ; /* 0x0000002004097810 */
/* 0x000fc40007f7e0ff */
/*08a0*/ IADD3 R14, P1, R6, 0x20, RZ ; /* 0x00000020060e7810 */
/* 0x000fe20007f3e0ff */
/*08b0*/ IMAD.X R12, RZ, RZ, R3, P2 ; /* 0x000000ffff0c7224 */
/* 0x000fe200010e0603 */
/*08c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*08d0*/ IMAD.MOV.U32 R2, RZ, RZ, R11 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000b */
/*08e0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe20007ffe0ff */
/*08f0*/ IMAD.MOV.U32 R6, RZ, RZ, R14 ; /* 0x000000ffff067224 */
/* 0x008fe200078e000e */
/*0900*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe40000ffe4ff */
/*0910*/ MOV R3, R12 ; /* 0x0000000c00037202 */
/* 0x000fe20000000f00 */
/*0920*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x004fe20000000000 */
/*0930*/ IADD3.X R10, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff0a7210 */
/* 0x000fc80001ffe4ff */
/*0940*/ STG.E [R4.64+0x1c], R15 ; /* 0x00001c0f04007986 */
/* 0x0001e4000c10190c */
/*0950*/ MOV R4, R9 ; /* 0x0000000900047202 */
/* 0x001fe20000000f00 */
/*0960*/ IMAD.MOV.U32 R5, RZ, RZ, R10 ; /* 0x000000ffff057224 */
/* 0x000fe400078e000a */
/*0970*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0980*/ @!P0 BRA 0xb80 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*0990*/ LDG.E R9, [R2.64] ; /* 0x0000000c02097981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ LDG.E R10, [R6.64] ; /* 0x0000000c060a7981 */
/* 0x000ea4000c1e1900 */
/*09b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x004fca0000000000 */
/*09c0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x000fe8000c10190c */
/*09d0*/ LDG.E R10, [R2.64+0x4] ; /* 0x0000040c020a7981 */
/* 0x000ea8000c1e1900 */
/*09e0*/ LDG.E R11, [R6.64+0x4] ; /* 0x0000040c060b7981 */
/* 0x000ea4000c1e1900 */
/*09f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x004fca0000000000 */
/*0a00*/ STG.E [R4.64+0x4], R11 ; /* 0x0000040b04007986 */
/* 0x0001e8000c10190c */
/*0a10*/ LDG.E R10, [R2.64+0x8] ; /* 0x0000080c020a7981 */
/* 0x000ea8000c1e1900 */
/*0a20*/ LDG.E R13, [R6.64+0x8] ; /* 0x0000080c060d7981 */
/* 0x000ea2000c1e1900 */
/*0a30*/ IADD3 R12, P0, R2, 0x10, RZ ; /* 0x00000010020c7810 */
/* 0x000fe40007f1e0ff */
/*0a40*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fe20007ffe0ff */
/*0a50*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x004fca0000000000 */
/*0a60*/ STG.E [R4.64+0x8], R13 ; /* 0x0000080d04007986 */
/* 0x000fe8000c10190c */
/*0a70*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c0c020a7981 */
/* 0x0002a8000c1e1900 */
/*0a80*/ LDG.E R15, [R6.64+0xc] ; /* 0x00000c0c060f7981 */
/* 0x0006a2000c1e1900 */
/*0a90*/ IADD3.X R11, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff0b7210 */
/* 0x001fe200007fe4ff */
/*0aa0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000fffe03f */
/*0ab0*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc40003f05270 */
/*0ac0*/ IADD3 R9, P2, R4, 0x10, RZ ; /* 0x0000001004097810 */
/* 0x000fe20007f5e0ff */
/*0ad0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000c */
/*0ae0*/ IADD3 R14, P1, R6, 0x10, RZ ; /* 0x00000010060e7810 */
/* 0x000fe40007f3e0ff */
/*0af0*/ MOV R3, R11 ; /* 0x0000000b00037202 */
/* 0x000fc60000000f00 */
/*0b00*/ IMAD.X R7, RZ, RZ, R7, P1 ; /* 0x000000ffff077224 */
/* 0x008fe400008e0607 */
/*0b10*/ IMAD.MOV.U32 R6, RZ, RZ, R14 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000e */
/*0b20*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x004fe20000000000 */
/*0b30*/ IADD3.X R10, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff0a7210 */
/* 0x000fc800017fe4ff */
/*0b40*/ STG.E [R4.64+0xc], R15 ; /* 0x00000c0f04007986 */
/* 0x0001e4000c10190c */
/*0b50*/ MOV R4, R9 ; /* 0x0000000900047202 */
/* 0x001fe20000000f00 */
/*0b60*/ IMAD.MOV.U32 R5, RZ, RZ, R10 ; /* 0x000000ffff057224 */
/* 0x000fe200078e000a */
/*0b70*/ @P0 BRA 0x990 ; /* 0xfffffe1000000947 */
/* 0x000fea000383ffff */
/*0b80*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0b90*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0ba0*/ UMOV UR5, 0x4 ; /* 0x0000000400057882 */
/* 0x000fe40000000000 */
/*0bb0*/ ULDC.64 UR6, c[0x0][0x170] ; /* 0x00005c0000067ab9 */
/* 0x000fe40000000a00 */
/*0bc0*/ ULDC.64 UR8, c[0x0][0x168] ; /* 0x00005a0000087ab9 */
/* 0x000fe40000000a00 */
/*0bd0*/ ULDC.64 UR10, c[0x0][0x160] ; /* 0x00005800000a7ab9 */
/* 0x000fe40000000a00 */
/*0be0*/ UIMAD.WIDE UR6, UR4, UR5, UR6 ; /* 0x00000005040672a5 */
/* 0x000fe4000f8e0206 */
/*0bf0*/ UIMAD.WIDE UR8, UR4, UR5, UR8 ; /* 0x00000005040872a5 */
/* 0x000fc4000f8e0208 */
/*0c00*/ UIMAD.WIDE UR4, UR4, UR5, UR10 ; /* 0x00000005040472a5 */
/* 0x000fca000f8e020a */
/*0c10*/ MOV R2, UR8 ; /* 0x0000000800027c02 */
/* 0x000fe20008000f00 */
/*0c20*/ IMAD.U32 R5, RZ, RZ, UR5 ; /* 0x00000005ff057e24 */
/* 0x000fe2000f8e00ff */
/*0c30*/ MOV R4, UR4 ; /* 0x0000000400047c02 */
/* 0x000fe20008000f00 */
/*0c40*/ IMAD.U32 R3, RZ, RZ, UR9 ; /* 0x00000009ff037e24 */
/* 0x000fc8000f8e00ff */
/*0c50*/ LDG.E R5, [R4.64] ; /* 0x0000000c04057981 */
/* 0x000ea8000c1e1900 */
/*0c60*/ LDG.E R2, [R2.64] ; /* 0x0000000c02027981 */
/* 0x000ea2000c1e1900 */
/*0c70*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fc80007ffe0ff */
/*0c80*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*0c90*/ UIADD3 UR8, UP1, UR8, 0x4, URZ ; /* 0x0000000408087890 */
/* 0x000fe2000ff3e03f */
/*0ca0*/ MOV R6, UR6 ; /* 0x0000000600067c02 */
/* 0x001fe20008000f00 */
/*0cb0*/ UIADD3 UR6, UP0, UR6, 0x4, URZ ; /* 0x0000000406067890 */
/* 0x000fe2000ff1e03f */
/*0cc0*/ MOV R7, UR7 ; /* 0x0000000700077c02 */
/* 0x000fe20008000f00 */
/*0cd0*/ UIADD3 UR4, UP2, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe4000ff5e03f */
/*0ce0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe400087fe43f */
/*0cf0*/ UIADD3.X UR9, URZ, UR9, URZ, UP1, !UPT ; /* 0x000000093f097290 */
/* 0x000fe40008ffe43f */
/*0d00*/ UIADD3.X UR5, URZ, UR5, URZ, UP2, !UPT ; /* 0x000000053f057290 */
/* 0x000fe200097fe43f */
/*0d10*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*0d20*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e2000c10190c */
/*0d30*/ @P0 BRA 0xc10 ; /* 0xfffffed000000947 */
/* 0x000fea000383ffff */
/*0d40*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0d50*/ BRA 0xd50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0d60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0da0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0db0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0de0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0df0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#define N 10000
float *a_cpu, *b_cpu, *c_cpu;
float *a_gpu, *b_gpu, *c_gpu;
__global__ void add(float *a_gpu, float *b_gpu, float *c_gpu, int n){
for(int i = 0; i < n; i++){
c_gpu[i] = a_gpu[i] + b_gpu[i];
}
}
int main() {
a_cpu = (float*)malloc(sizeof(float)*N);
b_cpu = (float*)malloc(sizeof(float)*N);
c_cpu = (float*)malloc(sizeof(float)*N);
cudaMalloc((float**)&a_gpu, sizeof(float) * N);
cudaMalloc((float**)&b_gpu, sizeof(float) * N);
cudaMalloc((float**)&c_gpu, sizeof(float) * N);
for (int i = 1; i <= N; i++){
a_cpu[i] = -i;
b_cpu[i] = i*2;
c_cpu[i] = a_cpu[i] + b_cpu[i];
}
cudaMemcpy(a_gpu, a_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(b_gpu, b_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(c_gpu, c_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
add<<<1,10>>>(a_gpu, b_gpu, c_gpu, N);
cudaMemcpy(c_cpu, c_gpu, sizeof(float) * N, cudaMemcpyDeviceToHost);
for (int i = 1; i <= N; i++){
printf("%.0f\n", c_cpu[i]);
}
cudaFree(a_gpu);
cudaFree(b_gpu);
cudaFree(c_gpu);
free(a_cpu);
free(b_cpu);
free(c_cpu);
getchar();
return 0;
} | .file "tmpxft_0018e52c_00000000-6_othermain.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z3addPfS_S_iPfS_S_i
.type _Z27__device_stub__Z3addPfS_S_iPfS_S_i, @function
_Z27__device_stub__Z3addPfS_S_iPfS_S_i:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z27__device_stub__Z3addPfS_S_iPfS_S_i, .-_Z27__device_stub__Z3addPfS_S_iPfS_S_i
.globl _Z3addPfS_S_i
.type _Z3addPfS_S_i, @function
_Z3addPfS_S_i:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addPfS_S_i, .-_Z3addPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%.0f\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $40, %rsp
.cfi_def_cfa_offset 64
movl $40000, %edi
call malloc@PLT
movq %rax, a_cpu(%rip)
movl $40000, %edi
call malloc@PLT
movq %rax, b_cpu(%rip)
movl $40000, %edi
call malloc@PLT
movq %rax, c_cpu(%rip)
movl $40000, %esi
leaq a_gpu(%rip), %rdi
call cudaMalloc@PLT
movl $40000, %esi
leaq b_gpu(%rip), %rdi
call cudaMalloc@PLT
movl $40000, %esi
leaq c_gpu(%rip), %rdi
call cudaMalloc@PLT
movl $4, %eax
movl $1, %edx
.L12:
movl %edx, %ecx
negl %ecx
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
movq a_cpu(%rip), %rcx
movss %xmm0, (%rcx,%rax)
leal (%rdx,%rdx), %ecx
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
movq b_cpu(%rip), %rcx
movss %xmm0, (%rcx,%rax)
movq a_cpu(%rip), %rcx
movss (%rcx,%rax), %xmm0
movq b_cpu(%rip), %rcx
addss (%rcx,%rax), %xmm0
movq c_cpu(%rip), %rcx
movss %xmm0, (%rcx,%rax)
addl $1, %edx
addq $4, %rax
cmpl $10001, %edx
jne .L12
movl $1, %ecx
movl $40000, %edx
movq a_cpu(%rip), %rsi
movq a_gpu(%rip), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000, %edx
movq b_cpu(%rip), %rsi
movq b_gpu(%rip), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000, %edx
movq c_cpu(%rip), %rsi
movq c_gpu(%rip), %rdi
call cudaMemcpy@PLT
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movl $2, %ecx
movl $40000, %edx
movq c_gpu(%rip), %rsi
movq c_cpu(%rip), %rdi
call cudaMemcpy@PLT
movl $4, %ebx
leaq .LC0(%rip), %rbp
.L14:
movq c_cpu(%rip), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $40004, %rbx
jne .L14
movq a_gpu(%rip), %rdi
call cudaFree@PLT
movq b_gpu(%rip), %rdi
call cudaFree@PLT
movq c_gpu(%rip), %rdi
call cudaFree@PLT
movq a_cpu(%rip), %rdi
call free@PLT
movq b_cpu(%rip), %rdi
call free@PLT
movq c_cpu(%rip), %rdi
call free@PLT
movq stdin(%rip), %rdi
call getc@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movl $10000, %ecx
movq c_gpu(%rip), %rdx
movq b_gpu(%rip), %rsi
movq a_gpu(%rip), %rdi
call _Z27__device_stub__Z3addPfS_S_iPfS_S_i
jmp .L13
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3addPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl c_gpu
.bss
.align 8
.type c_gpu, @object
.size c_gpu, 8
c_gpu:
.zero 8
.globl b_gpu
.align 8
.type b_gpu, @object
.size b_gpu, 8
b_gpu:
.zero 8
.globl a_gpu
.align 8
.type a_gpu, @object
.size a_gpu, 8
a_gpu:
.zero 8
.globl c_cpu
.align 8
.type c_cpu, @object
.size c_cpu, 8
c_cpu:
.zero 8
.globl b_cpu
.align 8
.type b_cpu, @object
.size b_cpu, 8
b_cpu:
.zero 8
.globl a_cpu
.align 8
.type a_cpu, @object
.size a_cpu, 8
a_cpu:
.zero 8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#define N 10000
float *a_cpu, *b_cpu, *c_cpu;
float *a_gpu, *b_gpu, *c_gpu;
__global__ void add(float *a_gpu, float *b_gpu, float *c_gpu, int n){
for(int i = 0; i < n; i++){
c_gpu[i] = a_gpu[i] + b_gpu[i];
}
}
int main() {
a_cpu = (float*)malloc(sizeof(float)*N);
b_cpu = (float*)malloc(sizeof(float)*N);
c_cpu = (float*)malloc(sizeof(float)*N);
cudaMalloc((float**)&a_gpu, sizeof(float) * N);
cudaMalloc((float**)&b_gpu, sizeof(float) * N);
cudaMalloc((float**)&c_gpu, sizeof(float) * N);
for (int i = 1; i <= N; i++){
a_cpu[i] = -i;
b_cpu[i] = i*2;
c_cpu[i] = a_cpu[i] + b_cpu[i];
}
cudaMemcpy(a_gpu, a_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(b_gpu, b_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(c_gpu, c_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
add<<<1,10>>>(a_gpu, b_gpu, c_gpu, N);
cudaMemcpy(c_cpu, c_gpu, sizeof(float) * N, cudaMemcpyDeviceToHost);
for (int i = 1; i <= N; i++){
printf("%.0f\n", c_cpu[i]);
}
cudaFree(a_gpu);
cudaFree(b_gpu);
cudaFree(c_gpu);
free(a_cpu);
free(b_cpu);
free(c_cpu);
getchar();
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#define N 10000
float *a_cpu, *b_cpu, *c_cpu;
float *a_gpu, *b_gpu, *c_gpu;
__global__ void add(float *a_gpu, float *b_gpu, float *c_gpu, int n){
for(int i = 0; i < n; i++){
c_gpu[i] = a_gpu[i] + b_gpu[i];
}
}
int main() {
a_cpu = (float*)malloc(sizeof(float)*N);
b_cpu = (float*)malloc(sizeof(float)*N);
c_cpu = (float*)malloc(sizeof(float)*N);
hipMalloc((float**)&a_gpu, sizeof(float) * N);
hipMalloc((float**)&b_gpu, sizeof(float) * N);
hipMalloc((float**)&c_gpu, sizeof(float) * N);
for (int i = 1; i <= N; i++){
a_cpu[i] = -i;
b_cpu[i] = i*2;
c_cpu[i] = a_cpu[i] + b_cpu[i];
}
hipMemcpy(a_gpu, a_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(b_gpu, b_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(c_gpu, c_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
add<<<1,10>>>(a_gpu, b_gpu, c_gpu, N);
hipMemcpy(c_cpu, c_gpu, sizeof(float) * N, hipMemcpyDeviceToHost);
for (int i = 1; i <= N; i++){
printf("%.0f\n", c_cpu[i]);
}
hipFree(a_gpu);
hipFree(b_gpu);
hipFree(c_gpu);
free(a_cpu);
free(b_cpu);
free(c_cpu);
getchar();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#define N 10000
float *a_cpu, *b_cpu, *c_cpu;
float *a_gpu, *b_gpu, *c_gpu;
__global__ void add(float *a_gpu, float *b_gpu, float *c_gpu, int n){
for(int i = 0; i < n; i++){
c_gpu[i] = a_gpu[i] + b_gpu[i];
}
}
int main() {
a_cpu = (float*)malloc(sizeof(float)*N);
b_cpu = (float*)malloc(sizeof(float)*N);
c_cpu = (float*)malloc(sizeof(float)*N);
hipMalloc((float**)&a_gpu, sizeof(float) * N);
hipMalloc((float**)&b_gpu, sizeof(float) * N);
hipMalloc((float**)&c_gpu, sizeof(float) * N);
for (int i = 1; i <= N; i++){
a_cpu[i] = -i;
b_cpu[i] = i*2;
c_cpu[i] = a_cpu[i] + b_cpu[i];
}
hipMemcpy(a_gpu, a_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(b_gpu, b_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(c_gpu, c_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
add<<<1,10>>>(a_gpu, b_gpu, c_gpu, N);
hipMemcpy(c_cpu, c_gpu, sizeof(float) * N, hipMemcpyDeviceToHost);
for (int i = 1; i <= N; i++){
printf("%.0f\n", c_cpu[i]);
}
hipFree(a_gpu);
hipFree(b_gpu);
hipFree(c_gpu);
free(a_cpu);
free(b_cpu);
free(c_cpu);
getchar();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_i
.globl _Z3addPfS_S_i
.p2align 8
.type _Z3addPfS_S_i,@function
_Z3addPfS_S_i:
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_3
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_mov_b32_e32 v0, 0
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_add_i32 s2, s2, -1
s_add_u32 s4, s4, 4
s_addc_u32 s5, s5, 0
s_add_u32 s6, s6, 4
s_addc_u32 s7, s7, 0
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_add_u32 s0, s0, 4
s_addc_u32 s1, s1, 0
s_cmp_eq_u32 s2, 0
s_cbranch_scc0 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_i, .Lfunc_end0-_Z3addPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#define N 10000
float *a_cpu, *b_cpu, *c_cpu;
float *a_gpu, *b_gpu, *c_gpu;
__global__ void add(float *a_gpu, float *b_gpu, float *c_gpu, int n){
for(int i = 0; i < n; i++){
c_gpu[i] = a_gpu[i] + b_gpu[i];
}
}
int main() {
a_cpu = (float*)malloc(sizeof(float)*N);
b_cpu = (float*)malloc(sizeof(float)*N);
c_cpu = (float*)malloc(sizeof(float)*N);
hipMalloc((float**)&a_gpu, sizeof(float) * N);
hipMalloc((float**)&b_gpu, sizeof(float) * N);
hipMalloc((float**)&c_gpu, sizeof(float) * N);
for (int i = 1; i <= N; i++){
a_cpu[i] = -i;
b_cpu[i] = i*2;
c_cpu[i] = a_cpu[i] + b_cpu[i];
}
hipMemcpy(a_gpu, a_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(b_gpu, b_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(c_gpu, c_cpu, sizeof(float) * N, hipMemcpyHostToDevice);
add<<<1,10>>>(a_gpu, b_gpu, c_gpu, N);
hipMemcpy(c_cpu, c_gpu, sizeof(float) * N, hipMemcpyDeviceToHost);
for (int i = 1; i <= N; i++){
printf("%.0f\n", c_cpu[i]);
}
hipFree(a_gpu);
hipFree(b_gpu);
hipFree(c_gpu);
free(a_cpu);
free(b_cpu);
free(c_cpu);
getchar();
return 0;
} | .text
.file "othermain.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPfS_S_i # -- Begin function _Z18__device_stub__addPfS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__addPfS_S_i,@function
_Z18__device_stub__addPfS_S_i: # @_Z18__device_stub__addPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__addPfS_S_i, .Lfunc_end0-_Z18__device_stub__addPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $112, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -16
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, a_cpu(%rip)
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, b_cpu(%rip)
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, c_cpu(%rip)
movl $a_gpu, %edi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movl $b_gpu, %edi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movl $c_gpu, %edi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movq a_cpu(%rip), %rax
movq b_cpu(%rip), %rcx
movl $-1, %edx
xorl %edi, %edi
movq c_cpu(%rip), %rsi
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %edx, %xmm0
leaq 2(%rdi), %r8
xorps %xmm1, %xmm1
cvtsi2ss %r8d, %xmm1
movss %xmm0, 4(%rax,%rdi,2)
movss %xmm1, 4(%rcx,%rdi,2)
addss 4(%rax,%rdi,2), %xmm1
movss %xmm1, 4(%rsi,%rdi,2)
decl %edx
movq %r8, %rdi
cmpq $20000, %r8 # imm = 0x4E20
jne .LBB1_1
# %bb.2:
movq a_gpu(%rip), %rdi
movq a_cpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $1, %ecx
callq hipMemcpy
movq b_gpu(%rip), %rdi
movq b_cpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $1, %ecx
callq hipMemcpy
movq c_gpu(%rip), %rdi
movq c_cpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq a_gpu(%rip), %rax
movq b_gpu(%rip), %rcx
movq c_gpu(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $10000, 4(%rsp) # imm = 0x2710
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq c_cpu(%rip), %rdi
movq c_gpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $2, %ecx
callq hipMemcpy
movl $1, %ebx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movq c_cpu(%rip), %rax
movss (%rax,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $10001, %rbx # imm = 0x2711
jne .LBB1_5
# %bb.6:
movq a_gpu(%rip), %rdi
callq hipFree
movq b_gpu(%rip), %rdi
callq hipFree
movq c_gpu(%rip), %rdi
callq hipFree
movq a_cpu(%rip), %rdi
callq free
movq b_cpu(%rip), %rdi
callq free
movq c_cpu(%rip), %rdi
callq free
movq stdin(%rip), %rdi
callq getc
xorl %eax, %eax
addq $112, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type a_cpu,@object # @a_cpu
.bss
.globl a_cpu
.p2align 3, 0x0
a_cpu:
.quad 0
.size a_cpu, 8
.type b_cpu,@object # @b_cpu
.globl b_cpu
.p2align 3, 0x0
b_cpu:
.quad 0
.size b_cpu, 8
.type c_cpu,@object # @c_cpu
.globl c_cpu
.p2align 3, 0x0
c_cpu:
.quad 0
.size c_cpu, 8
.type a_gpu,@object # @a_gpu
.globl a_gpu
.p2align 3, 0x0
a_gpu:
.quad 0
.size a_gpu, 8
.type b_gpu,@object # @b_gpu
.globl b_gpu
.p2align 3, 0x0
b_gpu:
.quad 0
.size b_gpu, 8
.type c_gpu,@object # @c_gpu
.globl c_gpu
.p2align 3, 0x0
c_gpu:
.quad 0
.size c_gpu, 8
.type _Z3addPfS_S_i,@object # @_Z3addPfS_S_i
.section .rodata,"a",@progbits
.globl _Z3addPfS_S_i
.p2align 3, 0x0
_Z3addPfS_S_i:
.quad _Z18__device_stub__addPfS_S_i
.size _Z3addPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%.0f\n"
.size .L.str, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPfS_S_i"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym a_gpu
.addrsig_sym b_gpu
.addrsig_sym c_gpu
.addrsig_sym _Z3addPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ IADD3 R2, R0.reuse, -0x1, RZ ; /* 0xffffffff00027810 */
/* 0x040fe20007ffe0ff */
/*0050*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0060*/ LOP3.LUT R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */
/* 0x000fe200078ec0ff */
/*0070*/ ULDC.64 UR12, c[0x0][0x118] ; /* 0x00004600000c7ab9 */
/* 0x000fe20000000a00 */
/*0080*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fda0003f06070 */
/*0090*/ @!P0 BRA 0xb80 ; /* 0x00000ae000008947 */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R8, -R0, c[0x0][0x178], RZ ; /* 0x00005e0000087a10 */
/* 0x000fe20007ffe1ff */
/*00b0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*00c0*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */
/* 0x000fe20000000f00 */
/*00d0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fe200078e00ff */
/*00e0*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f04270 */
/*00f0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */
/* 0x000fe200078e00ff */
/*0100*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */
/* 0x000fe20000000f00 */
/*0110*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff077624 */
/* 0x000fe200078e00ff */
/*0120*/ MOV R6, c[0x0][0x160] ; /* 0x0000580000067a02 */
/* 0x000fd20000000f00 */
/*0130*/ @!P0 BRA 0x990 ; /* 0x0000085000008947 */
/* 0x000fea0003800000 */
/*0140*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*0150*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0160*/ @!P1 BRA 0x670 ; /* 0x0000050000009947 */
/* 0x000fea0003800000 */
/*0170*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0180*/ LDG.E R9, [R2.64] ; /* 0x0000000c02097981 */
/* 0x000ea8000c1e1900 */
/*0190*/ LDG.E R10, [R6.64] ; /* 0x0000000c060a7981 */
/* 0x000ea4000c1e1900 */
/*01a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x004fca0000000000 */
/*01b0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e8000c10190c */
/*01c0*/ LDG.E R10, [R2.64+0x4] ; /* 0x0000040c020a7981 */
/* 0x000ea8000c1e1900 */
/*01d0*/ LDG.E R11, [R6.64+0x4] ; /* 0x0000040c060b7981 */
/* 0x000ea4000c1e1900 */
/*01e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x004fca0000000000 */
/*01f0*/ STG.E [R4.64+0x4], R11 ; /* 0x0000040b04007986 */
/* 0x0003e8000c10190c */
/*0200*/ LDG.E R10, [R2.64+0x8] ; /* 0x0000080c020a7981 */
/* 0x000ea8000c1e1900 */
/*0210*/ LDG.E R13, [R6.64+0x8] ; /* 0x0000080c060d7981 */
/* 0x000ea4000c1e1900 */
/*0220*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x004fca0000000000 */
/*0230*/ STG.E [R4.64+0x8], R13 ; /* 0x0000080d04007986 */
/* 0x0005e8000c10190c */
/*0240*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c0c020a7981 */
/* 0x000ee8000c1e1900 */
/*0250*/ LDG.E R15, [R6.64+0xc] ; /* 0x00000c0c060f7981 */
/* 0x000ee4000c1e1900 */
/*0260*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x008fca0000000000 */
/*0270*/ STG.E [R4.64+0xc], R15 ; /* 0x00000c0f04007986 */
/* 0x0007e8000c10190c */
/*0280*/ LDG.E R9, [R2.64+0x10] ; /* 0x0000100c02097981 */
/* 0x001f28000c1e1900 */
/*0290*/ LDG.E R10, [R6.64+0x10] ; /* 0x0000100c060a7981 */
/* 0x000f24000c1e1900 */
/*02a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*02b0*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */
/* 0x0001e8000c10190c */
/*02c0*/ LDG.E R10, [R2.64+0x14] ; /* 0x0000140c020a7981 */
/* 0x000f28000c1e1900 */
/*02d0*/ LDG.E R11, [R6.64+0x14] ; /* 0x0000140c060b7981 */
/* 0x002f24000c1e1900 */
/*02e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*02f0*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */
/* 0x0003e8000c10190c */
/*0300*/ LDG.E R10, [R2.64+0x18] ; /* 0x0000180c020a7981 */
/* 0x000f28000c1e1900 */
/*0310*/ LDG.E R13, [R6.64+0x18] ; /* 0x0000180c060d7981 */
/* 0x004f24000c1e1900 */
/*0320*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0330*/ STG.E [R4.64+0x18], R13 ; /* 0x0000180d04007986 */
/* 0x0005e8000c10190c */
/*0340*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c0c020a7981 */
/* 0x000f28000c1e1900 */
/*0350*/ LDG.E R15, [R6.64+0x1c] ; /* 0x00001c0c060f7981 */
/* 0x008f24000c1e1900 */
/*0360*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x010fca0000000000 */
/*0370*/ STG.E [R4.64+0x1c], R15 ; /* 0x00001c0f04007986 */
/* 0x0007e8000c10190c */
/*0380*/ LDG.E R9, [R2.64+0x20] ; /* 0x0000200c02097981 */
/* 0x001f28000c1e1900 */
/*0390*/ LDG.E R10, [R6.64+0x20] ; /* 0x0000200c060a7981 */
/* 0x000f24000c1e1900 */
/*03a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*03b0*/ STG.E [R4.64+0x20], R9 ; /* 0x0000200904007986 */
/* 0x0001e8000c10190c */
/*03c0*/ LDG.E R10, [R2.64+0x24] ; /* 0x0000240c020a7981 */
/* 0x000f28000c1e1900 */
/*03d0*/ LDG.E R11, [R6.64+0x24] ; /* 0x0000240c060b7981 */
/* 0x002f24000c1e1900 */
/*03e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*03f0*/ STG.E [R4.64+0x24], R11 ; /* 0x0000240b04007986 */
/* 0x0003e8000c10190c */
/*0400*/ LDG.E R10, [R2.64+0x28] ; /* 0x0000280c020a7981 */
/* 0x000f28000c1e1900 */
/*0410*/ LDG.E R13, [R6.64+0x28] ; /* 0x0000280c060d7981 */
/* 0x004f24000c1e1900 */
/*0420*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0430*/ STG.E [R4.64+0x28], R13 ; /* 0x0000280d04007986 */
/* 0x0005e8000c10190c */
/*0440*/ LDG.E R10, [R2.64+0x2c] ; /* 0x00002c0c020a7981 */
/* 0x000f28000c1e1900 */
/*0450*/ LDG.E R15, [R6.64+0x2c] ; /* 0x00002c0c060f7981 */
/* 0x008f24000c1e1900 */
/*0460*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x010fca0000000000 */
/*0470*/ STG.E [R4.64+0x2c], R15 ; /* 0x00002c0f04007986 */
/* 0x0007e8000c10190c */
/*0480*/ LDG.E R9, [R2.64+0x30] ; /* 0x0000300c02097981 */
/* 0x001f28000c1e1900 */
/*0490*/ LDG.E R10, [R6.64+0x30] ; /* 0x0000300c060a7981 */
/* 0x000f24000c1e1900 */
/*04a0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*04b0*/ STG.E [R4.64+0x30], R9 ; /* 0x0000300904007986 */
/* 0x000fe8000c10190c */
/*04c0*/ LDG.E R10, [R2.64+0x34] ; /* 0x0000340c020a7981 */
/* 0x000f28000c1e1900 */
/*04d0*/ LDG.E R11, [R6.64+0x34] ; /* 0x0000340c060b7981 */
/* 0x002f24000c1e1900 */
/*04e0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*04f0*/ STG.E [R4.64+0x34], R11 ; /* 0x0000340b04007986 */
/* 0x0001e8000c10190c */
/*0500*/ LDG.E R10, [R2.64+0x38] ; /* 0x0000380c020a7981 */
/* 0x000f28000c1e1900 */
/*0510*/ LDG.E R13, [R6.64+0x38] ; /* 0x0000380c060d7981 */
/* 0x004f22000c1e1900 */
/*0520*/ IADD3 R12, P1, R2, 0x40, RZ ; /* 0x00000040020c7810 */
/* 0x000fe40007f3e0ff */
/*0530*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fe20007ffe0ff */
/*0540*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0550*/ STG.E [R4.64+0x38], R13 ; /* 0x0000380d04007986 */
/* 0x000fe8000c10190c */
/*0560*/ LDG.E R10, [R2.64+0x3c] ; /* 0x00003c0c020a7981 */
/* 0x0002a8000c1e1900 */
/*0570*/ LDG.E R15, [R6.64+0x3c] ; /* 0x00003c0c060f7981 */
/* 0x0086a2000c1e1900 */
/*0580*/ IADD3.X R11, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff0b7210 */
/* 0x001fe20000ffe4ff */
/*0590*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */
/* 0x000fe2000fffe03f */
/*05a0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fc40003f24270 */
/*05b0*/ IADD3 R9, P3, R4, 0x40, RZ ; /* 0x0000004004097810 */
/* 0x000fe20007f7e0ff */
/*05c0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000c */
/*05d0*/ IADD3 R14, P2, R6, 0x40, RZ ; /* 0x00000040060e7810 */
/* 0x000fe40007f5e0ff */
/*05e0*/ MOV R3, R11 ; /* 0x0000000b00037202 */
/* 0x000fc60000000f00 */
/*05f0*/ IMAD.X R7, RZ, RZ, R7, P2 ; /* 0x000000ffff077224 */
/* 0x008fe400010e0607 */
/*0600*/ IMAD.MOV.U32 R6, RZ, RZ, R14 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000e */
/*0610*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x004fe20000000000 */
/*0620*/ IADD3.X R10, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff0a7210 */
/* 0x000fc80001ffe4ff */
/*0630*/ STG.E [R4.64+0x3c], R15 ; /* 0x00003c0f04007986 */
/* 0x0001e4000c10190c */
/*0640*/ MOV R4, R9 ; /* 0x0000000900047202 */
/* 0x001fe20000000f00 */
/*0650*/ IMAD.MOV.U32 R5, RZ, RZ, R10 ; /* 0x000000ffff057224 */
/* 0x000fe200078e000a */
/*0660*/ @P1 BRA 0x180 ; /* 0xfffffb1000001947 */
/* 0x000fea000383ffff */
/*0670*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*0680*/ @!P1 BRA 0x970 ; /* 0x000002e000009947 */
/* 0x000fea0003800000 */
/*0690*/ LDG.E R9, [R2.64] ; /* 0x0000000c02097981 */
/* 0x000ea8000c1e1900 */
/*06a0*/ LDG.E R10, [R6.64] ; /* 0x0000000c060a7981 */
/* 0x000ea4000c1e1900 */
/*06b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x004fca0000000000 */
/*06c0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e8000c10190c */
/*06d0*/ LDG.E R10, [R2.64+0x4] ; /* 0x0000040c020a7981 */
/* 0x000ea8000c1e1900 */
/*06e0*/ LDG.E R11, [R6.64+0x4] ; /* 0x0000040c060b7981 */
/* 0x000ea4000c1e1900 */
/*06f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x004fca0000000000 */
/*0700*/ STG.E [R4.64+0x4], R11 ; /* 0x0000040b04007986 */
/* 0x0003e8000c10190c */
/*0710*/ LDG.E R10, [R2.64+0x8] ; /* 0x0000080c020a7981 */
/* 0x000ea8000c1e1900 */
/*0720*/ LDG.E R13, [R6.64+0x8] ; /* 0x0000080c060d7981 */
/* 0x000ea4000c1e1900 */
/*0730*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x004fca0000000000 */
/*0740*/ STG.E [R4.64+0x8], R13 ; /* 0x0000080d04007986 */
/* 0x0005e8000c10190c */
/*0750*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c0c020a7981 */
/* 0x000ee8000c1e1900 */
/*0760*/ LDG.E R15, [R6.64+0xc] ; /* 0x00000c0c060f7981 */
/* 0x000ee4000c1e1900 */
/*0770*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x008fca0000000000 */
/*0780*/ STG.E [R4.64+0xc], R15 ; /* 0x00000c0f04007986 */
/* 0x0007e8000c10190c */
/*0790*/ LDG.E R9, [R2.64+0x10] ; /* 0x0000100c02097981 */
/* 0x001f28000c1e1900 */
/*07a0*/ LDG.E R10, [R6.64+0x10] ; /* 0x0000100c060a7981 */
/* 0x000f24000c1e1900 */
/*07b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x010fca0000000000 */
/*07c0*/ STG.E [R4.64+0x10], R9 ; /* 0x0000100904007986 */
/* 0x000fe8000c10190c */
/*07d0*/ LDG.E R10, [R2.64+0x14] ; /* 0x0000140c020a7981 */
/* 0x000f28000c1e1900 */
/*07e0*/ LDG.E R11, [R6.64+0x14] ; /* 0x0000140c060b7981 */
/* 0x002f24000c1e1900 */
/*07f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x010fca0000000000 */
/*0800*/ STG.E [R4.64+0x14], R11 ; /* 0x0000140b04007986 */
/* 0x0001e8000c10190c */
/*0810*/ LDG.E R10, [R2.64+0x18] ; /* 0x0000180c020a7981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R13, [R6.64+0x18] ; /* 0x0000180c060d7981 */
/* 0x004f24000c1e1900 */
/*0830*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x010fca0000000000 */
/*0840*/ STG.E [R4.64+0x18], R13 ; /* 0x0000180d04007986 */
/* 0x000fe8000c10190c */
/*0850*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c0c020a7981 */
/* 0x0002a8000c1e1900 */
/*0860*/ LDG.E R15, [R6.64+0x1c] ; /* 0x00001c0c060f7981 */
/* 0x0086a2000c1e1900 */
/*0870*/ IADD3 R11, P2, R2, 0x20, RZ ; /* 0x00000020020b7810 */
/* 0x001fe20007f5e0ff */
/*0880*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */
/* 0x000fe2000fffe03f */
/*0890*/ IADD3 R9, P3, R4, 0x20, RZ ; /* 0x0000002004097810 */
/* 0x000fc40007f7e0ff */
/*08a0*/ IADD3 R14, P1, R6, 0x20, RZ ; /* 0x00000020060e7810 */
/* 0x000fe20007f3e0ff */
/*08b0*/ IMAD.X R12, RZ, RZ, R3, P2 ; /* 0x000000ffff0c7224 */
/* 0x000fe200010e0603 */
/*08c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*08d0*/ IMAD.MOV.U32 R2, RZ, RZ, R11 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000b */
/*08e0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe20007ffe0ff */
/*08f0*/ IMAD.MOV.U32 R6, RZ, RZ, R14 ; /* 0x000000ffff067224 */
/* 0x008fe200078e000e */
/*0900*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe40000ffe4ff */
/*0910*/ MOV R3, R12 ; /* 0x0000000c00037202 */
/* 0x000fe20000000f00 */
/*0920*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x004fe20000000000 */
/*0930*/ IADD3.X R10, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff0a7210 */
/* 0x000fc80001ffe4ff */
/*0940*/ STG.E [R4.64+0x1c], R15 ; /* 0x00001c0f04007986 */
/* 0x0001e4000c10190c */
/*0950*/ MOV R4, R9 ; /* 0x0000000900047202 */
/* 0x001fe20000000f00 */
/*0960*/ IMAD.MOV.U32 R5, RZ, RZ, R10 ; /* 0x000000ffff057224 */
/* 0x000fe400078e000a */
/*0970*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0980*/ @!P0 BRA 0xb80 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*0990*/ LDG.E R9, [R2.64] ; /* 0x0000000c02097981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ LDG.E R10, [R6.64] ; /* 0x0000000c060a7981 */
/* 0x000ea4000c1e1900 */
/*09b0*/ FADD R9, R9, R10 ; /* 0x0000000a09097221 */
/* 0x004fca0000000000 */
/*09c0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x000fe8000c10190c */
/*09d0*/ LDG.E R10, [R2.64+0x4] ; /* 0x0000040c020a7981 */
/* 0x000ea8000c1e1900 */
/*09e0*/ LDG.E R11, [R6.64+0x4] ; /* 0x0000040c060b7981 */
/* 0x000ea4000c1e1900 */
/*09f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x004fca0000000000 */
/*0a00*/ STG.E [R4.64+0x4], R11 ; /* 0x0000040b04007986 */
/* 0x0001e8000c10190c */
/*0a10*/ LDG.E R10, [R2.64+0x8] ; /* 0x0000080c020a7981 */
/* 0x000ea8000c1e1900 */
/*0a20*/ LDG.E R13, [R6.64+0x8] ; /* 0x0000080c060d7981 */
/* 0x000ea2000c1e1900 */
/*0a30*/ IADD3 R12, P0, R2, 0x10, RZ ; /* 0x00000010020c7810 */
/* 0x000fe40007f1e0ff */
/*0a40*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fe20007ffe0ff */
/*0a50*/ FADD R13, R10, R13 ; /* 0x0000000d0a0d7221 */
/* 0x004fca0000000000 */
/*0a60*/ STG.E [R4.64+0x8], R13 ; /* 0x0000080d04007986 */
/* 0x000fe8000c10190c */
/*0a70*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c0c020a7981 */
/* 0x0002a8000c1e1900 */
/*0a80*/ LDG.E R15, [R6.64+0xc] ; /* 0x00000c0c060f7981 */
/* 0x0006a2000c1e1900 */
/*0a90*/ IADD3.X R11, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff0b7210 */
/* 0x001fe200007fe4ff */
/*0aa0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000fffe03f */
/*0ab0*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc40003f05270 */
/*0ac0*/ IADD3 R9, P2, R4, 0x10, RZ ; /* 0x0000001004097810 */
/* 0x000fe20007f5e0ff */
/*0ad0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000c */
/*0ae0*/ IADD3 R14, P1, R6, 0x10, RZ ; /* 0x00000010060e7810 */
/* 0x000fe40007f3e0ff */
/*0af0*/ MOV R3, R11 ; /* 0x0000000b00037202 */
/* 0x000fc60000000f00 */
/*0b00*/ IMAD.X R7, RZ, RZ, R7, P1 ; /* 0x000000ffff077224 */
/* 0x008fe400008e0607 */
/*0b10*/ IMAD.MOV.U32 R6, RZ, RZ, R14 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000e */
/*0b20*/ FADD R15, R10, R15 ; /* 0x0000000f0a0f7221 */
/* 0x004fe20000000000 */
/*0b30*/ IADD3.X R10, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff0a7210 */
/* 0x000fc800017fe4ff */
/*0b40*/ STG.E [R4.64+0xc], R15 ; /* 0x00000c0f04007986 */
/* 0x0001e4000c10190c */
/*0b50*/ MOV R4, R9 ; /* 0x0000000900047202 */
/* 0x001fe20000000f00 */
/*0b60*/ IMAD.MOV.U32 R5, RZ, RZ, R10 ; /* 0x000000ffff057224 */
/* 0x000fe200078e000a */
/*0b70*/ @P0 BRA 0x990 ; /* 0xfffffe1000000947 */
/* 0x000fea000383ffff */
/*0b80*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0b90*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0ba0*/ UMOV UR5, 0x4 ; /* 0x0000000400057882 */
/* 0x000fe40000000000 */
/*0bb0*/ ULDC.64 UR6, c[0x0][0x170] ; /* 0x00005c0000067ab9 */
/* 0x000fe40000000a00 */
/*0bc0*/ ULDC.64 UR8, c[0x0][0x168] ; /* 0x00005a0000087ab9 */
/* 0x000fe40000000a00 */
/*0bd0*/ ULDC.64 UR10, c[0x0][0x160] ; /* 0x00005800000a7ab9 */
/* 0x000fe40000000a00 */
/*0be0*/ UIMAD.WIDE UR6, UR4, UR5, UR6 ; /* 0x00000005040672a5 */
/* 0x000fe4000f8e0206 */
/*0bf0*/ UIMAD.WIDE UR8, UR4, UR5, UR8 ; /* 0x00000005040872a5 */
/* 0x000fc4000f8e0208 */
/*0c00*/ UIMAD.WIDE UR4, UR4, UR5, UR10 ; /* 0x00000005040472a5 */
/* 0x000fca000f8e020a */
/*0c10*/ MOV R2, UR8 ; /* 0x0000000800027c02 */
/* 0x000fe20008000f00 */
/*0c20*/ IMAD.U32 R5, RZ, RZ, UR5 ; /* 0x00000005ff057e24 */
/* 0x000fe2000f8e00ff */
/*0c30*/ MOV R4, UR4 ; /* 0x0000000400047c02 */
/* 0x000fe20008000f00 */
/*0c40*/ IMAD.U32 R3, RZ, RZ, UR9 ; /* 0x00000009ff037e24 */
/* 0x000fc8000f8e00ff */
/*0c50*/ LDG.E R5, [R4.64] ; /* 0x0000000c04057981 */
/* 0x000ea8000c1e1900 */
/*0c60*/ LDG.E R2, [R2.64] ; /* 0x0000000c02027981 */
/* 0x000ea2000c1e1900 */
/*0c70*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fc80007ffe0ff */
/*0c80*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*0c90*/ UIADD3 UR8, UP1, UR8, 0x4, URZ ; /* 0x0000000408087890 */
/* 0x000fe2000ff3e03f */
/*0ca0*/ MOV R6, UR6 ; /* 0x0000000600067c02 */
/* 0x001fe20008000f00 */
/*0cb0*/ UIADD3 UR6, UP0, UR6, 0x4, URZ ; /* 0x0000000406067890 */
/* 0x000fe2000ff1e03f */
/*0cc0*/ MOV R7, UR7 ; /* 0x0000000700077c02 */
/* 0x000fe20008000f00 */
/*0cd0*/ UIADD3 UR4, UP2, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe4000ff5e03f */
/*0ce0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe400087fe43f */
/*0cf0*/ UIADD3.X UR9, URZ, UR9, URZ, UP1, !UPT ; /* 0x000000093f097290 */
/* 0x000fe40008ffe43f */
/*0d00*/ UIADD3.X UR5, URZ, UR5, URZ, UP2, !UPT ; /* 0x000000053f057290 */
/* 0x000fe200097fe43f */
/*0d10*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*0d20*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e2000c10190c */
/*0d30*/ @P0 BRA 0xc10 ; /* 0xfffffed000000947 */
/* 0x000fea000383ffff */
/*0d40*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0d50*/ BRA 0xd50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0d60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0da0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0db0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0de0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0df0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_i
.globl _Z3addPfS_S_i
.p2align 8
.type _Z3addPfS_S_i,@function
_Z3addPfS_S_i:
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_3
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_mov_b32_e32 v0, 0
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_add_i32 s2, s2, -1
s_add_u32 s4, s4, 4
s_addc_u32 s5, s5, 0
s_add_u32 s6, s6, 4
s_addc_u32 s7, s7, 0
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_add_u32 s0, s0, 4
s_addc_u32 s1, s1, 0
s_cmp_eq_u32 s2, 0
s_cbranch_scc0 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_i, .Lfunc_end0-_Z3addPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0018e52c_00000000-6_othermain.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z3addPfS_S_iPfS_S_i
.type _Z27__device_stub__Z3addPfS_S_iPfS_S_i, @function
_Z27__device_stub__Z3addPfS_S_iPfS_S_i:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z27__device_stub__Z3addPfS_S_iPfS_S_i, .-_Z27__device_stub__Z3addPfS_S_iPfS_S_i
.globl _Z3addPfS_S_i
.type _Z3addPfS_S_i, @function
_Z3addPfS_S_i:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addPfS_S_i, .-_Z3addPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%.0f\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $40, %rsp
.cfi_def_cfa_offset 64
movl $40000, %edi
call malloc@PLT
movq %rax, a_cpu(%rip)
movl $40000, %edi
call malloc@PLT
movq %rax, b_cpu(%rip)
movl $40000, %edi
call malloc@PLT
movq %rax, c_cpu(%rip)
movl $40000, %esi
leaq a_gpu(%rip), %rdi
call cudaMalloc@PLT
movl $40000, %esi
leaq b_gpu(%rip), %rdi
call cudaMalloc@PLT
movl $40000, %esi
leaq c_gpu(%rip), %rdi
call cudaMalloc@PLT
movl $4, %eax
movl $1, %edx
.L12:
movl %edx, %ecx
negl %ecx
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
movq a_cpu(%rip), %rcx
movss %xmm0, (%rcx,%rax)
leal (%rdx,%rdx), %ecx
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
movq b_cpu(%rip), %rcx
movss %xmm0, (%rcx,%rax)
movq a_cpu(%rip), %rcx
movss (%rcx,%rax), %xmm0
movq b_cpu(%rip), %rcx
addss (%rcx,%rax), %xmm0
movq c_cpu(%rip), %rcx
movss %xmm0, (%rcx,%rax)
addl $1, %edx
addq $4, %rax
cmpl $10001, %edx
jne .L12
movl $1, %ecx
movl $40000, %edx
movq a_cpu(%rip), %rsi
movq a_gpu(%rip), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000, %edx
movq b_cpu(%rip), %rsi
movq b_gpu(%rip), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000, %edx
movq c_cpu(%rip), %rsi
movq c_gpu(%rip), %rdi
call cudaMemcpy@PLT
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movl $2, %ecx
movl $40000, %edx
movq c_gpu(%rip), %rsi
movq c_cpu(%rip), %rdi
call cudaMemcpy@PLT
movl $4, %ebx
leaq .LC0(%rip), %rbp
.L14:
movq c_cpu(%rip), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $40004, %rbx
jne .L14
movq a_gpu(%rip), %rdi
call cudaFree@PLT
movq b_gpu(%rip), %rdi
call cudaFree@PLT
movq c_gpu(%rip), %rdi
call cudaFree@PLT
movq a_cpu(%rip), %rdi
call free@PLT
movq b_cpu(%rip), %rdi
call free@PLT
movq c_cpu(%rip), %rdi
call free@PLT
movq stdin(%rip), %rdi
call getc@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movl $10000, %ecx
movq c_gpu(%rip), %rdx
movq b_gpu(%rip), %rsi
movq a_gpu(%rip), %rdi
call _Z27__device_stub__Z3addPfS_S_iPfS_S_i
jmp .L13
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3addPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl c_gpu
.bss
.align 8
.type c_gpu, @object
.size c_gpu, 8
c_gpu:
.zero 8
.globl b_gpu
.align 8
.type b_gpu, @object
.size b_gpu, 8
b_gpu:
.zero 8
.globl a_gpu
.align 8
.type a_gpu, @object
.size a_gpu, 8
a_gpu:
.zero 8
.globl c_cpu
.align 8
.type c_cpu, @object
.size c_cpu, 8
c_cpu:
.zero 8
.globl b_cpu
.align 8
.type b_cpu, @object
.size b_cpu, 8
b_cpu:
.zero 8
.globl a_cpu
.align 8
.type a_cpu, @object
.size a_cpu, 8
a_cpu:
.zero 8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "othermain.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPfS_S_i # -- Begin function _Z18__device_stub__addPfS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__addPfS_S_i,@function
_Z18__device_stub__addPfS_S_i: # @_Z18__device_stub__addPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__addPfS_S_i, .Lfunc_end0-_Z18__device_stub__addPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $112, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -16
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, a_cpu(%rip)
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, b_cpu(%rip)
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, c_cpu(%rip)
movl $a_gpu, %edi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movl $b_gpu, %edi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movl $c_gpu, %edi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movq a_cpu(%rip), %rax
movq b_cpu(%rip), %rcx
movl $-1, %edx
xorl %edi, %edi
movq c_cpu(%rip), %rsi
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %edx, %xmm0
leaq 2(%rdi), %r8
xorps %xmm1, %xmm1
cvtsi2ss %r8d, %xmm1
movss %xmm0, 4(%rax,%rdi,2)
movss %xmm1, 4(%rcx,%rdi,2)
addss 4(%rax,%rdi,2), %xmm1
movss %xmm1, 4(%rsi,%rdi,2)
decl %edx
movq %r8, %rdi
cmpq $20000, %r8 # imm = 0x4E20
jne .LBB1_1
# %bb.2:
movq a_gpu(%rip), %rdi
movq a_cpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $1, %ecx
callq hipMemcpy
movq b_gpu(%rip), %rdi
movq b_cpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $1, %ecx
callq hipMemcpy
movq c_gpu(%rip), %rdi
movq c_cpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq a_gpu(%rip), %rax
movq b_gpu(%rip), %rcx
movq c_gpu(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $10000, 4(%rsp) # imm = 0x2710
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq c_cpu(%rip), %rdi
movq c_gpu(%rip), %rsi
movl $40000, %edx # imm = 0x9C40
movl $2, %ecx
callq hipMemcpy
movl $1, %ebx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movq c_cpu(%rip), %rax
movss (%rax,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %rbx
cmpq $10001, %rbx # imm = 0x2711
jne .LBB1_5
# %bb.6:
movq a_gpu(%rip), %rdi
callq hipFree
movq b_gpu(%rip), %rdi
callq hipFree
movq c_gpu(%rip), %rdi
callq hipFree
movq a_cpu(%rip), %rdi
callq free
movq b_cpu(%rip), %rdi
callq free
movq c_cpu(%rip), %rdi
callq free
movq stdin(%rip), %rdi
callq getc
xorl %eax, %eax
addq $112, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type a_cpu,@object # @a_cpu
.bss
.globl a_cpu
.p2align 3, 0x0
a_cpu:
.quad 0
.size a_cpu, 8
.type b_cpu,@object # @b_cpu
.globl b_cpu
.p2align 3, 0x0
b_cpu:
.quad 0
.size b_cpu, 8
.type c_cpu,@object # @c_cpu
.globl c_cpu
.p2align 3, 0x0
c_cpu:
.quad 0
.size c_cpu, 8
.type a_gpu,@object # @a_gpu
.globl a_gpu
.p2align 3, 0x0
a_gpu:
.quad 0
.size a_gpu, 8
.type b_gpu,@object # @b_gpu
.globl b_gpu
.p2align 3, 0x0
b_gpu:
.quad 0
.size b_gpu, 8
.type c_gpu,@object # @c_gpu
.globl c_gpu
.p2align 3, 0x0
c_gpu:
.quad 0
.size c_gpu, 8
.type _Z3addPfS_S_i,@object # @_Z3addPfS_S_i
.section .rodata,"a",@progbits
.globl _Z3addPfS_S_i
.p2align 3, 0x0
_Z3addPfS_S_i:
.quad _Z18__device_stub__addPfS_S_i
.size _Z3addPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%.0f\n"
.size .L.str, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPfS_S_i"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym a_gpu
.addrsig_sym b_gpu
.addrsig_sym c_gpu
.addrsig_sym _Z3addPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**********************************************************************************************
Source Code : sharedMemoryReadingSameWord.cu
Objective : Example code to demonstrate the different access patterns of float3 array
in the global memory the corresponding advantages in terms of the bandwidth
that is achievable
Descritpion : Example code to demonstrate that while reading the same word by all the
threads, there will not be any serialization since all threads are accessing
from the same bank the 32-bit word gets broadcasted to all the threads --
hence bandwidth can be comparable to the value got when there were no
bank conflicts
output: The different bandwidths of the shared memory that are achieved while
accessing the same word and the access pattern without any bank conflicts
Modified : Aug 2011
Author : RarchK
****************************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include <float.h>
#define BLOCK_SIZE 128 // 128 threads per block
#define TRANSFERED_DATA_SIZE 2000000 //2 MB
#define NO_OF_PATTERNS 2
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
void printResults(void);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving accessing the elements with a stride of one 32-bit word
// the access pattern causes no bank conflicts
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemAccessWithStride1(void)
{
__shared__ __attribute__((unused)) float array[BLOCK_SIZE];
int idx = threadIdx.x;
float scalar = 5.0f;
array[idx] = scalar;
}
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving reading the same word by all the threads
// the access pattern is conflict-free
// since the word gets broadcasted to all the threads
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemReadSameWord(void)
{
__shared__ float array[BLOCK_SIZE];
int idx = 3;
float __attribute__((unused)) scalar ;
scalar = array[idx];
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routene
// for timing the two kernals
// finding the corresponding band widths
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
float elapsedTimes[NO_OF_PATTERNS][NTIMES];
cudaEvent_t start,stop;
double bytes = sizeof(float) * TRANSFERED_DATA_SIZE;
// event creation, which will be used for timing the code
cudaEventCreate(&start);
cudaEventCreate(&stop);
//finding the 1D grid size
int gridSize = TRANSFERED_DATA_SIZE/BLOCK_SIZE;
if( TRANSFERED_DATA_SIZE % BLOCK_SIZE != 0 )
gridSize += 1;
// running each pattern NTIMES
for(int k=0; k < NTIMES; k++)
{
// timing the kernels corresponding to different access patterns
// PATTERN 1
cudaEventRecord(start,0);
sharedMemAccessWithStride1 <<< gridSize,BLOCK_SIZE >>> ();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[0][k],start,stop);
// PATTERN 2
cudaEventRecord(start,0);
sharedMemReadSameWord <<< gridSize,BLOCK_SIZE >>> ();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[1][k],start,stop);
} // end of the for loop involving NTIMES
// intializing the mintime array
for(int i=0; i < NO_OF_PATTERNS;i++)
mintime[i] = FLT_MAX;
for (int k=1; k < NTIMES; k++) // skiping the first iteration
{
for (int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i] + elapsedTimes[i][k];
mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
}
}
// calculation of the different band widths that are achieved by different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
bandWidths[i] = bytes/mintime[i];
}
printResults();
printf("\n\n**** successful termination of the program ****\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// prints the results containig the minimum, maximum, average times taken by the two kernels
// the associated maximum bandwidth achieved by the different kernels
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void printResults()
{
printf("Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n");
printf("The transfered data size (Bytes): %ld\n",TRANSFERED_DATA_SIZE*sizeof(float));
printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
printf("Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
printf("-------------------------------------------------------------------------------------------------------------------------------\n");
// printing the results for different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
switch(i)
{
case 0: printf("Stride of one 32-bit word ");
break;
case 1: printf("Reading the same 32-bit word");
break;
}
printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
}
printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | code for sm_80
Function : _Z21sharedMemReadSameWordv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z26sharedMemAccessWithStride1v
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**********************************************************************************************
Source Code : sharedMemoryReadingSameWord.cu
Objective : Example code to demonstrate the different access patterns of float3 array
in the global memory the corresponding advantages in terms of the bandwidth
that is achievable
Descritpion : Example code to demonstrate that while reading the same word by all the
threads, there will not be any serialization since all threads are accessing
from the same bank the 32-bit word gets broadcasted to all the threads --
hence bandwidth can be comparable to the value got when there were no
bank conflicts
output: The different bandwidths of the shared memory that are achieved while
accessing the same word and the access pattern without any bank conflicts
Modified : Aug 2011
Author : RarchK
****************************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include <float.h>
#define BLOCK_SIZE 128 // 128 threads per block
#define TRANSFERED_DATA_SIZE 2000000 //2 MB
#define NO_OF_PATTERNS 2
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
void printResults(void);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving accessing the elements with a stride of one 32-bit word
// the access pattern causes no bank conflicts
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemAccessWithStride1(void)
{
__shared__ __attribute__((unused)) float array[BLOCK_SIZE];
int idx = threadIdx.x;
float scalar = 5.0f;
array[idx] = scalar;
}
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving reading the same word by all the threads
// the access pattern is conflict-free
// since the word gets broadcasted to all the threads
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemReadSameWord(void)
{
__shared__ float array[BLOCK_SIZE];
int idx = 3;
float __attribute__((unused)) scalar ;
scalar = array[idx];
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routene
// for timing the two kernals
// finding the corresponding band widths
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
float elapsedTimes[NO_OF_PATTERNS][NTIMES];
cudaEvent_t start,stop;
double bytes = sizeof(float) * TRANSFERED_DATA_SIZE;
// event creation, which will be used for timing the code
cudaEventCreate(&start);
cudaEventCreate(&stop);
//finding the 1D grid size
int gridSize = TRANSFERED_DATA_SIZE/BLOCK_SIZE;
if( TRANSFERED_DATA_SIZE % BLOCK_SIZE != 0 )
gridSize += 1;
// running each pattern NTIMES
for(int k=0; k < NTIMES; k++)
{
// timing the kernels corresponding to different access patterns
// PATTERN 1
cudaEventRecord(start,0);
sharedMemAccessWithStride1 <<< gridSize,BLOCK_SIZE >>> ();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[0][k],start,stop);
// PATTERN 2
cudaEventRecord(start,0);
sharedMemReadSameWord <<< gridSize,BLOCK_SIZE >>> ();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[1][k],start,stop);
} // end of the for loop involving NTIMES
// intializing the mintime array
for(int i=0; i < NO_OF_PATTERNS;i++)
mintime[i] = FLT_MAX;
for (int k=1; k < NTIMES; k++) // skiping the first iteration
{
for (int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i] + elapsedTimes[i][k];
mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
}
}
// calculation of the different band widths that are achieved by different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
bandWidths[i] = bytes/mintime[i];
}
printResults();
printf("\n\n**** successful termination of the program ****\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// prints the results containig the minimum, maximum, average times taken by the two kernels
// the associated maximum bandwidth achieved by the different kernels
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void printResults()
{
printf("Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n");
printf("The transfered data size (Bytes): %ld\n",TRANSFERED_DATA_SIZE*sizeof(float));
printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
printf("Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
printf("-------------------------------------------------------------------------------------------------------------------------------\n");
// printing the results for different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
switch(i)
{
case 0: printf("Stride of one 32-bit word ");
break;
case 1: printf("Reading the same 32-bit word");
break;
}
printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
}
printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | .file "tmpxft_001402d6_00000000-6_sharedMemoryReadingSameWord.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n"
.align 8
.LC1:
.string "The transfered data size (Bytes): %ld\n"
.align 8
.LC2:
.string "\n-------------------------------------------------------------------------------------------------------------------------------\n"
.align 8
.LC3:
.string "Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n"
.align 8
.LC4:
.string "-------------------------------------------------------------------------------------------------------------------------------\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "Stride of one 32-bit word "
.LC7:
.string "\t %.6f \t\t %f \t\t %f \t\t %f\n"
.LC8:
.string "Reading the same 32-bit word"
.section .rodata.str1.8
.align 8
.LC9:
.string "\n ------------------------------------------------------------------------------------------------------------------------------\n"
.text
.globl _Z12printResultsv
.type _Z12printResultsv, @function
_Z12printResultsv:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8000000, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss _ZL10bandWidths(%rip), %xmm0
divss .LC6(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd _ZL7maxtime(%rip), %xmm3
movsd _ZL7mintime(%rip), %xmm2
movsd _ZL7avgtime(%rip), %xmm1
leaq .LC7(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $4, %eax
call __printf_chk@PLT
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss 4+_ZL10bandWidths(%rip), %xmm0
divss .LC6(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd 8+_ZL7maxtime(%rip), %xmm3
movsd 8+_ZL7mintime(%rip), %xmm2
movsd 8+_ZL7avgtime(%rip), %xmm1
movq %rbx, %rsi
movl $2, %edi
movl $4, %eax
call __printf_chk@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z12printResultsv, .-_Z12printResultsv
.globl _Z45__device_stub__Z26sharedMemAccessWithStride1vv
.type _Z45__device_stub__Z26sharedMemAccessWithStride1vv, @function
_Z45__device_stub__Z26sharedMemAccessWithStride1vv:
.LFB2083:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z26sharedMemAccessWithStride1v(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z45__device_stub__Z26sharedMemAccessWithStride1vv, .-_Z45__device_stub__Z26sharedMemAccessWithStride1vv
.globl _Z26sharedMemAccessWithStride1v
.type _Z26sharedMemAccessWithStride1v, @function
_Z26sharedMemAccessWithStride1v:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z26sharedMemAccessWithStride1vv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z26sharedMemAccessWithStride1v, .-_Z26sharedMemAccessWithStride1v
.globl _Z40__device_stub__Z21sharedMemReadSameWordvv
.type _Z40__device_stub__Z21sharedMemReadSameWordvv, @function
_Z40__device_stub__Z21sharedMemReadSameWordvv:
.LFB2085:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z21sharedMemReadSameWordv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z40__device_stub__Z21sharedMemReadSameWordvv, .-_Z40__device_stub__Z21sharedMemReadSameWordvv
.globl _Z21sharedMemReadSameWordv
.type _Z21sharedMemReadSameWordv, @function
_Z21sharedMemReadSameWordv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z21sharedMemReadSameWordvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z21sharedMemReadSameWordv, .-_Z21sharedMemReadSameWordv
.section .rodata.str1.8
.align 8
.LC13:
.string "\n\n**** successful termination of the program ****\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $144, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
leaq 48(%rsp), %rbp
leaq 88(%rsp), %rbx
leaq 128(%rsp), %r12
jmp .L24
.L38:
call _Z45__device_stub__Z26sharedMemAccessWithStride1vv
jmp .L22
.L23:
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaEventElapsedTime@PLT
addq $4, %rbx
cmpq %rbx, %r12
je .L37
.L24:
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $128, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $15625, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L22:
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq -40(%rbx), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $128, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $15625, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L23
call _Z40__device_stub__Z21sharedMemReadSameWordvv
jmp .L23
.L37:
leaq 52(%rsp), %rax
leaq 40(%rbp), %rdx
movsd _ZL7avgtime(%rip), %xmm2
movsd _ZL7maxtime(%rip), %xmm6
movsd 8+_ZL7avgtime(%rip), %xmm1
movsd 8+_ZL7maxtime(%rip), %xmm5
movsd .LC10(%rip), %xmm3
movapd %xmm3, %xmm4
.L25:
pxor %xmm0, %xmm0
cvtss2sd (%rax), %xmm0
addsd %xmm0, %xmm2
minsd %xmm0, %xmm3
maxsd %xmm0, %xmm6
pxor %xmm0, %xmm0
cvtss2sd 40(%rax), %xmm0
addsd %xmm0, %xmm1
minsd %xmm0, %xmm4
maxsd %xmm0, %xmm5
addq $4, %rax
cmpq %rax, %rdx
jne .L25
movsd %xmm3, _ZL7mintime(%rip)
movsd %xmm6, _ZL7maxtime(%rip)
movsd %xmm4, 8+_ZL7mintime(%rip)
movsd %xmm5, 8+_ZL7maxtime(%rip)
movsd .LC11(%rip), %xmm5
divsd %xmm5, %xmm2
movsd %xmm2, _ZL7avgtime(%rip)
movsd .LC12(%rip), %xmm0
movapd %xmm0, %xmm2
divsd %xmm3, %xmm2
cvtsd2ss %xmm2, %xmm2
movss %xmm2, _ZL10bandWidths(%rip)
divsd %xmm5, %xmm1
movsd %xmm1, 8+_ZL7avgtime(%rip)
divsd %xmm4, %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 4+_ZL10bandWidths(%rip)
call _Z12printResultsv
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaEventDestroy@PLT
movq 16(%rsp), %rdi
call cudaEventDestroy@PLT
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L39
movl $0, %eax
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z21sharedMemReadSameWordv"
.section .rodata.str1.8
.align 8
.LC15:
.string "_Z26sharedMemAccessWithStride1v"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z21sharedMemReadSameWordv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z26sharedMemAccessWithStride1v(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10bandWidths
.comm _ZL10bandWidths,8,8
.local _ZL7mintime
.comm _ZL7mintime,16,16
.local _ZL7maxtime
.comm _ZL7maxtime,16,16
.local _ZL7avgtime
.comm _ZL7avgtime,16,16
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 1232348160
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC10:
.long -536870912
.long 1206910975
.align 8
.LC11:
.long 0
.long 1075970048
.align 8
.LC12:
.long 0
.long 1096713344
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**********************************************************************************************
Source Code : sharedMemoryReadingSameWord.cu
Objective : Example code to demonstrate the different access patterns of float3 array
in the global memory the corresponding advantages in terms of the bandwidth
that is achievable
Descritpion : Example code to demonstrate that while reading the same word by all the
threads, there will not be any serialization since all threads are accessing
from the same bank the 32-bit word gets broadcasted to all the threads --
hence bandwidth can be comparable to the value got when there were no
bank conflicts
output: The different bandwidths of the shared memory that are achieved while
accessing the same word and the access pattern without any bank conflicts
Modified : Aug 2011
Author : RarchK
****************************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include <float.h>
#define BLOCK_SIZE 128 // 128 threads per block
#define TRANSFERED_DATA_SIZE 2000000 //2 MB
#define NO_OF_PATTERNS 2
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
void printResults(void);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving accessing the elements with a stride of one 32-bit word
// the access pattern causes no bank conflicts
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemAccessWithStride1(void)
{
__shared__ __attribute__((unused)) float array[BLOCK_SIZE];
int idx = threadIdx.x;
float scalar = 5.0f;
array[idx] = scalar;
}
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving reading the same word by all the threads
// the access pattern is conflict-free
// since the word gets broadcasted to all the threads
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemReadSameWord(void)
{
__shared__ float array[BLOCK_SIZE];
int idx = 3;
float __attribute__((unused)) scalar ;
scalar = array[idx];
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routene
// for timing the two kernals
// finding the corresponding band widths
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
float elapsedTimes[NO_OF_PATTERNS][NTIMES];
cudaEvent_t start,stop;
double bytes = sizeof(float) * TRANSFERED_DATA_SIZE;
// event creation, which will be used for timing the code
cudaEventCreate(&start);
cudaEventCreate(&stop);
//finding the 1D grid size
int gridSize = TRANSFERED_DATA_SIZE/BLOCK_SIZE;
if( TRANSFERED_DATA_SIZE % BLOCK_SIZE != 0 )
gridSize += 1;
// running each pattern NTIMES
for(int k=0; k < NTIMES; k++)
{
// timing the kernels corresponding to different access patterns
// PATTERN 1
cudaEventRecord(start,0);
sharedMemAccessWithStride1 <<< gridSize,BLOCK_SIZE >>> ();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[0][k],start,stop);
// PATTERN 2
cudaEventRecord(start,0);
sharedMemReadSameWord <<< gridSize,BLOCK_SIZE >>> ();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[1][k],start,stop);
} // end of the for loop involving NTIMES
// intializing the mintime array
for(int i=0; i < NO_OF_PATTERNS;i++)
mintime[i] = FLT_MAX;
for (int k=1; k < NTIMES; k++) // skiping the first iteration
{
for (int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i] + elapsedTimes[i][k];
mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
}
}
// calculation of the different band widths that are achieved by different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
bandWidths[i] = bytes/mintime[i];
}
printResults();
printf("\n\n**** successful termination of the program ****\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// prints the results containig the minimum, maximum, average times taken by the two kernels
// the associated maximum bandwidth achieved by the different kernels
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void printResults()
{
printf("Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n");
printf("The transfered data size (Bytes): %ld\n",TRANSFERED_DATA_SIZE*sizeof(float));
printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
printf("Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
printf("-------------------------------------------------------------------------------------------------------------------------------\n");
// printing the results for different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
switch(i)
{
case 0: printf("Stride of one 32-bit word ");
break;
case 1: printf("Reading the same 32-bit word");
break;
}
printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
}
printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | /**********************************************************************************************
Source Code : sharedMemoryReadingSameWord.cu
Objective : Example code to demonstrate the different access patterns of float3 array
in the global memory the corresponding advantages in terms of the bandwidth
that is achievable
Descritpion : Example code to demonstrate that while reading the same word by all the
threads, there will not be any serialization since all threads are accessing
from the same bank the 32-bit word gets broadcasted to all the threads --
hence bandwidth can be comparable to the value got when there were no
bank conflicts
output: The different bandwidths of the shared memory that are achieved while
accessing the same word and the access pattern without any bank conflicts
Modified : Aug 2011
Author : RarchK
****************************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <float.h>
#define BLOCK_SIZE 128 // 128 threads per block
#define TRANSFERED_DATA_SIZE 2000000 //2 MB
#define NO_OF_PATTERNS 2
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
void printResults(void);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving accessing the elements with a stride of one 32-bit word
// the access pattern causes no bank conflicts
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemAccessWithStride1(void)
{
__shared__ __attribute__((unused)) float array[BLOCK_SIZE];
int idx = threadIdx.x;
float scalar = 5.0f;
array[idx] = scalar;
}
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving reading the same word by all the threads
// the access pattern is conflict-free
// since the word gets broadcasted to all the threads
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemReadSameWord(void)
{
__shared__ float array[BLOCK_SIZE];
int idx = 3;
float __attribute__((unused)) scalar ;
scalar = array[idx];
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routene
// for timing the two kernals
// finding the corresponding band widths
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
float elapsedTimes[NO_OF_PATTERNS][NTIMES];
hipEvent_t start,stop;
double bytes = sizeof(float) * TRANSFERED_DATA_SIZE;
// event creation, which will be used for timing the code
hipEventCreate(&start);
hipEventCreate(&stop);
//finding the 1D grid size
int gridSize = TRANSFERED_DATA_SIZE/BLOCK_SIZE;
if( TRANSFERED_DATA_SIZE % BLOCK_SIZE != 0 )
gridSize += 1;
// running each pattern NTIMES
for(int k=0; k < NTIMES; k++)
{
// timing the kernels corresponding to different access patterns
// PATTERN 1
hipEventRecord(start,0);
sharedMemAccessWithStride1 <<< gridSize,BLOCK_SIZE >>> ();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimes[0][k],start,stop);
// PATTERN 2
hipEventRecord(start,0);
sharedMemReadSameWord <<< gridSize,BLOCK_SIZE >>> ();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimes[1][k],start,stop);
} // end of the for loop involving NTIMES
// intializing the mintime array
for(int i=0; i < NO_OF_PATTERNS;i++)
mintime[i] = FLT_MAX;
for (int k=1; k < NTIMES; k++) // skiping the first iteration
{
for (int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i] + elapsedTimes[i][k];
mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
}
}
// calculation of the different band widths that are achieved by different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
bandWidths[i] = bytes/mintime[i];
}
printResults();
printf("\n\n**** successful termination of the program ****\n\n");
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// prints the results containig the minimum, maximum, average times taken by the two kernels
// the associated maximum bandwidth achieved by the different kernels
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void printResults()
{
printf("Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n");
printf("The transfered data size (Bytes): %ld\n",TRANSFERED_DATA_SIZE*sizeof(float));
printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
printf("Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
printf("-------------------------------------------------------------------------------------------------------------------------------\n");
// printing the results for different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
switch(i)
{
case 0: printf("Stride of one 32-bit word ");
break;
case 1: printf("Reading the same 32-bit word");
break;
}
printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
}
printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**********************************************************************************************
Source Code : sharedMemoryReadingSameWord.cu
Objective : Example code to demonstrate the different access patterns of float3 array
in the global memory the corresponding advantages in terms of the bandwidth
that is achievable
Descritpion : Example code to demonstrate that while reading the same word by all the
threads, there will not be any serialization since all threads are accessing
from the same bank the 32-bit word gets broadcasted to all the threads --
hence bandwidth can be comparable to the value got when there were no
bank conflicts
output: The different bandwidths of the shared memory that are achieved while
accessing the same word and the access pattern without any bank conflicts
Modified : Aug 2011
Author : RarchK
****************************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <float.h>
#define BLOCK_SIZE 128 // 128 threads per block
#define TRANSFERED_DATA_SIZE 2000000 //2 MB
#define NO_OF_PATTERNS 2
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
void printResults(void);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving accessing the elements with a stride of one 32-bit word
// the access pattern causes no bank conflicts
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemAccessWithStride1(void)
{
__shared__ __attribute__((unused)) float array[BLOCK_SIZE];
int idx = threadIdx.x;
float scalar = 5.0f;
array[idx] = scalar;
}
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving reading the same word by all the threads
// the access pattern is conflict-free
// since the word gets broadcasted to all the threads
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemReadSameWord(void)
{
__shared__ float array[BLOCK_SIZE];
int idx = 3;
float __attribute__((unused)) scalar ;
scalar = array[idx];
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routene
// for timing the two kernals
// finding the corresponding band widths
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
float elapsedTimes[NO_OF_PATTERNS][NTIMES];
hipEvent_t start,stop;
double bytes = sizeof(float) * TRANSFERED_DATA_SIZE;
// event creation, which will be used for timing the code
hipEventCreate(&start);
hipEventCreate(&stop);
//finding the 1D grid size
int gridSize = TRANSFERED_DATA_SIZE/BLOCK_SIZE;
if( TRANSFERED_DATA_SIZE % BLOCK_SIZE != 0 )
gridSize += 1;
// running each pattern NTIMES
for(int k=0; k < NTIMES; k++)
{
// timing the kernels corresponding to different access patterns
// PATTERN 1
hipEventRecord(start,0);
sharedMemAccessWithStride1 <<< gridSize,BLOCK_SIZE >>> ();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimes[0][k],start,stop);
// PATTERN 2
hipEventRecord(start,0);
sharedMemReadSameWord <<< gridSize,BLOCK_SIZE >>> ();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimes[1][k],start,stop);
} // end of the for loop involving NTIMES
// intializing the mintime array
for(int i=0; i < NO_OF_PATTERNS;i++)
mintime[i] = FLT_MAX;
for (int k=1; k < NTIMES; k++) // skiping the first iteration
{
for (int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i] + elapsedTimes[i][k];
mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
}
}
// calculation of the different band widths that are achieved by different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
bandWidths[i] = bytes/mintime[i];
}
printResults();
printf("\n\n**** successful termination of the program ****\n\n");
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// prints the results containig the minimum, maximum, average times taken by the two kernels
// the associated maximum bandwidth achieved by the different kernels
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void printResults()
{
printf("Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n");
printf("The transfered data size (Bytes): %ld\n",TRANSFERED_DATA_SIZE*sizeof(float));
printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
printf("Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
printf("-------------------------------------------------------------------------------------------------------------------------------\n");
// printing the results for different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
switch(i)
{
case 0: printf("Stride of one 32-bit word ");
break;
case 1: printf("Reading the same 32-bit word");
break;
}
printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
}
printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z26sharedMemAccessWithStride1v
.globl _Z26sharedMemAccessWithStride1v
.p2align 8
.type _Z26sharedMemAccessWithStride1v,@function
_Z26sharedMemAccessWithStride1v:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26sharedMemAccessWithStride1v
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z26sharedMemAccessWithStride1v, .Lfunc_end0-_Z26sharedMemAccessWithStride1v
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z21sharedMemReadSameWordv
.globl _Z21sharedMemReadSameWordv
.p2align 8
.type _Z21sharedMemReadSameWordv,@function
_Z21sharedMemReadSameWordv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21sharedMemReadSameWordv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z21sharedMemReadSameWordv, .Lfunc_end1-_Z21sharedMemReadSameWordv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26sharedMemAccessWithStride1v
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z26sharedMemAccessWithStride1v.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21sharedMemReadSameWordv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z21sharedMemReadSameWordv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**********************************************************************************************
Source Code : sharedMemoryReadingSameWord.cu
Objective : Example code to demonstrate the different access patterns of float3 array
in the global memory the corresponding advantages in terms of the bandwidth
that is achievable
Descritpion : Example code to demonstrate that while reading the same word by all the
threads, there will not be any serialization since all threads are accessing
from the same bank the 32-bit word gets broadcasted to all the threads --
hence bandwidth can be comparable to the value got when there were no
bank conflicts
output: The different bandwidths of the shared memory that are achieved while
accessing the same word and the access pattern without any bank conflicts
Modified : Aug 2011
Author : RarchK
****************************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <float.h>
#define BLOCK_SIZE 128 // 128 threads per block
#define TRANSFERED_DATA_SIZE 2000000 //2 MB
#define NO_OF_PATTERNS 2
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
void printResults(void);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving accessing the elements with a stride of one 32-bit word
// the access pattern causes no bank conflicts
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemAccessWithStride1(void)
{
__shared__ __attribute__((unused)) float array[BLOCK_SIZE];
int idx = threadIdx.x;
float scalar = 5.0f;
array[idx] = scalar;
}
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
//
// access pattern involving reading the same word by all the threads
// the access pattern is conflict-free
// since the word gets broadcasted to all the threads
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void sharedMemReadSameWord(void)
{
__shared__ float array[BLOCK_SIZE];
int idx = 3;
float __attribute__((unused)) scalar ;
scalar = array[idx];
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routene
// for timing the two kernals
// finding the corresponding band widths
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
float elapsedTimes[NO_OF_PATTERNS][NTIMES];
hipEvent_t start,stop;
double bytes = sizeof(float) * TRANSFERED_DATA_SIZE;
// event creation, which will be used for timing the code
hipEventCreate(&start);
hipEventCreate(&stop);
//finding the 1D grid size
int gridSize = TRANSFERED_DATA_SIZE/BLOCK_SIZE;
if( TRANSFERED_DATA_SIZE % BLOCK_SIZE != 0 )
gridSize += 1;
// running each pattern NTIMES
for(int k=0; k < NTIMES; k++)
{
// timing the kernels corresponding to different access patterns
// PATTERN 1
hipEventRecord(start,0);
sharedMemAccessWithStride1 <<< gridSize,BLOCK_SIZE >>> ();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimes[0][k],start,stop);
// PATTERN 2
hipEventRecord(start,0);
sharedMemReadSameWord <<< gridSize,BLOCK_SIZE >>> ();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimes[1][k],start,stop);
} // end of the for loop involving NTIMES
// intializing the mintime array
for(int i=0; i < NO_OF_PATTERNS;i++)
mintime[i] = FLT_MAX;
for (int k=1; k < NTIMES; k++) // skiping the first iteration
{
for (int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i] + elapsedTimes[i][k];
mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
}
}
// calculation of the different band widths that are achieved by different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
bandWidths[i] = bytes/mintime[i];
}
printResults();
printf("\n\n**** successful termination of the program ****\n\n");
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// prints the results containig the minimum, maximum, average times taken by the two kernels
// the associated maximum bandwidth achieved by the different kernels
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void printResults()
{
printf("Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n");
printf("The transfered data size (Bytes): %ld\n",TRANSFERED_DATA_SIZE*sizeof(float));
printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
printf("Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
printf("-------------------------------------------------------------------------------------------------------------------------------\n");
// printing the results for different access patterns
for(int i=0; i < NO_OF_PATTERNS; i++)
{
switch(i)
{
case 0: printf("Stride of one 32-bit word ");
break;
case 1: printf("Reading the same 32-bit word");
break;
}
printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
}
printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | .text
.file "sharedMemoryReadingSameWord.hip"
.globl _Z41__device_stub__sharedMemAccessWithStride1v # -- Begin function _Z41__device_stub__sharedMemAccessWithStride1v
.p2align 4, 0x90
.type _Z41__device_stub__sharedMemAccessWithStride1v,@function
_Z41__device_stub__sharedMemAccessWithStride1v: # @_Z41__device_stub__sharedMemAccessWithStride1v
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z26sharedMemAccessWithStride1v, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z41__device_stub__sharedMemAccessWithStride1v, .Lfunc_end0-_Z41__device_stub__sharedMemAccessWithStride1v
.cfi_endproc
# -- End function
.globl _Z36__device_stub__sharedMemReadSameWordv # -- Begin function _Z36__device_stub__sharedMemReadSameWordv
.p2align 4, 0x90
.type _Z36__device_stub__sharedMemReadSameWordv,@function
_Z36__device_stub__sharedMemReadSameWordv: # @_Z36__device_stub__sharedMemReadSameWordv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z21sharedMemReadSameWordv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z36__device_stub__sharedMemReadSameWordv, .Lfunc_end1-_Z36__device_stub__sharedMemReadSameWordv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x4022000000000000 # double 9
.LCPI2_1:
.quad 0x415e848000000000 # double 8.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967424, %rbx # imm = 0x100000080
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
leaq 80(%rsp), %r14
movl $10, %ebp
leaq 15497(%rbx), %r15
leaq 56(%rsp), %r12
leaq 64(%rsp), %r13
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_5: # in Loop: Header=BB2_1 Depth=1
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
leaq 40(%r14), %rdi
movq 8(%rsp), %rsi
movq (%rsp), %rdx
callq hipEventElapsedTime
addq $4, %r14
decq %rbp
je .LBB2_6
.LBB2_1: # =>This Inner Loop Header: Depth=1
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r15, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_3
# %bb.2: # in Loop: Header=BB2_1 Depth=1
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
movl $_Z26sharedMemAccessWithStride1v, %edi
movq %r13, %r9
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_3: # in Loop: Header=BB2_1 Depth=1
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 8(%rsp), %rsi
movq (%rsp), %rdx
movq %r14, %rdi
callq hipEventElapsedTime
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r15, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4: # in Loop: Header=BB2_1 Depth=1
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
movl $_Z21sharedMemReadSameWordv, %edi
movq %r13, %r9
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_5
.LBB2_6: # %.preheader70.preheader
movq $-8, %rax
movabsq $5183643170566569984, %rcx # imm = 0x47EFFFFFE0000000
.p2align 4, 0x90
.LBB2_7: # %.preheader70
# =>This Inner Loop Header: Depth=1
movq %rcx, _ZL7mintime+8(%rax)
addq $8, %rax
je .LBB2_7
# %bb.8: # %.preheader68.preheader
movl $1, %eax
leaq 84(%rsp), %rcx
.p2align 4, 0x90
.LBB2_9: # %.preheader68
# =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
movq $-8, %rdx
movq %rcx, %rsi
.p2align 4, 0x90
.LBB2_10: # Parent Loop BB2_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rsi), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movsd _ZL7avgtime+8(%rdx), %xmm1 # xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, _ZL7avgtime+8(%rdx)
movsd _ZL7mintime+8(%rdx), %xmm1 # xmm1 = mem[0],zero
minsd %xmm0, %xmm1
movsd %xmm1, _ZL7mintime+8(%rdx)
movsd _ZL7maxtime+8(%rdx), %xmm1 # xmm1 = mem[0],zero
maxsd %xmm0, %xmm1
movsd %xmm1, _ZL7maxtime+8(%rdx)
addq $40, %rsi
addq $8, %rdx
je .LBB2_10
# %bb.11: # in Loop: Header=BB2_9 Depth=1
incq %rax
addq $4, %rcx
cmpq $10, %rax
jne .LBB2_9
# %bb.12: # %.preheader.preheader
movq $-4, %rax
movsd .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero
movsd .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB2_13: # %.preheader
# =>This Inner Loop Header: Depth=1
movsd _ZL7avgtime+8(%rax,%rax), %xmm2 # xmm2 = mem[0],zero
divsd %xmm0, %xmm2
movsd %xmm2, _ZL7avgtime+8(%rax,%rax)
movapd %xmm1, %xmm2
divsd _ZL7mintime+8(%rax,%rax), %xmm2
cvtsd2ss %xmm2, %xmm2
movss %xmm2, _ZL10bandWidths+4(%rax)
addq $4, %rax
je .LBB2_13
# %bb.14:
callq _Z12printResultsv
movl $.Lstr, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipEventDestroy
movq (%rsp), %rdi
callq hipEventDestroy
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z12printResultsv
.LCPI3_0:
.long 0x49742400 # float 1.0E+6
.text
.globl _Z12printResultsv
.p2align 4, 0x90
.type _Z12printResultsv,@function
_Z12printResultsv: # @_Z12printResultsv
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $.Lstr.1, %edi
callq puts@PLT
movl $.L.str.2, %edi
movl $8000000, %esi # imm = 0x7A1200
xorl %eax, %eax
callq printf
movl $.Lstr.2, %edi
callq puts@PLT
movl $.Lstr.3, %edi
callq puts@PLT
movl $.Lstr.4, %edi
callq puts@PLT
movq $-4, %rbx
movl $.L.str.6, %r14d
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
cmpq $-4, %rbx
movl $.L.str.7, %edi
cmoveq %r14, %rdi
xorl %eax, %eax
callq printf
movss _ZL10bandWidths+4(%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI3_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd _ZL7avgtime+8(%rbx,%rbx), %xmm1 # xmm1 = mem[0],zero
movsd _ZL7mintime+8(%rbx,%rbx), %xmm2 # xmm2 = mem[0],zero
movsd _ZL7maxtime+8(%rbx,%rbx), %xmm3 # xmm3 = mem[0],zero
movl $.L.str.8, %edi
movb $4, %al
callq printf
addq $4, %rbx
je .LBB3_1
# %bb.2:
movl $.Lstr.5, %edi
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end3:
.size _Z12printResultsv, .Lfunc_end3-_Z12printResultsv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26sharedMemAccessWithStride1v, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21sharedMemReadSameWordv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
# --- Data objects emitted by the HIP toolchain -------------------------------
# Kernel "handle" objects: in HIP host code the kernel symbol is an 8-byte
# object whose value is the address of the generated __device_stub__ wrapper.
# hipLaunchKernel / __hipRegisterFunction receive this symbol's address.
.type _Z26sharedMemAccessWithStride1v,@object # @_Z26sharedMemAccessWithStride1v
.section .rodata,"a",@progbits
.globl _Z26sharedMemAccessWithStride1v
.p2align 3, 0x0
_Z26sharedMemAccessWithStride1v:
.quad _Z41__device_stub__sharedMemAccessWithStride1v
.size _Z26sharedMemAccessWithStride1v, 8
.type _Z21sharedMemReadSameWordv,@object # @_Z21sharedMemReadSameWordv
.globl _Z21sharedMemReadSameWordv
.p2align 3, 0x0
_Z21sharedMemReadSameWordv:
.quad _Z36__device_stub__sharedMemReadSameWordv
.size _Z21sharedMemReadSameWordv, 8
# Timing statistics statics (file-local, zero-initialized). Each is 16 bytes
# aligned to 16 — presumably two doubles, one slot per benchmarked kernel
# (stride-1 access vs. same-word read); confirm against the host source.
.type _ZL7mintime,@object # @_ZL7mintime
.local _ZL7mintime
.comm _ZL7mintime,16,16
.type _ZL7avgtime,@object # @_ZL7avgtime
.local _ZL7avgtime
.comm _ZL7avgtime,16,16
.type _ZL7maxtime,@object # @_ZL7maxtime
.local _ZL7maxtime
.comm _ZL7maxtime,16,16
# 8 bytes, 4-byte aligned — presumably two floats (one bandwidth per kernel).
.type _ZL10bandWidths,@object # @_ZL10bandWidths
.local _ZL10bandWidths
.comm _ZL10bandWidths,8,4
# printf format strings still used with printf (not foldable to puts).
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "The transfered data size (Bytes): %ld\n"
.size .L.str.2, 39
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Stride of one 32-bit word "
.size .L.str.6, 29
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Reading the same 32-bit word"
.size .L.str.7, 29
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\t %.6f \t\t %f \t\t %f \t\t %f\n"
.size .L.str.8, 28
# Mangled kernel-name strings passed to __hipRegisterFunction by the ctor.
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z26sharedMemAccessWithStride1v"
.size .L__unnamed_1, 32
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21sharedMemReadSameWordv"
.size .L__unnamed_2, 27
# Fat-binary wrapper: { magic 0x48495046 ("FPIH" bytes, i.e. "HIPF" tag),
# version 1, pointer to the bundled device code __hip_fatbin, reserved 0 }.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Cached module handle written by the ctor, cleared by the dtor.
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Run __hip_module_ctor before main().
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
# Compilation-unit id byte used by the HIP toolchain to tie host and device
# objects together.
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
# Strings whose printf("...\n") calls were folded to puts() (hence no
# trailing '\n' inside the literal).
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\n\n**** successful termination of the program ****\n"
.size .Lstr, 52
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts"
.size .Lstr.1, 89
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n-------------------------------------------------------------------------------------------------------------------------------"
.size .Lstr.2, 129
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)"
.size .Lstr.3, 81
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "-------------------------------------------------------------------------------------------------------------------------------"
.size .Lstr.4, 128
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "\n ------------------------------------------------------------------------------------------------------------------------------"
.size .Lstr.5, 129
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z41__device_stub__sharedMemAccessWithStride1v
.addrsig_sym _Z36__device_stub__sharedMemReadSameWordv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z26sharedMemAccessWithStride1v
.addrsig_sym _Z21sharedMemReadSameWordv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z21sharedMemReadSameWordv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z26sharedMemAccessWithStride1v
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z26sharedMemAccessWithStride1v
.globl _Z26sharedMemAccessWithStride1v
.p2align 8
.type _Z26sharedMemAccessWithStride1v,@function
_Z26sharedMemAccessWithStride1v:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26sharedMemAccessWithStride1v
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z26sharedMemAccessWithStride1v, .Lfunc_end0-_Z26sharedMemAccessWithStride1v
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z21sharedMemReadSameWordv
.globl _Z21sharedMemReadSameWordv
.p2align 8
.type _Z21sharedMemReadSameWordv,@function
_Z21sharedMemReadSameWordv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21sharedMemReadSameWordv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z21sharedMemReadSameWordv, .Lfunc_end1-_Z21sharedMemReadSameWordv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26sharedMemAccessWithStride1v
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z26sharedMemAccessWithStride1v.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21sharedMemReadSameWordv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z21sharedMemReadSameWordv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001402d6_00000000-6_sharedMemoryReadingSameWord.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts\n"
.align 8
.LC1:
.string "The transfered data size (Bytes): %ld\n"
.align 8
.LC2:
.string "\n-------------------------------------------------------------------------------------------------------------------------------\n"
.align 8
.LC3:
.string "Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n"
.align 8
.LC4:
.string "-------------------------------------------------------------------------------------------------------------------------------\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "Stride of one 32-bit word "
.LC7:
.string "\t %.6f \t\t %f \t\t %f \t\t %f\n"
.LC8:
.string "Reading the same 32-bit word"
.section .rodata.str1.8
.align 8
.LC9:
.string "\n ------------------------------------------------------------------------------------------------------------------------------\n"
.text
.globl _Z12printResultsv
.type _Z12printResultsv, @function
_Z12printResultsv:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $8000000, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss _ZL10bandWidths(%rip), %xmm0
divss .LC6(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd _ZL7maxtime(%rip), %xmm3
movsd _ZL7mintime(%rip), %xmm2
movsd _ZL7avgtime(%rip), %xmm1
leaq .LC7(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $4, %eax
call __printf_chk@PLT
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss 4+_ZL10bandWidths(%rip), %xmm0
divss .LC6(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd 8+_ZL7maxtime(%rip), %xmm3
movsd 8+_ZL7mintime(%rip), %xmm2
movsd 8+_ZL7avgtime(%rip), %xmm1
movq %rbx, %rsi
movl $2, %edi
movl $4, %eax
call __printf_chk@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z12printResultsv, .-_Z12printResultsv
.globl _Z45__device_stub__Z26sharedMemAccessWithStride1vv
.type _Z45__device_stub__Z26sharedMemAccessWithStride1vv, @function
_Z45__device_stub__Z26sharedMemAccessWithStride1vv:
.LFB2083:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z26sharedMemAccessWithStride1v(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z45__device_stub__Z26sharedMemAccessWithStride1vv, .-_Z45__device_stub__Z26sharedMemAccessWithStride1vv
.globl _Z26sharedMemAccessWithStride1v
.type _Z26sharedMemAccessWithStride1v, @function
_Z26sharedMemAccessWithStride1v:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z26sharedMemAccessWithStride1vv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z26sharedMemAccessWithStride1v, .-_Z26sharedMemAccessWithStride1v
.globl _Z40__device_stub__Z21sharedMemReadSameWordvv
.type _Z40__device_stub__Z21sharedMemReadSameWordvv, @function
_Z40__device_stub__Z21sharedMemReadSameWordvv:
.LFB2085:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z21sharedMemReadSameWordv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z40__device_stub__Z21sharedMemReadSameWordvv, .-_Z40__device_stub__Z21sharedMemReadSameWordvv
.globl _Z21sharedMemReadSameWordv
.type _Z21sharedMemReadSameWordv, @function
_Z21sharedMemReadSameWordv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z21sharedMemReadSameWordvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z21sharedMemReadSameWordv, .-_Z21sharedMemReadSameWordv
.section .rodata.str1.8
.align 8
.LC13:
.string "\n\n**** successful termination of the program ****\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $144, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
leaq 48(%rsp), %rbp
leaq 88(%rsp), %rbx
leaq 128(%rsp), %r12
jmp .L24
.L38:
call _Z45__device_stub__Z26sharedMemAccessWithStride1vv
jmp .L22
.L23:
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaEventElapsedTime@PLT
addq $4, %rbx
cmpq %rbx, %r12
je .L37
.L24:
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $128, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $15625, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L22:
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq -40(%rbx), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $128, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $15625, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L23
call _Z40__device_stub__Z21sharedMemReadSameWordvv
jmp .L23
.L37:
leaq 52(%rsp), %rax
leaq 40(%rbp), %rdx
movsd _ZL7avgtime(%rip), %xmm2
movsd _ZL7maxtime(%rip), %xmm6
movsd 8+_ZL7avgtime(%rip), %xmm1
movsd 8+_ZL7maxtime(%rip), %xmm5
movsd .LC10(%rip), %xmm3
movapd %xmm3, %xmm4
.L25:
pxor %xmm0, %xmm0
cvtss2sd (%rax), %xmm0
addsd %xmm0, %xmm2
minsd %xmm0, %xmm3
maxsd %xmm0, %xmm6
pxor %xmm0, %xmm0
cvtss2sd 40(%rax), %xmm0
addsd %xmm0, %xmm1
minsd %xmm0, %xmm4
maxsd %xmm0, %xmm5
addq $4, %rax
cmpq %rax, %rdx
jne .L25
movsd %xmm3, _ZL7mintime(%rip)
movsd %xmm6, _ZL7maxtime(%rip)
movsd %xmm4, 8+_ZL7mintime(%rip)
movsd %xmm5, 8+_ZL7maxtime(%rip)
movsd .LC11(%rip), %xmm5
divsd %xmm5, %xmm2
movsd %xmm2, _ZL7avgtime(%rip)
movsd .LC12(%rip), %xmm0
movapd %xmm0, %xmm2
divsd %xmm3, %xmm2
cvtsd2ss %xmm2, %xmm2
movss %xmm2, _ZL10bandWidths(%rip)
divsd %xmm5, %xmm1
movsd %xmm1, 8+_ZL7avgtime(%rip)
divsd %xmm4, %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 4+_ZL10bandWidths(%rip)
call _Z12printResultsv
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaEventDestroy@PLT
movq 16(%rsp), %rdi
call cudaEventDestroy@PLT
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L39
movl $0, %eax
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z21sharedMemReadSameWordv"
.section .rodata.str1.8
.align 8
.LC15:
.string "_Z26sharedMemAccessWithStride1v"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z21sharedMemReadSameWordv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z26sharedMemAccessWithStride1v(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10bandWidths
.comm _ZL10bandWidths,8,8
.local _ZL7mintime
.comm _ZL7mintime,16,16
.local _ZL7maxtime
.comm _ZL7maxtime,16,16
.local _ZL7avgtime
.comm _ZL7avgtime,16,16
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 1232348160
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC10:
.long -536870912
.long 1206910975
.align 8
.LC11:
.long 0
.long 1075970048
.align 8
.LC12:
.long 0
.long 1096713344
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sharedMemoryReadingSameWord.hip"
.globl _Z41__device_stub__sharedMemAccessWithStride1v # -- Begin function _Z41__device_stub__sharedMemAccessWithStride1v
.p2align 4, 0x90
.type _Z41__device_stub__sharedMemAccessWithStride1v,@function
_Z41__device_stub__sharedMemAccessWithStride1v: # @_Z41__device_stub__sharedMemAccessWithStride1v
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z26sharedMemAccessWithStride1v, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z41__device_stub__sharedMemAccessWithStride1v, .Lfunc_end0-_Z41__device_stub__sharedMemAccessWithStride1v
.cfi_endproc
# -- End function
.globl _Z36__device_stub__sharedMemReadSameWordv # -- Begin function _Z36__device_stub__sharedMemReadSameWordv
.p2align 4, 0x90
.type _Z36__device_stub__sharedMemReadSameWordv,@function
_Z36__device_stub__sharedMemReadSameWordv: # @_Z36__device_stub__sharedMemReadSameWordv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z21sharedMemReadSameWordv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z36__device_stub__sharedMemReadSameWordv, .Lfunc_end1-_Z36__device_stub__sharedMemReadSameWordv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x4022000000000000 # double 9
.LCPI2_1:
.quad 0x415e848000000000 # double 8.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967424, %rbx # imm = 0x100000080
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
leaq 80(%rsp), %r14
movl $10, %ebp
leaq 15497(%rbx), %r15
leaq 56(%rsp), %r12
leaq 64(%rsp), %r13
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_5: # in Loop: Header=BB2_1 Depth=1
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
leaq 40(%r14), %rdi
movq 8(%rsp), %rsi
movq (%rsp), %rdx
callq hipEventElapsedTime
addq $4, %r14
decq %rbp
je .LBB2_6
.LBB2_1: # =>This Inner Loop Header: Depth=1
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r15, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_3
# %bb.2: # in Loop: Header=BB2_1 Depth=1
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
movl $_Z26sharedMemAccessWithStride1v, %edi
movq %r13, %r9
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_3: # in Loop: Header=BB2_1 Depth=1
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 8(%rsp), %rsi
movq (%rsp), %rdx
movq %r14, %rdi
callq hipEventElapsedTime
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq %r15, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4: # in Loop: Header=BB2_1 Depth=1
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
movl $_Z21sharedMemReadSameWordv, %edi
movq %r13, %r9
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_5
.LBB2_6: # %.preheader70.preheader
movq $-8, %rax
movabsq $5183643170566569984, %rcx # imm = 0x47EFFFFFE0000000
.p2align 4, 0x90
.LBB2_7: # %.preheader70
# =>This Inner Loop Header: Depth=1
movq %rcx, _ZL7mintime+8(%rax)
addq $8, %rax
je .LBB2_7
# %bb.8: # %.preheader68.preheader
movl $1, %eax
leaq 84(%rsp), %rcx
.p2align 4, 0x90
.LBB2_9: # %.preheader68
# =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
movq $-8, %rdx
movq %rcx, %rsi
.p2align 4, 0x90
.LBB2_10: # Parent Loop BB2_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rsi), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movsd _ZL7avgtime+8(%rdx), %xmm1 # xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, _ZL7avgtime+8(%rdx)
movsd _ZL7mintime+8(%rdx), %xmm1 # xmm1 = mem[0],zero
minsd %xmm0, %xmm1
movsd %xmm1, _ZL7mintime+8(%rdx)
movsd _ZL7maxtime+8(%rdx), %xmm1 # xmm1 = mem[0],zero
maxsd %xmm0, %xmm1
movsd %xmm1, _ZL7maxtime+8(%rdx)
addq $40, %rsi
addq $8, %rdx
je .LBB2_10
# %bb.11: # in Loop: Header=BB2_9 Depth=1
incq %rax
addq $4, %rcx
cmpq $10, %rax
jne .LBB2_9
# %bb.12: # %.preheader.preheader
movq $-4, %rax
movsd .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero
movsd .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB2_13: # %.preheader
# =>This Inner Loop Header: Depth=1
movsd _ZL7avgtime+8(%rax,%rax), %xmm2 # xmm2 = mem[0],zero
divsd %xmm0, %xmm2
movsd %xmm2, _ZL7avgtime+8(%rax,%rax)
movapd %xmm1, %xmm2
divsd _ZL7mintime+8(%rax,%rax), %xmm2
cvtsd2ss %xmm2, %xmm2
movss %xmm2, _ZL10bandWidths+4(%rax)
addq $4, %rax
je .LBB2_13
# %bb.14:
callq _Z12printResultsv
movl $.Lstr, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipEventDestroy
movq (%rsp), %rdi
callq hipEventDestroy
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z12printResultsv
.LCPI3_0:
.long 0x49742400 # float 1.0E+6
.text
.globl _Z12printResultsv
.p2align 4, 0x90
.type _Z12printResultsv,@function
_Z12printResultsv: # @_Z12printResultsv
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $.Lstr.1, %edi
callq puts@PLT
movl $.L.str.2, %edi
movl $8000000, %esi # imm = 0x7A1200
xorl %eax, %eax
callq printf
movl $.Lstr.2, %edi
callq puts@PLT
movl $.Lstr.3, %edi
callq puts@PLT
movl $.Lstr.4, %edi
callq puts@PLT
movq $-4, %rbx
movl $.L.str.6, %r14d
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
cmpq $-4, %rbx
movl $.L.str.7, %edi
cmoveq %r14, %rdi
xorl %eax, %eax
callq printf
movss _ZL10bandWidths+4(%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI3_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd _ZL7avgtime+8(%rbx,%rbx), %xmm1 # xmm1 = mem[0],zero
movsd _ZL7mintime+8(%rbx,%rbx), %xmm2 # xmm2 = mem[0],zero
movsd _ZL7maxtime+8(%rbx,%rbx), %xmm3 # xmm3 = mem[0],zero
movl $.L.str.8, %edi
movb $4, %al
callq printf
addq $4, %rbx
je .LBB3_1
# %bb.2:
movl $.Lstr.5, %edi
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end3:
.size _Z12printResultsv, .Lfunc_end3-_Z12printResultsv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26sharedMemAccessWithStride1v, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21sharedMemReadSameWordv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z26sharedMemAccessWithStride1v,@object # @_Z26sharedMemAccessWithStride1v
.section .rodata,"a",@progbits
.globl _Z26sharedMemAccessWithStride1v
.p2align 3, 0x0
_Z26sharedMemAccessWithStride1v:
.quad _Z41__device_stub__sharedMemAccessWithStride1v
.size _Z26sharedMemAccessWithStride1v, 8
.type _Z21sharedMemReadSameWordv,@object # @_Z21sharedMemReadSameWordv
.globl _Z21sharedMemReadSameWordv
.p2align 3, 0x0
_Z21sharedMemReadSameWordv:
.quad _Z36__device_stub__sharedMemReadSameWordv
.size _Z21sharedMemReadSameWordv, 8
.type _ZL7mintime,@object # @_ZL7mintime
.local _ZL7mintime
.comm _ZL7mintime,16,16
.type _ZL7avgtime,@object # @_ZL7avgtime
.local _ZL7avgtime
.comm _ZL7avgtime,16,16
.type _ZL7maxtime,@object # @_ZL7maxtime
.local _ZL7maxtime
.comm _ZL7maxtime,16,16
.type _ZL10bandWidths,@object # @_ZL10bandWidths
.local _ZL10bandWidths
.comm _ZL10bandWidths,8,4
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "The transfered data size (Bytes): %ld\n"
.size .L.str.2, 39
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Stride of one 32-bit word "
.size .L.str.6, 29
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Reading the same 32-bit word"
.size .L.str.7, 29
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\t %.6f \t\t %f \t\t %f \t\t %f\n"
.size .L.str.8, 28
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z26sharedMemAccessWithStride1v"
.size .L__unnamed_1, 32
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z21sharedMemReadSameWordv"
.size .L__unnamed_2, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\n\n**** successful termination of the program ****\n"
.size .Lstr, 52
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Demonstrating that reading same 32-bit word by all threads will not cause bank conflicts"
.size .Lstr.1, 89
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n-------------------------------------------------------------------------------------------------------------------------------"
.size .Lstr.2, 129
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)"
.size .Lstr.3, 81
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "-------------------------------------------------------------------------------------------------------------------------------"
.size .Lstr.4, 128
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "\n ------------------------------------------------------------------------------------------------------------------------------"
.size .Lstr.5, 129
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z41__device_stub__sharedMemAccessWithStride1v
.addrsig_sym _Z36__device_stub__sharedMemReadSameWordv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z26sharedMemAccessWithStride1v
.addrsig_sym _Z21sharedMemReadSameWordv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cstdlib>
#include <stdio.h>
#include <cassert>
#include <cuda_runtime.h>
/*
Naive implementation.
Allocate one thread for one element in result matrix, processing dot(Arow, Bcol);
*/
__global__ void kMatrixMul0 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2){
    // Naive kernel: one thread per output element, computing
    // dot(row `row` of d_mat1, column `col` of d_mat2).
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard the grid tail: threads outside the m1 x n2 result do nothing.
    if (col >= n2 || row >= m1) {
        return;
    }
    float acc = 0.0f;
    for (int k = 0; k < m2; ++k) {
        acc += d_mat1[row * m2 + k] * d_mat2[k * n2 + col];
    }
    d_res[n2 * row + col] = acc;
}
/* Using shared memory */
template<int BLOCK_SIZE>
__global__ void kMatrixMul1 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2) {
    // Shared-memory tiled multiply. Each block computes one
    // BLOCK_SIZE x BLOCK_SIZE tile of d_res.
    // Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE) and all matrix
    // dimensions are multiples of BLOCK_SIZE (there are no tail guards).
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int aStart = BLOCK_SIZE * blockIdx.y * m2;  // first A tile of this block's row band
    int aEnd   = aStart + m2;                   // one past the last A tile of the band
    int bStart = BLOCK_SIZE * blockIdx.x;       // first B tile of this block's column band
    int bStep  = BLOCK_SIZE * n2;               // B advances one tile-row per step
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float c = 0.0f;
    // BUG FIX: the original condition was `a < m2`, so every block with
    // blockIdx.y > 0 (where aStart >= m2) skipped the loop entirely and
    // wrote an uninitialized-looking 0 result. The band must be walked
    // from aStart to aStart + m2.
    for (int a = aStart, b = bStart; a < aEnd; a += BLOCK_SIZE, b += bStep) {
        // Stage one tile of A and one tile of B into shared memory.
        As[ty][tx] = d_mat1[a + m2 * ty + tx];
        Bs[ty][tx] = d_mat2[b + n2 * ty + tx];
        __syncthreads();                         // tiles fully loaded before use
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE; k++) {
            c += As[ty][k] * Bs[k][tx];
        }
        __syncthreads();                         // done reading before next overwrite
    }
    d_res[(BLOCK_SIZE * blockIdx.y + ty) * n2 + BLOCK_SIZE * blockIdx.x + tx] = c;
}
/* Using register blocking */
template<int BLOCK_SIZE, int REGT_SIZE>
__global__ void kMatrixMul2 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2) {
    // Register-blocked tiled multiply. Each block computes a
    // BLOCK_SIZE x BLOCK_SIZE tile of d_res using
    // (BLOCK_SIZE/REGT_SIZE)^2 threads; each thread owns a
    // REGT_SIZE x REGT_SIZE sub-tile held in registers.
    // Preconditions: blockDim == (BLOCK_SIZE/REGT_SIZE, BLOCK_SIZE/REGT_SIZE)
    // and all matrix dimensions are multiples of BLOCK_SIZE.
    //
    // BUG FIX (rewrite): the original indexed acc[ty+i][tx+j] (out of the
    // REGT_SIZE x REGT_SIZE array's bounds), accumulated the same
    // As[ty][k]*Bs[k][tx] product regardless of i/j, and finally stored
    // `c`, which was never updated — every output was 0.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    const int tx = threadIdx.x * REGT_SIZE;       // sub-tile origin inside the block tile
    const int ty = threadIdx.y * REGT_SIZE;
    const int rowBase = BLOCK_SIZE * blockIdx.y;  // tile origin in d_res
    const int colBase = BLOCK_SIZE * blockIdx.x;
    float acc[REGT_SIZE][REGT_SIZE];
    float Ar[REGT_SIZE];
    float Br[REGT_SIZE];
    #pragma unroll
    for (int i = 0; i < REGT_SIZE; i++)
        #pragma unroll
        for (int j = 0; j < REGT_SIZE; j++)
            acc[i][j] = 0.0f;
    for (int t = 0; t < m2; t += BLOCK_SIZE) {
        // Cooperative load: each thread stages its REGT_SIZE x REGT_SIZE
        // patch of the current A and B tiles into shared memory.
        #pragma unroll
        for (int i = 0; i < REGT_SIZE; i++)
            #pragma unroll
            for (int j = 0; j < REGT_SIZE; j++) {
                As[ty + i][tx + j] = d_mat1[(rowBase + ty + i) * m2 + t + tx + j];
                Bs[ty + i][tx + j] = d_mat2[(t + ty + i) * n2 + colBase + tx + j];
            }
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) {
            // Pull the k-th column of A's sub-rows and the k-th row of
            // B's sub-columns into registers, then rank-1 update acc.
            #pragma unroll
            for (int i = 0; i < REGT_SIZE; i++) Ar[i] = As[ty + i][k];
            #pragma unroll
            for (int j = 0; j < REGT_SIZE; j++) Br[j] = Bs[k][tx + j];
            #pragma unroll
            for (int i = 0; i < REGT_SIZE; i++)
                #pragma unroll
                for (int j = 0; j < REGT_SIZE; j++)
                    acc[i][j] += Ar[i] * Br[j];
        }
        __syncthreads();
    }
    #pragma unroll
    for (int i = 0; i < REGT_SIZE; i++)
        #pragma unroll
        for (int j = 0; j < REGT_SIZE; j++)
            d_res[(rowBase + ty + i) * n2 + colBase + tx + j] = acc[i][j];
}
float* MatrixMultGPU0(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    // Multiplies mat1 (m1 x m2) by mat2 (n1 x n2) on the GPU and returns the
    // m1 x n2 product, allocated with new[] (caller frees with delete[]).
    // NOTE(review): kMatrixMul1 has no tail guards, so dimensions are
    // assumed to be multiples of the block edge N below.
    float *d_res, *d_mat1, *d_mat2;
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    //malloc the device memory for matrices
    cudaError_t result = cudaMalloc((void**)&d_res, sizeof(float)*m1*n2);
    assert (result == cudaSuccess);
    result = cudaMalloc((void**)&d_mat1, sizeof(float)*m1*m2);
    assert (result == cudaSuccess);
    result = cudaMalloc((void**)&d_mat2, sizeof(float)*n1*n2);
    assert (result == cudaSuccess);
    //init source matrices in device memory
    result = cudaMemcpy(d_mat1, mat1, sizeof(float)*m1*m2, cudaMemcpyHostToDevice);
    assert (result == cudaSuccess);
    result = cudaMemcpy(d_mat2, mat2, sizeof(float)*n1*n2, cudaMemcpyHostToDevice);
    assert (result == cudaSuccess);
    cudaEventRecord(start, NULL);
    // BUG FIX: N was 64, i.e. a 64x64 = 4096-thread block. CUDA caps a block
    // at 1024 threads, so that launch always failed — unnoticed because the
    // launch result was never checked. 32x32 = 1024 is the maximum legal edge.
    const int N = 32;
    dim3 block_size(N, N);
    // grid dimensions in blocks (integer ceil-division)
    dim3 grid_size((n2 + N - 1) / N, (m1 + N - 1) / N);
    //sharedMem version (template argument must match the block edge N)
    kMatrixMul1<32><<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
    result = cudaGetLastError();   // surface launch-configuration errors
    assert (result == cudaSuccess);
    //copy back the multiplication result (blocking copy also syncs the kernel)
    float* res = new float[m1*n2];
    result = cudaMemcpy(res, d_res, sizeof(float)*m1*n2, cudaMemcpyDeviceToHost);
    assert (result == cudaSuccess);
    cudaEventRecord(end, NULL);
    cudaEventSynchronize(end);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, end);
    printf("calculation Time:%f ms\n", msecTotal);
    // release timing and device resources (events leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(d_res);
    cudaFree(d_mat1);
    cudaFree(d_mat2);
    return res;
}
float* MatrixMulCPU(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    // CPU reference: returns res (m1 x n2) = mat1 (m1 x m2) * mat2 (n1 x n2),
    // all row-major. Caller frees the result with delete[].
    assert(m2 == n1 && "matrx a's cols != matrix b's rows");
    float* res = new float[m1*n2];
    for (int i = 0; i < m1; i++) {
        for (int j = 0; j < n2; j++) {
            // BUG FIX: the original used the wrong leading dimensions
            // (res[i*m1+j], mat1[i*m1+x], mat2[x*n1+j]); the row strides of
            // row-major res/mat1/mat2 are n2/m2/n2. The old code was only
            // coincidentally correct for square matrices.
            float sum = 0.0f;
            for (int x = 0; x < m2; x++) {
                sum += mat1[i*m2+x] * mat2[x*n2+j];
            }
            res[i*n2+j] = sum;
        }
    }
    return res;
}
float CompareMatrix(float mat1[], float mat2[], int m1, int m2){
    // Accumulates the signed element-wise difference (mat1 - mat2) over an
    // m1 x m2 grid, walking column-by-column (element index m2*row + col).
    // Note: differences of opposite sign cancel; this is a quick check,
    // not an absolute-error metric.
    float total = 0.0f;
    for (int col = 0; col < m1; ++col) {
        for (int row = 0; row < m2; ++row) {
            const int idx = m2 * row + col;
            total += mat1[idx] - mat2[idx];
        }
    }
    return total;
}
void FillMatrix(float mat[], int m1, int m2, float d=1.0){
    // Fills an m1 x m2 row-major matrix: with the constant d when d >= 0,
    // otherwise with uniform random values in [0, 10].
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            // BUG FIX: the original wrote mat[i*m1+j]; the row stride of an
            // m1 x m2 row-major matrix is m2. With m1 != m2 the old code
            // overlapped rows and left trailing cells unwritten.
            if (d<0.0) {
                mat[i*m2+j] = static_cast<float>(rand())/static_cast<float>(RAND_MAX) * 10.0;
            } else {
                mat[i*m2+j] = d;
            }
        }
    }
}
void PrintMatrix(float mat[], int m1, int m2){
    // Prints an m1 x m2 row-major matrix, one comma-separated row per line.
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            // BUG FIX: row stride is m2 (the original used i*m1+j, which
            // prints the wrong elements whenever m1 != m2).
            printf("%f,", mat[i*m2+j]);
        }
        printf("\n");
    }
}
int main(int argc, char *argv[]){
int m1 = 1024;
int m2;
int n1 = m2 = 1024;
int n2 = 1024;
float *matrix_a = new float[m1*m2];
float *matrix_b = new float[n1*n2];
FillMatrix((float*)matrix_a, m1, m2);
FillMatrix((float*)matrix_b, n1, n2);
//float *ref = MatrixMulCPU((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
float *res = MatrixMultGPU0((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
//printf("error:%f\n", CompareMatrix(ref, res, m1, n2));
//PrintMatrix(res, m1, m2);
delete[] matrix_a;
delete[] matrix_b;
delete res;
//delete ref;
} | .file "tmpxft_000499f5_00000000-6_matrixmult.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii, @function
_ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii:
.LFB2093:
.cfi_startproc
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2093:
.size _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii, .-_ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii
.section .text._Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,"axG",@progbits,_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.weak _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.type _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, @function
_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii:
.LFB2142:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2142:
.size _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, .-_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2069:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2069:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "calculation Time:%f ms\n"
.text
.globl _Z14MatrixMultGPU0PfiiS_ii
.type _Z14MatrixMultGPU0PfiiS_ii, @function
_Z14MatrixMultGPU0PfiiS_ii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 16(%rsp)
movl %esi, %r13d
movl %edx, 8(%rsp)
movq %rcx, 24(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, %r12d
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
movslq %r13d, %rbp
movslq %r12d, %rbx
movq %rbp, %r15
imulq %rbx, %r15
leaq 0(,%r15,4), %r14
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movslq 8(%rsp), %rax
imulq %rax, %rbp
salq $2, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movslq 12(%rsp), %rax
imulq %rax, %rbx
salq $2, %rbx
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq 16(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl $64, 80(%rsp)
movl $64, 84(%rsp)
movl $1, 88(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %r12d, %xmm0
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC6(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L12
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L12:
pxor %xmm0, %xmm0
cvtsi2ssl %r13d, %xmm0
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm4
movss .LC6(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm5
ucomiss %xmm2, %xmm5
jbe .L13
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm4
movss .LC3(%rip), %xmm5
andps %xmm5, %xmm4
addss %xmm2, %xmm4
andnps %xmm0, %xmm3
orps %xmm3, %xmm4
.L13:
cvttss2sil %xmm1, %eax
movl %eax, 92(%rsp)
cvttss2sil %xmm4, %eax
movl %eax, 96(%rsp)
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L14:
movabsq $2305843009213693950, %rax
cmpq %r15, %rax
jb .L15
movq %r14, %rdi
call _Znam@PLT
movq %rax, %rbx
movl $2, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 36(%rsp)
leaq 36(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L21
movq %rbx, %rax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq %r12
.cfi_def_cfa_offset 192
movl 28(%rsp), %r9d
movq 72(%rsp), %r8
movl 24(%rsp), %ecx
movl %r13d, %edx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L14
.L15:
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L16
call __stack_chk_fail@PLT
.L16:
call __cxa_throw_bad_array_new_length@PLT
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z14MatrixMultGPU0PfiiS_ii, .-_Z14MatrixMultGPU0PfiiS_ii
.globl _Z12MatrixMulCPUPfiiS_ii
.type _Z12MatrixMulCPUPfiiS_ii, @function
_Z12MatrixMulCPUPfiiS_ii:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %edx, %ebp
movq %rcx, 8(%rsp)
movl %esi, %eax
imull %r9d, %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L23
movq %rdi, %r13
movl %esi, %r12d
movl %r8d, %r15d
movl %r9d, %ebx
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %r14
testl %r12d, %r12d
jle .L22
movslq %r15d, %rsi
salq $2, %rsi
movl $0, %edx
movl $0, %eax
movslq %ebp, %r15
movq %r14, %rdi
movq %r15, %rcx
jmp .L25
.L23:
call __cxa_throw_bad_array_new_length@PLT
.L29:
movslq %edx, %r8
leaq 0(,%r8,4), %r15
leaq (%rdi,%r15), %r9
movq 8(%rsp), %r11
addq %r13, %r15
addq %rcx, %r8
leaq 0(%r13,%r8,4), %r8
movl $0, %r10d
movl %eax, (%rsp)
movl %edx, 4(%rsp)
.L28:
movq %r9, %r14
movl $0x00000000, (%r9)
testl %ebp, %ebp
jle .L26
movq %r11, %rdx
movq %r15, %rax
pxor %xmm1, %xmm1
.L27:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rsi, %rdx
cmpq %r8, %rax
jne .L27
movss %xmm1, (%r14)
.L26:
addl $1, %r10d
addq $4, %r9
addq $4, %r11
cmpl %r10d, %ebx
jne .L28
movl (%rsp), %eax
movl 4(%rsp), %edx
.L30:
addl $1, %eax
addl %r12d, %edx
cmpl %eax, %r12d
je .L33
.L25:
testl %ebx, %ebx
jg .L29
jmp .L30
.L33:
movq %rdi, %r14
.L22:
movq %r14, %rax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z12MatrixMulCPUPfiiS_ii, .-_Z12MatrixMulCPUPfiiS_ii
.globl _Z13CompareMatrixPfS_ii
.type _Z13CompareMatrixPfS_ii, @function
_Z13CompareMatrixPfS_ii:
.LFB2063:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L41
movslq %edx, %r10
movslq %ecx, %r8
salq $2, %r8
movl $0, %r9d
pxor %xmm1, %xmm1
jmp .L37
.L38:
movss (%rdi,%rax), %xmm0
subss (%rsi,%rax), %xmm0
addss %xmm0, %xmm1
addl $1, %edx
addq %r8, %rax
cmpl %edx, %ecx
jne .L38
.L40:
addq $1, %r9
cmpq %r10, %r9
je .L35
.L37:
leaq 0(,%r9,4), %rax
movl $0, %edx
testl %ecx, %ecx
jg .L38
jmp .L40
.L41:
pxor %xmm1, %xmm1
.L35:
movaps %xmm1, %xmm0
ret
.cfi_endproc
.LFE2063:
.size _Z13CompareMatrixPfS_ii, .-_Z13CompareMatrixPfS_ii
.globl _Z10FillMatrixPfiif
.type _Z10FillMatrixPfiif, @function
_Z10FillMatrixPfiif:
.LFB2064:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %edx, 12(%rsp)
testl %esi, %esi
jle .L44
movq %rdi, %r13
movl %esi, %r15d
movd %xmm0, %ebp
movl %edx, %r12d
movl $0, 8(%rsp)
movl $0, %r14d
jmp .L46
.L58:
call rand@PLT
movl %eax, %edx
movslq %ebx, %rax
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
mulss .LC7(%rip), %xmm0
mulss .LC8(%rip), %xmm0
movss %xmm0, 0(%r13,%rax,4)
.L49:
addl $1, %ebx
cmpl %r12d, %ebx
je .L51
.L50:
pxor %xmm1, %xmm1
movd %ebp, %xmm2
comiss %xmm2, %xmm1
ja .L58
movslq %ebx, %rax
movl %ebp, 0(%r13,%rax,4)
jmp .L49
.L51:
addl $1, %r14d
addl %r15d, %r12d
addl %r15d, 8(%rsp)
cmpl %r14d, %r15d
je .L44
.L46:
movl 8(%rsp), %ebx
cmpl $0, 12(%rsp)
jg .L50
jmp .L51
.L44:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _Z10FillMatrixPfiif, .-_Z10FillMatrixPfiif
.section .rodata.str1.1
.LC9:
.string "%f,"
.LC10:
.string "\n"
.text
.globl _Z11PrintMatrixPfii
.type _Z11PrintMatrixPfii, @function
_Z11PrintMatrixPfii:
.LFB2065:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 16(%rsp)
movl %edx, 12(%rsp)
testl %esi, %esi
jle .L59
movl %esi, %r15d
movl $0, %r14d
movl $0, %r13d
movl %edx, %eax
cltq
movq %rax, 24(%rsp)
leaq .LC9(%rip), %r12
jmp .L61
.L63:
movslq %r14d, %rax
movq 16(%rsp), %rcx
leaq (%rcx,%rax,4), %rbx
movq 24(%rsp), %rdx
addq %rdx, %rax
leaq (%rcx,%rax,4), %rbp
.L62:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L62
.L64:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addl %r15d, %r14d
cmpl %r13d, %r15d
je .L59
.L61:
cmpl $0, 12(%rsp)
jg .L63
jmp .L64
.L59:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _Z11PrintMatrixPfii, .-_Z11PrintMatrixPfii
.globl main
.type main, @function
main:
.LFB2066:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
movss .LC3(%rip), %xmm0
movl $1024, %edx
movl $1024, %esi
movq %rbp, %rdi
call _Z10FillMatrixPfiif
movss .LC3(%rip), %xmm0
movl $1024, %edx
movl $1024, %esi
movq %rbx, %rdi
call _Z10FillMatrixPfiif
movl $1024, %r9d
movl $1024, %r8d
movq %rbx, %rcx
movl $1024, %edx
movl $1024, %esi
movq %rbp, %rdi
call _Z14MatrixMultGPU0PfiiS_ii
movq %rax, %r12
movq %rbp, %rdi
call _ZdaPv@PLT
movq %rbx, %rdi
call _ZdaPv@PLT
testq %r12, %r12
je .L68
movl $4, %esi
movq %r12, %rdi
call _ZdlPvm@PLT
.L68:
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size main, .-main
.globl _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii
.type _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii, @function
_Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii:
.LFB2091:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L74
.L70:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L75
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L74:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11kMatrixMul0PfS_iiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L70
.L75:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2091:
.size _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii, .-_Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii
.globl _Z11kMatrixMul0PfS_iiS_ii
.type _Z11kMatrixMul0PfS_iiS_ii, @function
_Z11kMatrixMul0PfS_iiS_ii:
.LFB2092:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _Z11kMatrixMul0PfS_iiS_ii, .-_Z11kMatrixMul0PfS_iiS_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC11:
.string "_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii"
.section .rodata.str1.1
.LC12:
.string "_Z11kMatrixMul0PfS_iiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2096:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kMatrixMul0PfS_iiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1015021568
.align 4
.LC1:
.long 1258291200
.align 4
.LC3:
.long 1065353216
.align 4
.LC6:
.long 2147483647
.align 4
.LC7:
.long 805306368
.align 4
.LC8:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cstdlib>
#include <stdio.h>
#include <cassert>
#include <cuda_runtime.h>
/*
Naive implementation.
Allocate one thread for one element in result matrix, processing dot(Arow, Bcol);
*/
__global__ void kMatrixMul0 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2){
    // Naive kernel: one thread per output element, computing
    // dot(row `row` of d_mat1, column `col` of d_mat2).
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard the grid tail: threads outside the m1 x n2 result do nothing.
    if (col >= n2 || row >= m1) {
        return;
    }
    float acc = 0.0f;
    for (int k = 0; k < m2; ++k) {
        acc += d_mat1[row * m2 + k] * d_mat2[k * n2 + col];
    }
    d_res[n2 * row + col] = acc;
}
/* Using shared memory */
template<int BLOCK_SIZE>
__global__ void kMatrixMul1 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2) {
    // Shared-memory tiled multiply. Each block computes one
    // BLOCK_SIZE x BLOCK_SIZE tile of d_res.
    // Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE) and all matrix
    // dimensions are multiples of BLOCK_SIZE (there are no tail guards).
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int aStart = BLOCK_SIZE * blockIdx.y * m2;  // first A tile of this block's row band
    int aEnd   = aStart + m2;                   // one past the last A tile of the band
    int bStart = BLOCK_SIZE * blockIdx.x;       // first B tile of this block's column band
    int bStep  = BLOCK_SIZE * n2;               // B advances one tile-row per step
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float c = 0.0f;
    // BUG FIX: the original condition was `a < m2`, so every block with
    // blockIdx.y > 0 (where aStart >= m2) skipped the loop entirely and
    // wrote an uninitialized-looking 0 result. The band must be walked
    // from aStart to aStart + m2.
    for (int a = aStart, b = bStart; a < aEnd; a += BLOCK_SIZE, b += bStep) {
        // Stage one tile of A and one tile of B into shared memory.
        As[ty][tx] = d_mat1[a + m2 * ty + tx];
        Bs[ty][tx] = d_mat2[b + n2 * ty + tx];
        __syncthreads();                         // tiles fully loaded before use
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE; k++) {
            c += As[ty][k] * Bs[k][tx];
        }
        __syncthreads();                         // done reading before next overwrite
    }
    d_res[(BLOCK_SIZE * blockIdx.y + ty) * n2 + BLOCK_SIZE * blockIdx.x + tx] = c;
}
/* Using register blocking */
template<int BLOCK_SIZE, int REGT_SIZE>
__global__ void kMatrixMul2 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2) {
    // Register-blocked tiled multiply. Each block computes a
    // BLOCK_SIZE x BLOCK_SIZE tile of d_res using
    // (BLOCK_SIZE/REGT_SIZE)^2 threads; each thread owns a
    // REGT_SIZE x REGT_SIZE sub-tile held in registers.
    // Preconditions: blockDim == (BLOCK_SIZE/REGT_SIZE, BLOCK_SIZE/REGT_SIZE)
    // and all matrix dimensions are multiples of BLOCK_SIZE.
    //
    // BUG FIX (rewrite): the original indexed acc[ty+i][tx+j] (out of the
    // REGT_SIZE x REGT_SIZE array's bounds), accumulated the same
    // As[ty][k]*Bs[k][tx] product regardless of i/j, and finally stored
    // `c`, which was never updated — every output was 0.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    const int tx = threadIdx.x * REGT_SIZE;       // sub-tile origin inside the block tile
    const int ty = threadIdx.y * REGT_SIZE;
    const int rowBase = BLOCK_SIZE * blockIdx.y;  // tile origin in d_res
    const int colBase = BLOCK_SIZE * blockIdx.x;
    float acc[REGT_SIZE][REGT_SIZE];
    float Ar[REGT_SIZE];
    float Br[REGT_SIZE];
    #pragma unroll
    for (int i = 0; i < REGT_SIZE; i++)
        #pragma unroll
        for (int j = 0; j < REGT_SIZE; j++)
            acc[i][j] = 0.0f;
    for (int t = 0; t < m2; t += BLOCK_SIZE) {
        // Cooperative load: each thread stages its REGT_SIZE x REGT_SIZE
        // patch of the current A and B tiles into shared memory.
        #pragma unroll
        for (int i = 0; i < REGT_SIZE; i++)
            #pragma unroll
            for (int j = 0; j < REGT_SIZE; j++) {
                As[ty + i][tx + j] = d_mat1[(rowBase + ty + i) * m2 + t + tx + j];
                Bs[ty + i][tx + j] = d_mat2[(t + ty + i) * n2 + colBase + tx + j];
            }
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) {
            // Pull the k-th column of A's sub-rows and the k-th row of
            // B's sub-columns into registers, then rank-1 update acc.
            #pragma unroll
            for (int i = 0; i < REGT_SIZE; i++) Ar[i] = As[ty + i][k];
            #pragma unroll
            for (int j = 0; j < REGT_SIZE; j++) Br[j] = Bs[k][tx + j];
            #pragma unroll
            for (int i = 0; i < REGT_SIZE; i++)
                #pragma unroll
                for (int j = 0; j < REGT_SIZE; j++)
                    acc[i][j] += Ar[i] * Br[j];
        }
        __syncthreads();
    }
    #pragma unroll
    for (int i = 0; i < REGT_SIZE; i++)
        #pragma unroll
        for (int j = 0; j < REGT_SIZE; j++)
            d_res[(rowBase + ty + i) * n2 + colBase + tx + j] = acc[i][j];
}
float* MatrixMultGPU0(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    // Multiplies mat1 (m1 x m2) by mat2 (n1 x n2) on the GPU and returns the
    // m1 x n2 product, allocated with new[] (caller frees with delete[]).
    // NOTE(review): kMatrixMul1 has no tail guards, so dimensions are
    // assumed to be multiples of the block edge N below.
    float *d_res, *d_mat1, *d_mat2;
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    //malloc the device memory for matrices
    cudaError_t result = cudaMalloc((void**)&d_res, sizeof(float)*m1*n2);
    assert (result == cudaSuccess);
    result = cudaMalloc((void**)&d_mat1, sizeof(float)*m1*m2);
    assert (result == cudaSuccess);
    result = cudaMalloc((void**)&d_mat2, sizeof(float)*n1*n2);
    assert (result == cudaSuccess);
    //init source matrices in device memory
    result = cudaMemcpy(d_mat1, mat1, sizeof(float)*m1*m2, cudaMemcpyHostToDevice);
    assert (result == cudaSuccess);
    result = cudaMemcpy(d_mat2, mat2, sizeof(float)*n1*n2, cudaMemcpyHostToDevice);
    assert (result == cudaSuccess);
    cudaEventRecord(start, NULL);
    // BUG FIX: N was 64, i.e. a 64x64 = 4096-thread block. CUDA caps a block
    // at 1024 threads, so that launch always failed — unnoticed because the
    // launch result was never checked. 32x32 = 1024 is the maximum legal edge.
    const int N = 32;
    dim3 block_size(N, N);
    // grid dimensions in blocks (integer ceil-division)
    dim3 grid_size((n2 + N - 1) / N, (m1 + N - 1) / N);
    //sharedMem version (template argument must match the block edge N)
    kMatrixMul1<32><<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
    result = cudaGetLastError();   // surface launch-configuration errors
    assert (result == cudaSuccess);
    //copy back the multiplication result (blocking copy also syncs the kernel)
    float* res = new float[m1*n2];
    result = cudaMemcpy(res, d_res, sizeof(float)*m1*n2, cudaMemcpyDeviceToHost);
    assert (result == cudaSuccess);
    cudaEventRecord(end, NULL);
    cudaEventSynchronize(end);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, end);
    printf("calculation Time:%f ms\n", msecTotal);
    // release timing and device resources (events leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(d_res);
    cudaFree(d_mat1);
    cudaFree(d_mat2);
    return res;
}
float* MatrixMulCPU(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    // CPU reference: returns res (m1 x n2) = mat1 (m1 x m2) * mat2 (n1 x n2),
    // all row-major. Caller frees the result with delete[].
    assert(m2 == n1 && "matrx a's cols != matrix b's rows");
    float* res = new float[m1*n2];
    for (int i = 0; i < m1; i++) {
        for (int j = 0; j < n2; j++) {
            // BUG FIX: the original used the wrong leading dimensions
            // (res[i*m1+j], mat1[i*m1+x], mat2[x*n1+j]); the row strides of
            // row-major res/mat1/mat2 are n2/m2/n2. The old code was only
            // coincidentally correct for square matrices.
            float sum = 0.0f;
            for (int x = 0; x < m2; x++) {
                sum += mat1[i*m2+x] * mat2[x*n2+j];
            }
            res[i*n2+j] = sum;
        }
    }
    return res;
}
float CompareMatrix(float mat1[], float mat2[], int m1, int m2){
    // Accumulates the signed element-wise difference (mat1 - mat2) over an
    // m1 x m2 grid, walking column-by-column (element index m2*row + col).
    // Note: differences of opposite sign cancel; this is a quick check,
    // not an absolute-error metric.
    float total = 0.0f;
    for (int col = 0; col < m1; ++col) {
        for (int row = 0; row < m2; ++row) {
            const int idx = m2 * row + col;
            total += mat1[idx] - mat2[idx];
        }
    }
    return total;
}
void FillMatrix(float mat[], int m1, int m2, float d=1.0){
    // Fills an m1 x m2 row-major matrix: with the constant d when d >= 0,
    // otherwise with uniform random values in [0, 10].
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            // BUG FIX: the original wrote mat[i*m1+j]; the row stride of an
            // m1 x m2 row-major matrix is m2. With m1 != m2 the old code
            // overlapped rows and left trailing cells unwritten.
            if (d<0.0) {
                mat[i*m2+j] = static_cast<float>(rand())/static_cast<float>(RAND_MAX) * 10.0;
            } else {
                mat[i*m2+j] = d;
            }
        }
    }
}
void PrintMatrix(float mat[], int m1, int m2){
    // Prints an m1 x m2 row-major matrix, one comma-separated row per line.
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            // BUG FIX: row stride is m2 (the original used i*m1+j, which
            // prints the wrong elements whenever m1 != m2).
            printf("%f,", mat[i*m2+j]);
        }
        printf("\n");
    }
}
int main(int argc, char *argv[]){
int m1 = 1024;
int m2;
int n1 = m2 = 1024;
int n2 = 1024;
float *matrix_a = new float[m1*m2];
float *matrix_b = new float[n1*n2];
FillMatrix((float*)matrix_a, m1, m2);
FillMatrix((float*)matrix_b, n1, n2);
//float *ref = MatrixMulCPU((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
float *res = MatrixMultGPU0((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
//printf("error:%f\n", CompareMatrix(ref, res, m1, n2));
//PrintMatrix(res, m1, m2);
delete[] matrix_a;
delete[] matrix_b;
delete res;
//delete ref;
} | #include <hip/hip_runtime.h>
#include <cstdlib>
#include <stdio.h>
#include <cassert>
#include <cuda_runtime.h>
/*
Naive implementation.
Allocate one thread for one element in result matrix, processing dot(Arow, Bcol);
*/
__global__ void kMatrixMul0 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2){
    // Naive kernel: one thread per output element, computing
    // dot(row `row` of d_mat1, column `col` of d_mat2).
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard the grid tail: threads outside the m1 x n2 result do nothing.
    if (col >= n2 || row >= m1) {
        return;
    }
    float acc = 0.0f;
    for (int k = 0; k < m2; ++k) {
        acc += d_mat1[row * m2 + k] * d_mat2[k * n2 + col];
    }
    d_res[n2 * row + col] = acc;
}
/* Using shared memory */
template<int BLOCK_SIZE>
__global__ void kMatrixMul1 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2) {
    // Shared-memory tiled multiply. Each block computes one
    // BLOCK_SIZE x BLOCK_SIZE tile of d_res.
    // Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE) and all matrix
    // dimensions are multiples of BLOCK_SIZE (there are no tail guards).
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int aStart = BLOCK_SIZE * blockIdx.y * m2;  // first A tile of this block's row band
    int aEnd   = aStart + m2;                   // one past the last A tile of the band
    int bStart = BLOCK_SIZE * blockIdx.x;       // first B tile of this block's column band
    int bStep  = BLOCK_SIZE * n2;               // B advances one tile-row per step
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float c = 0.0f;
    // BUG FIX: the original condition was `a < m2`, so every block with
    // blockIdx.y > 0 (where aStart >= m2) skipped the loop entirely and
    // wrote an uninitialized-looking 0 result. The band must be walked
    // from aStart to aStart + m2.
    for (int a = aStart, b = bStart; a < aEnd; a += BLOCK_SIZE, b += bStep) {
        // Stage one tile of A and one tile of B into shared memory.
        As[ty][tx] = d_mat1[a + m2 * ty + tx];
        Bs[ty][tx] = d_mat2[b + n2 * ty + tx];
        __syncthreads();                         // tiles fully loaded before use
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE; k++) {
            c += As[ty][k] * Bs[k][tx];
        }
        __syncthreads();                         // done reading before next overwrite
    }
    d_res[(BLOCK_SIZE * blockIdx.y + ty) * n2 + BLOCK_SIZE * blockIdx.x + tx] = c;
}
/* Using register blocking */
template<int BLOCK_SIZE, int REGT_SIZE>
__global__ void kMatrixMul2 (float *d_res,
                float *d_mat1, int m1, int m2,
                float *d_mat2, int n1, int n2) {
    // Register-blocked tiled multiply. Each block computes a
    // BLOCK_SIZE x BLOCK_SIZE tile of d_res using
    // (BLOCK_SIZE/REGT_SIZE)^2 threads; each thread owns a
    // REGT_SIZE x REGT_SIZE sub-tile held in registers.
    // Preconditions: blockDim == (BLOCK_SIZE/REGT_SIZE, BLOCK_SIZE/REGT_SIZE)
    // and all matrix dimensions are multiples of BLOCK_SIZE.
    //
    // BUG FIX (rewrite): the original indexed acc[ty+i][tx+j] (out of the
    // REGT_SIZE x REGT_SIZE array's bounds), accumulated the same
    // As[ty][k]*Bs[k][tx] product regardless of i/j, and finally stored
    // `c`, which was never updated — every output was 0.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    const int tx = threadIdx.x * REGT_SIZE;       // sub-tile origin inside the block tile
    const int ty = threadIdx.y * REGT_SIZE;
    const int rowBase = BLOCK_SIZE * blockIdx.y;  // tile origin in d_res
    const int colBase = BLOCK_SIZE * blockIdx.x;
    float acc[REGT_SIZE][REGT_SIZE];
    float Ar[REGT_SIZE];
    float Br[REGT_SIZE];
    #pragma unroll
    for (int i = 0; i < REGT_SIZE; i++)
        #pragma unroll
        for (int j = 0; j < REGT_SIZE; j++)
            acc[i][j] = 0.0f;
    for (int t = 0; t < m2; t += BLOCK_SIZE) {
        // Cooperative load: each thread stages its REGT_SIZE x REGT_SIZE
        // patch of the current A and B tiles into shared memory.
        #pragma unroll
        for (int i = 0; i < REGT_SIZE; i++)
            #pragma unroll
            for (int j = 0; j < REGT_SIZE; j++) {
                As[ty + i][tx + j] = d_mat1[(rowBase + ty + i) * m2 + t + tx + j];
                Bs[ty + i][tx + j] = d_mat2[(t + ty + i) * n2 + colBase + tx + j];
            }
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) {
            // Pull the k-th column of A's sub-rows and the k-th row of
            // B's sub-columns into registers, then rank-1 update acc.
            #pragma unroll
            for (int i = 0; i < REGT_SIZE; i++) Ar[i] = As[ty + i][k];
            #pragma unroll
            for (int j = 0; j < REGT_SIZE; j++) Br[j] = Bs[k][tx + j];
            #pragma unroll
            for (int i = 0; i < REGT_SIZE; i++)
                #pragma unroll
                for (int j = 0; j < REGT_SIZE; j++)
                    acc[i][j] += Ar[i] * Br[j];
        }
        __syncthreads();
    }
    #pragma unroll
    for (int i = 0; i < REGT_SIZE; i++)
        #pragma unroll
        for (int j = 0; j < REGT_SIZE; j++)
            d_res[(rowBase + ty + i) * n2 + colBase + tx + j] = acc[i][j];
}
/*
 Multiplies mat1 (m1 x m2) by mat2 (n1 x n2) on the GPU using the
 shared-memory kernel and returns a newly allocated m1 x n2 host buffer.
 Caller owns the result and must release it with delete[].
 Prints the elapsed GPU time (event-based) to stdout.
*/
float* MatrixMultGPU0(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    float *d_res, *d_mat1, *d_mat2;
    hipEvent_t start, end;
    hipError_t error;
    error = hipEventCreate(&start);
    assert (error == hipSuccess);
    error = hipEventCreate(&end);
    assert (error == hipSuccess);
    //malloc the device memory for matrices
    hipError_t result = hipMalloc((void**)&d_res, sizeof(float)*m1*n2);
    assert (result == hipSuccess);   // BUG FIX: this status was never checked
    result = hipMalloc((void**)&d_mat1, sizeof(float)*m1*m2);
    assert (result == hipSuccess);
    result = hipMalloc((void**)&d_mat2, sizeof(float)*n1*n2);
    assert (result == hipSuccess);
    //init source matrices in device memory
    result = hipMemcpy(d_mat1, mat1, sizeof(float)*m1*m2, hipMemcpyHostToDevice);
    assert (result == hipSuccess);
    result = hipMemcpy(d_mat2, mat2, sizeof(float)*n1*n2, hipMemcpyHostToDevice);
    assert (result == hipSuccess);
    hipEventRecord(start, NULL);
    // BUG FIX: the original used N = 64, i.e. a 64x64 = 4096-thread block,
    // which exceeds the documented 1024 threads-per-block limit, so the
    // launch failed (silently, since the launch status was never checked)
    // and d_res was copied back unwritten. 32x32 = 1024 is the largest
    // legal square block.
    constexpr int N = 32;
    dim3 block_size(N, N);
    //grid width in blocks (integer ceil-division)
    int grid_wib = (n2 + N - 1) / N;
    //grid height in blocks
    int grid_hib = (m1 + N - 1) / N;
    dim3 grid_size(grid_wib, grid_hib);
    //naive version
    //kMatrixMul0<<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
    //sharedMem version (tile size must match the block dimensions)
    kMatrixMul1<N><<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
    // BUG FIX: surface launch-configuration errors instead of silently
    // copying back uninitialized memory.
    assert (hipGetLastError() == hipSuccess);
    //copy back the multiplication result (blocking memcpy also syncs the kernel)
    float* res = new float[m1*n2];
    result = hipMemcpy(res, d_res, sizeof(float)*m1*n2, hipMemcpyDeviceToHost);
    assert (result == hipSuccess);
    hipEventRecord(end, NULL);
    error = hipEventSynchronize(end);
    assert (error == hipSuccess);
    float msecTotal = 0.0f;
    error = hipEventElapsedTime(&msecTotal, start, end);
    assert (error == hipSuccess);
    printf("calculation Time:%f ms\n", msecTotal);
    hipFree(d_res);
    hipFree(d_mat1);
    hipFree(d_mat2);
    // BUG FIX: events were leaked in the original.
    hipEventDestroy(start);
    hipEventDestroy(end);
    return res;
}
/*
 CPU reference matrix multiply: mat1 (m1 x m2, row-major) times
 mat2 (n1 x n2, row-major), requiring m2 == n1.
 Returns a newly allocated m1 x n2 row-major result; caller owns it
 (release with delete[]).
 BUG FIX: the original indexed every array with stride m1/n1
 (res[i*m1+j], mat1[i*m1+x], mat2[x*n1+j]); a row-major m1 x n2 result
 has stride n2, mat1 has stride m2, and mat2 has stride n2, so the old
 code was only coincidentally correct for square matrices and read/wrote
 out of bounds otherwise.
*/
float* MatrixMulCPU(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    assert(m2 == n1 && "matrx a's cols != matrix b's rows");
    float* res = new float[m1*n2];
    for (int i = 0; i < m1; i++) {
        for (int j = 0; j < n2; j++) {
            // accumulate locally to avoid re-reading res in the inner loop
            float acc = 0.0f;
            for (int x = 0; x < m2; x++) {
                acc += mat1[i*m2 + x] * mat2[x*n2 + j];
            }
            res[i*n2 + j] = acc;
        }
    }
    return res;
}
/*
 Returns the sum of absolute element-wise differences between two
 m1 x m2 matrices (0.0 means identical).
 BUG FIX: the original iterated x < m1, y < m2 but indexed mat[m2*y+x],
 which reads out of bounds for non-square shapes, and it summed *signed*
 differences, so positive and negative errors cancelled and a badly wrong
 result could still compare as "close". Both buffers are dense row-major,
 so a flat walk over all m1*m2 elements is equivalent and safe.
 (The only caller of this function in the file is commented out, so the
 semantic change from signed to absolute error breaks nobody.)
*/
float CompareMatrix(float mat1[], float mat2[], int m1, int m2){
    float err = 0.0f;
    const int total = m1 * m2;
    for (int i = 0; i < total; i++){
        float d = mat1[i] - mat2[i];
        // branchless-ish absolute value; avoids pulling in <cmath>
        err += (d < 0.0f) ? -d : d;
    }
    return err;
}
/*
 Fills an m1 x m2 row-major matrix. If d >= 0 every element is set to d
 (default 1.0); a negative d selects uniform random values in [0, 10].
 BUG FIX: the original wrote mat[i*m1+j]; a row-major m1 x m2 buffer has
 row stride m2, so for m1 > m2 the old code wrote past the end of the
 buffer and for m1 < m2 it left gaps unfilled.
*/
void FillMatrix(float mat[], int m1, int m2, float d=1.0){
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            if (d<0.0) {
                // rand()/RAND_MAX in [0,1], scaled to [0,10]
                mat[i*m2+j] = static_cast<float>(rand())/static_cast<float>(RAND_MAX) * 10.0;
            } else {
                mat[i*m2+j] = d;
            }
        }
    }
}
/*
 Prints an m1 x m2 row-major matrix to stdout, comma-separated within a
 row, one row per line.
 BUG FIX: the original indexed mat[i*m1+j]; the row stride of an m1 x m2
 row-major buffer is m2 (same defect as FillMatrix/MatrixMulCPU), so
 non-square matrices were printed from the wrong — possibly out-of-bounds
 — locations.
*/
void PrintMatrix(float mat[], int m1, int m2){
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            printf("%f,", mat[i*m2+j]);
        }
        printf("\n");
    }
}
/*
 Driver: fills two 1024x1024 matrices with 1.0, multiplies them on the
 GPU, and releases everything. The CPU-reference comparison path is kept
 commented out below for manual verification.
*/
int main(int argc, char *argv[]){
    int m1 = 1024;
    int m2;
    int n1 = m2 = 1024;
    int n2 = 1024;
    float *matrix_a = new float[m1*m2];
    float *matrix_b = new float[n1*n2];
    FillMatrix(matrix_a, m1, m2);
    FillMatrix(matrix_b, n1, n2);
    //float *ref = MatrixMulCPU(matrix_a, m1, m2, matrix_b, n1, n2);
    float *res = MatrixMultGPU0(matrix_a, m1, m2, matrix_b, n1, n2);
    //printf("error:%f\n", CompareMatrix(ref, res, m1, n2));
    //PrintMatrix(res, m1, m2);
    delete[] matrix_a;
    delete[] matrix_b;
    // BUG FIX: res comes from new[] inside MatrixMultGPU0; the original
    // released it with plain `delete`, which is undefined behavior for
    // array allocations.
    delete[] res;
    //delete[] ref;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <cstdlib>
#include <stdio.h>
#include <cassert> #include <cuda_runtime.h>
/*
Naive implementation.
Allocate one thread for one element in result matrix, processing dot(Arow, Bcol);
*/
__global__ void kMatrixMul0 (float *d_res,
float *d_mat1, int m1, int m2,
float *d_mat2, int n1, int n2){
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x >= n2 || y >= m1){return;}
float sum=0.0;
for (int i=0; i<m2; i++){
// if (blockIdx.x == 0 && threadIdx.x == 0 && blockIdx.y == 0 && threadIdx.y == 0) {
// printf("sum:%f mat1:%f mat2:%f i:%d\n", sum, d_mat1[y*m2+i], d_mat2[i*n2+x], i);
// }
sum += d_mat1[y*m2+i] * d_mat2[i*n2+x];
}
d_res[n2*y+x] = sum;
};
/* Using shared memory */
template<int BLOCK_SIZE>
__global__ void kMatrixMul1 (float *d_res,
float *d_mat1, int m1, int m2,
float *d_mat2, int n1, int n2) {
//assume squre block
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//trivial opt: calculate these in CPU, shared by all blocks
int aStart = blockDim.y*blockIdx.y * m2;
int bStart = blockDim.x*blockIdx.x;
int bStep = blockDim.y*n2;
int tx = threadIdx.x;
int ty = threadIdx.y;
float c = 0.0f;
for (int a = aStart, b = bStart; a < m2; a += blockDim.x, b += bStep) {
//load data
As[ty][tx] = d_mat1[a + m2 * ty + tx];
Bs[ty][tx] = d_mat2[b + n2 * ty + tx];
__syncthreads();
for (int k = 0; k<blockDim.x; k++){
c += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
d_res[(blockDim.y*blockIdx.y+ty)*n2+blockDim.x*blockIdx.x+tx] = c;
}
/* Using register blocking */
template<int BLOCK_SIZE, int REGT_SIZE>
__global__ void kMatrixMul2 (float *d_res,
float *d_mat1, int m1, int m2,
float *d_mat2, int n1, int n2) {
//assume squre block
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//trivial opt: calculate these in CPU, shared by all blocks
int aStart = blockDim.y*blockIdx.y * m2;
int bStart = blockDim.x*blockIdx.x;
int bStep = blockDim.y*n2;
int tx = threadIdx.x;
int ty = threadIdx.y;
float c = 0.0f;
int reg_tiles = BLOCK_SIZE / REGT_SIZE;
float acc[REGT_SIZE][REGT_SIZE];
float Ar;
float Br[REGT_SIZE];
#pragma unroll
for (int i = 0; i<REGT_SIZE; i++){
#pragma unroll
for (int j = 0; j<REGT_SIZE; j++){
acc[i][j] = 0.0f;
}
}
//block 32x32, each thread work on 2x2
for (int a = aStart, b = bStart; a < m2; a += blockDim.x, b += bStep) {
//load data
#pragma unroll
for (int i = 0; i<REGT_SIZE; i++){
#pragma unroll
for (int j = 0; j<REGT_SIZE; j++){
As[ty+i][tx+j] = d_mat1[a + m2 * (ty + i) + tx + j];
Bs[ty+i][tx+j] = d_mat2[b + n2 * (ty + i) + tx + j];
}
}
__syncthreads();
for (int k=0; k<BLOCK_SIZE; k+=REGT_SIZE){
#pragma unroll
for (int i=0; i<REGT_SIZE; i++){
#pragma unroll
for (int j = 0; j<REGT_SIZE; j++){
int ax = k + j;
int ay = k + i;
acc[ty+i][tx+j] += As[ty][k] * Bs[k][tx];
}
}
}
__syncthreads();
}
d_res[(blockDim.y*blockIdx.y+ty)*n2+blockDim.x*blockIdx.x+tx] = c;
}
float* MatrixMultGPU0(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
float *d_res, *d_mat1, *d_mat2;
hipEvent_t start, end;
hipError_t error;
error = hipEventCreate(&start);
error = hipEventCreate(&end);
//malloc the device memory for matrices
hipError_t result = hipMalloc((void**)&d_res, sizeof(float)*m1*n2);
result = hipMalloc((void**)&d_mat1, sizeof(float)*m1*m2);
assert (result == hipSuccess);
result = hipMalloc((void**)&d_mat2, sizeof(float)*n1*n2);
assert (result == hipSuccess);
//init source matrices in device memory
result = hipMemcpy(d_mat1, mat1, sizeof(float)*m1*m2, hipMemcpyHostToDevice);
assert (result == hipSuccess);
result = hipMemcpy(d_mat2, mat2, sizeof(float)*n1*n2, hipMemcpyHostToDevice);
assert (result == hipSuccess);
hipEventRecord(start, NULL);
int N = 64;
dim3 block_size(N, N);
//grid width in blocks
int grid_wib = ceil(float(n2)/float(N));
//grid height in blocks
int grid_hib = ceil(float(m1)/float(N));
dim3 grid_size(grid_wib, grid_hib);
//naive version
//kMatrixMul0<<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
//sharedMem version
kMatrixMul1<64><<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
//copy back the multiplication result
float* res = new float[m1*n2];
result = hipMemcpy(res, d_res, sizeof(float)*m1*n2, hipMemcpyDeviceToHost);
assert (result == hipSuccess);
hipEventRecord(end, NULL);
error = hipEventSynchronize(end);
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, end);
printf("calculation Time:%f ms\n", msecTotal);
hipFree(d_res);
hipFree(d_mat1);
hipFree(d_mat2);
return res;
}
float* MatrixMulCPU(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
assert(m2 == n1 && "matrx a's cols != matrix b's rows");
float* res = new float[m1*n2] ;
for (int i = 0; i < m1; i++) {
for (int j = 0; j < n2; j++) {
res[i*m1 + j] = 0;
for (int x = 0; x < m2; x++) {
res[i*m1+j] += mat1[i*m1+x] * mat2[x*n1+j];
}
}
}
return res;
}
float CompareMatrix(float mat1[], float mat2[], int m1, int m2){
float err = 0;
for (int x=0; x<m1; x++){
for (int y=0; y<m2; y++){
err += mat1[m2*y+x] - mat2[m2*y+x];
}
}
return err;
/*
if (err > 0.1){
printf("matrix comparison failed.error:%f\n", err);
return false;
}
return true;
*/
}
void FillMatrix(float mat[], int m1, int m2, float d=1.0){
for (int i=0; i<m1; i++){
for (int j=0; j<m2; j++){
if (d<0.0) {
mat[i*m1+j] = static_cast<float>(rand())/static_cast<float>(RAND_MAX) * 10.0;
} else {
mat[i*m1+j] = d;
}
}
}
}
void PrintMatrix(float mat[], int m1, int m2){
for (int i=0; i<m1; i++){
for (int j=0; j<m2; j++){
printf("%f,", mat[i*m1+j]);
}
printf("\n");
}
}
int main(int argc, char *argv[]){
int m1 = 1024;
int m2;
int n1 = m2 = 1024;
int n2 = 1024;
float *matrix_a = new float[m1*m2];
float *matrix_b = new float[n1*n2];
FillMatrix((float*)matrix_a, m1, m2);
FillMatrix((float*)matrix_b, n1, n2);
//float *ref = MatrixMulCPU((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
float *res = MatrixMultGPU0((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
//printf("error:%f\n", CompareMatrix(ref, res, m1, n2));
//PrintMatrix(res, m1, m2);
delete[] matrix_a;
delete[] matrix_b;
delete res;
//delete ref;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11kMatrixMul0PfS_iiS_ii
.globl _Z11kMatrixMul0PfS_iiS_ii
.p2align 8
.type _Z11kMatrixMul0PfS_iiS_ii,@function
_Z11kMatrixMul0PfS_iiS_ii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s3, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
s_load_b32 s5, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s5, 1
s_cbranch_scc1 .LBB0_4
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x8
s_load_b64 s[2:3], s[0:1], 0x18
v_mul_lo_u32 v2, v1, s5
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v4, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
.p2align 6
.LBB0_3:
v_ashrrev_i32_e32 v5, 31, v4
s_add_i32 s5, s5, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s5, 0
v_lshlrev_b64 v[7:8], 2, v[4:5]
v_add_nc_u32_e32 v4, s4, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s2, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v8, vcc_lo
global_load_b32 v5, v[2:3], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v5, v7
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v6, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v6, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11kMatrixMul0PfS_iiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11kMatrixMul0PfS_iiS_ii, .Lfunc_end0-_Z11kMatrixMul0PfS_iiS_ii
.section .AMDGPU.csdata,"",@progbits
.section .text._Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,"axG",@progbits,_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.protected _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.globl _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.p2align 8
.type _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,@function
_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s9, s[0:1], 0x14
s_load_b32 s3, s[0:1], 0x24
v_dual_mov_b32 v4, 0 :: v_dual_and_b32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s13, s2, 16
s_and_b32 s12, s2, 0xffff
s_mul_i32 s8, s15, s13
s_mul_i32 s10, s14, s12
s_mul_i32 s11, s8, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s11, s9
s_cbranch_scc1 .LBB1_5
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b64 s[6:7], s[0:1], 0x18
v_mad_u64_u32 v[2:3], null, v0, s9, v[1:2]
v_lshlrev_b32_e32 v4, 2, v1
s_cmp_lg_u32 s12, 0
v_lshlrev_b32_e32 v5, 8, v0
s_cselect_b32 s2, -1, 0
s_mul_i32 s13, s13, s3
v_or_b32_e32 v7, 0x4000, v4
v_cndmask_b32_e64 v9, 0, 1, s2
v_add_nc_u32_e32 v6, v5, v4
v_mad_u64_u32 v[3:4], null, v0, s3, v[1:2]
v_mov_b32_e32 v4, 0
v_add_nc_u32_e32 v8, v7, v5
v_cmp_ne_u32_e64 s2, 1, v9
s_mov_b32 s14, s10
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_add_i32 s11, s11, s12
s_add_i32 s14, s14, s13
s_cmp_ge_i32 s11, s9
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_5
.LBB1_3:
v_add_nc_u32_e32 v9, s11, v2
v_add_nc_u32_e32 v11, s14, v3
s_mov_b32 s15, s12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v10, 31, v9
v_ashrrev_i32_e32 v12, 31, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[9:10], 2, v[9:10]
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v9, vcc_lo, s4, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v11, vcc_lo, s6, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s7, v12, vcc_lo
s_and_b32 vcc_lo, exec_lo, s2
global_load_b32 v13, v[9:10], off
global_load_b32 v11, v[11:12], off
v_dual_mov_b32 v10, v5 :: v_dual_mov_b32 v9, v7
s_waitcnt vmcnt(1)
ds_store_b32 v6, v13
s_waitcnt vmcnt(0)
ds_store_b32 v8, v11
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_vccnz .LBB1_2
.LBB1_4:
ds_load_b32 v11, v10
ds_load_b32 v12, v9
v_add_nc_u32_e32 v10, 4, v10
v_add_nc_u32_e32 v9, 0x100, v9
s_add_i32 s15, s15, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s15, 0
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v4, v11, v12
s_cbranch_scc0 .LBB1_4
s_branch .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
v_add_nc_u32_e32 v0, s8, v0
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v0, s3
v_add3_u32 v1, s10, v1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.amdhsa_group_segment_fixed_size 32768
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,"axG",@progbits,_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.Lfunc_end1:
.size _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, .Lfunc_end1-_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11kMatrixMul0PfS_iiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11kMatrixMul0PfS_iiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 32768
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <cstdlib>
#include <stdio.h>
#include <cassert> #include <cuda_runtime.h>
/*
Naive implementation.
Allocate one thread for one element in result matrix, processing dot(Arow, Bcol);
*/
__global__ void kMatrixMul0 (float *d_res,
float *d_mat1, int m1, int m2,
float *d_mat2, int n1, int n2){
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x >= n2 || y >= m1){return;}
float sum=0.0;
for (int i=0; i<m2; i++){
// if (blockIdx.x == 0 && threadIdx.x == 0 && blockIdx.y == 0 && threadIdx.y == 0) {
// printf("sum:%f mat1:%f mat2:%f i:%d\n", sum, d_mat1[y*m2+i], d_mat2[i*n2+x], i);
// }
sum += d_mat1[y*m2+i] * d_mat2[i*n2+x];
}
d_res[n2*y+x] = sum;
};
/* Using shared memory */
template<int BLOCK_SIZE>
__global__ void kMatrixMul1 (float *d_res,
float *d_mat1, int m1, int m2,
float *d_mat2, int n1, int n2) {
//assume squre block
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//trivial opt: calculate these in CPU, shared by all blocks
int aStart = blockDim.y*blockIdx.y * m2;
int bStart = blockDim.x*blockIdx.x;
int bStep = blockDim.y*n2;
int tx = threadIdx.x;
int ty = threadIdx.y;
float c = 0.0f;
for (int a = aStart, b = bStart; a < m2; a += blockDim.x, b += bStep) {
//load data
As[ty][tx] = d_mat1[a + m2 * ty + tx];
Bs[ty][tx] = d_mat2[b + n2 * ty + tx];
__syncthreads();
for (int k = 0; k<blockDim.x; k++){
c += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
d_res[(blockDim.y*blockIdx.y+ty)*n2+blockDim.x*blockIdx.x+tx] = c;
}
/* Using register blocking */
template<int BLOCK_SIZE, int REGT_SIZE>
__global__ void kMatrixMul2 (float *d_res,
float *d_mat1, int m1, int m2,
float *d_mat2, int n1, int n2) {
//assume squre block
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
//trivial opt: calculate these in CPU, shared by all blocks
int aStart = blockDim.y*blockIdx.y * m2;
int bStart = blockDim.x*blockIdx.x;
int bStep = blockDim.y*n2;
int tx = threadIdx.x;
int ty = threadIdx.y;
float c = 0.0f;
int reg_tiles = BLOCK_SIZE / REGT_SIZE;
float acc[REGT_SIZE][REGT_SIZE];
float Ar;
float Br[REGT_SIZE];
#pragma unroll
for (int i = 0; i<REGT_SIZE; i++){
#pragma unroll
for (int j = 0; j<REGT_SIZE; j++){
acc[i][j] = 0.0f;
}
}
//block 32x32, each thread work on 2x2
for (int a = aStart, b = bStart; a < m2; a += blockDim.x, b += bStep) {
//load data
#pragma unroll
for (int i = 0; i<REGT_SIZE; i++){
#pragma unroll
for (int j = 0; j<REGT_SIZE; j++){
As[ty+i][tx+j] = d_mat1[a + m2 * (ty + i) + tx + j];
Bs[ty+i][tx+j] = d_mat2[b + n2 * (ty + i) + tx + j];
}
}
__syncthreads();
for (int k=0; k<BLOCK_SIZE; k+=REGT_SIZE){
#pragma unroll
for (int i=0; i<REGT_SIZE; i++){
#pragma unroll
for (int j = 0; j<REGT_SIZE; j++){
int ax = k + j;
int ay = k + i;
acc[ty+i][tx+j] += As[ty][k] * Bs[k][tx];
}
}
}
__syncthreads();
}
d_res[(blockDim.y*blockIdx.y+ty)*n2+blockDim.x*blockIdx.x+tx] = c;
}
float* MatrixMultGPU0(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
float *d_res, *d_mat1, *d_mat2;
hipEvent_t start, end;
hipError_t error;
error = hipEventCreate(&start);
error = hipEventCreate(&end);
//malloc the device memory for matrices
hipError_t result = hipMalloc((void**)&d_res, sizeof(float)*m1*n2);
result = hipMalloc((void**)&d_mat1, sizeof(float)*m1*m2);
assert (result == hipSuccess);
result = hipMalloc((void**)&d_mat2, sizeof(float)*n1*n2);
assert (result == hipSuccess);
//init source matrices in device memory
result = hipMemcpy(d_mat1, mat1, sizeof(float)*m1*m2, hipMemcpyHostToDevice);
assert (result == hipSuccess);
result = hipMemcpy(d_mat2, mat2, sizeof(float)*n1*n2, hipMemcpyHostToDevice);
assert (result == hipSuccess);
hipEventRecord(start, NULL);
int N = 64;
dim3 block_size(N, N);
//grid width in blocks
int grid_wib = ceil(float(n2)/float(N));
//grid height in blocks
int grid_hib = ceil(float(m1)/float(N));
dim3 grid_size(grid_wib, grid_hib);
//naive version
//kMatrixMul0<<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
//sharedMem version
kMatrixMul1<64><<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
//copy back the multiplication result
float* res = new float[m1*n2];
result = hipMemcpy(res, d_res, sizeof(float)*m1*n2, hipMemcpyDeviceToHost);
assert (result == hipSuccess);
hipEventRecord(end, NULL);
error = hipEventSynchronize(end);
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, end);
printf("calculation Time:%f ms\n", msecTotal);
hipFree(d_res);
hipFree(d_mat1);
hipFree(d_mat2);
return res;
}
float* MatrixMulCPU(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
assert(m2 == n1 && "matrx a's cols != matrix b's rows");
float* res = new float[m1*n2] ;
for (int i = 0; i < m1; i++) {
for (int j = 0; j < n2; j++) {
res[i*m1 + j] = 0;
for (int x = 0; x < m2; x++) {
res[i*m1+j] += mat1[i*m1+x] * mat2[x*n1+j];
}
}
}
return res;
}
float CompareMatrix(float mat1[], float mat2[], int m1, int m2){
float err = 0;
for (int x=0; x<m1; x++){
for (int y=0; y<m2; y++){
err += mat1[m2*y+x] - mat2[m2*y+x];
}
}
return err;
/*
if (err > 0.1){
printf("matrix comparison failed.error:%f\n", err);
return false;
}
return true;
*/
}
void FillMatrix(float mat[], int m1, int m2, float d=1.0){
for (int i=0; i<m1; i++){
for (int j=0; j<m2; j++){
if (d<0.0) {
mat[i*m1+j] = static_cast<float>(rand())/static_cast<float>(RAND_MAX) * 10.0;
} else {
mat[i*m1+j] = d;
}
}
}
}
void PrintMatrix(float mat[], int m1, int m2){
for (int i=0; i<m1; i++){
for (int j=0; j<m2; j++){
printf("%f,", mat[i*m1+j]);
}
printf("\n");
}
}
// Driver: builds two 1024x1024 matrices of ones, multiplies them on the GPU,
// and frees everything. The CPU reference path is kept but commented out.
int main(int argc, char *argv[]){
int m1 = 1024;
int m2;
int n1 = m2 = 1024;
int n2 = 1024;
float *matrix_a = new float[m1*m2];
float *matrix_b = new float[n1*n2];
FillMatrix((float*)matrix_a, m1, m2);
FillMatrix((float*)matrix_b, n1, n2);
//float *ref = MatrixMulCPU((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
float *res = MatrixMultGPU0((float*)matrix_a, m1, m2, (float*)matrix_b, n1, n2);
//printf("error:%f\n", CompareMatrix(ref, res, m1, n2));
//PrintMatrix(res, m1, m2);
delete[] matrix_a;
delete[] matrix_b;
// BUG FIX: res is allocated with new[] inside MatrixMultGPU0, so it must be
// released with the array form; plain `delete res` is undefined behavior.
delete[] res;
//delete[] ref;
}
.file "matrixmult.hip"
# Host-side launch stub for HIP kernel kMatrixMul0(float*, float*, int, int, float*, int, ...).
# Spills the register-passed kernel arguments to the stack, builds the array of
# argument pointers the runtime expects, pops the launch configuration that the
# <<<...>>> call site pushed, and forwards everything to hipLaunchKernel.
.globl _Z26__device_stub__kMatrixMul0PfS_iiS_ii # -- Begin function _Z26__device_stub__kMatrixMul0PfS_iiS_ii
.p2align 4, 0x90
.type _Z26__device_stub__kMatrixMul0PfS_iiS_ii,@function
_Z26__device_stub__kMatrixMul0PfS_iiS_ii: # @_Z26__device_stub__kMatrixMul0PfS_iiS_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# spill the six register arguments (SysV: rdi, rsi, edx, ecx, r8, r9d) to stack slots
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
# build the kernel-argument pointer array at 96(%rsp): one pointer per spilled arg,
# plus 160(%rsp) for the stack-passed seventh argument
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
# retrieve grid dim, block dim, shared-mem size and stream recorded by the launch site
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11kMatrixMul0PfS_iiS_ii, %edi
# shared-mem bytes and stream are passed on the stack to hipLaunchKernel
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z26__device_stub__kMatrixMul0PfS_iiS_ii, .Lfunc_end0-_Z26__device_stub__kMatrixMul0PfS_iiS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14MatrixMultGPU0PfiiS_ii
.LCPI1_0:
.long 0x3c800000 # float 0.015625
.text
.globl _Z14MatrixMultGPU0PfiiS_ii
.p2align 4, 0x90
.type _Z14MatrixMultGPU0PfiiS_ii,@function
_Z14MatrixMultGPU0PfiiS_ii: # @_Z14MatrixMultGPU0PfiiS_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r15d
movl %r8d, %r13d
movq %rcx, 80(%rsp) # 8-byte Spill
movl %edx, %r12d
movl %esi, %ebx
movq %rdi, 72(%rsp) # 8-byte Spill
leaq 64(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movslq %ebx, %rax
movq %rax, 88(%rsp) # 8-byte Spill
leaq (,%rax,4), %rbp
movslq %r15d, %r14
movq %rbp, %rsi
imulq %r14, %rsi
leaq 32(%rsp), %rdi
movq %rsi, 96(%rsp) # 8-byte Spill
callq hipMalloc
movl %r12d, 40(%rsp) # 4-byte Spill
movslq %r12d, %r12
imulq %rbp, %r12
leaq 24(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movl %r13d, 44(%rsp) # 4-byte Spill
movslq %r13d, %rbp
imulq %r14, %rbp
shlq $2, %rbp
leaq 16(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 72(%rsp), %rsi # 8-byte Reload
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq 80(%rsp), %rsi # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
cvtsi2ss %r14d, %xmm0
mulss .LCPI1_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r14d
xorps %xmm0, %xmm0
cvtsi2ssl 88(%rsp), %xmm0 # 4-byte Folded Reload
mulss .LCPI1_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %edi
shlq $32, %rdi
orq %r14, %rdi
movabsq $274877907008, %rdx # imm = 0x4000000040
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 168(%rsp)
movq %rcx, 160(%rsp)
movl %ebx, 60(%rsp)
movl 40(%rsp), %eax # 4-byte Reload
movl %eax, 56(%rsp)
movq %rdx, 152(%rsp)
movl 44(%rsp), %eax # 4-byte Reload
movl %eax, 52(%rsp)
movl %r15d, 48(%rsp)
leaq 168(%rsp), %rax
movq %rax, 176(%rsp)
leaq 160(%rsp), %rax
movq %rax, 184(%rsp)
leaq 60(%rsp), %rax
movq %rax, 192(%rsp)
leaq 56(%rsp), %rax
movq %rax, 200(%rsp)
leaq 152(%rsp), %rax
movq %rax, 208(%rsp)
leaq 52(%rsp), %rax
movq %rax, 216(%rsp)
leaq 48(%rsp), %rax
movq %rax, 224(%rsp)
leaq 136(%rsp), %rdi
leaq 120(%rsp), %rsi
leaq 112(%rsp), %rdx
leaq 104(%rsp), %rcx
callq __hipPopCallConfiguration
movq 136(%rsp), %rsi
movl 144(%rsp), %edx
movq 120(%rsp), %rcx
movl 128(%rsp), %r8d
leaq 176(%rsp), %r9
movl $_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, %edi
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
imull %ebx, %r15d
movslq %r15d, %rax
leaq (,%rax,4), %rcx
testl %eax, %eax
movq $-1, %rdi
cmovnsq %rcx, %rdi
callq _Znam
movq %rax, %rbx
movq 32(%rsp), %rsi
movq %rax, %rdi
movq 96(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movl $0, 176(%rsp)
movq 64(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 176(%rsp), %rdi
callq hipEventElapsedTime
movss 176(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z14MatrixMultGPU0PfiiS_ii, .Lfunc_end1-_Z14MatrixMultGPU0PfiiS_ii
.cfi_endproc
# -- End function
# Host-side launch stub for the kMatrixMul1<64> template instantiation.
# Identical structure to the kMatrixMul0 stub: spill args, build the argument
# pointer array, pop the launch configuration, call hipLaunchKernel.
# Emitted weak in a COMDAT section because the template may be instantiated in
# multiple translation units.
.section .text._Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii,"axG",@progbits,_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.weak _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii # -- Begin function _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.p2align 4, 0x90
.type _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii,@function
_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii: # @_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# spill register arguments
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
# build kernel-argument pointer array at 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
# pop the launch configuration pushed by the <<<...>>> call site
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii, .Lfunc_end2-_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.cfi_endproc
# -- End function
.text
.globl _Z12MatrixMulCPUPfiiS_ii # -- Begin function _Z12MatrixMulCPUPfiiS_ii
.p2align 4, 0x90
.type _Z12MatrixMulCPUPfiiS_ii,@function
_Z12MatrixMulCPUPfiiS_ii: # @_Z12MatrixMulCPUPfiiS_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebx
movl %r8d, %r12d
movq %rcx, (%rsp) # 8-byte Spill
movl %edx, %ebp
movl %esi, %r13d
movq %rdi, %r15
movl %r9d, %eax
imull %esi, %eax
cltq
leaq (,%rax,4), %rcx
testl %eax, %eax
movq $-1, %rdi
cmovnsq %rcx, %rdi
callq _Znam
testl %r13d, %r13d
jle .LBB3_10
# %bb.1: # %.preheader.lr.ph
movslq %r12d, %rcx
movl %r13d, %edx
movl %ebx, %esi
movl %ebp, %edi
leaq (,%rdx,4), %r8
shlq $2, %rcx
xorl %r9d, %r9d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_9: # %._crit_edge33
# in Loop: Header=BB3_2 Depth=1
incq %r9
addq %r8, %r15
cmpq %rdx, %r9
je .LBB3_10
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
# Child Loop BB3_6 Depth 3
testl %ebx, %ebx
jle .LBB3_9
# %bb.3: # %.lr.ph32
# in Loop: Header=BB3_2 Depth=1
movq %r9, %r10
imulq %rdx, %r10
leaq (%rax,%r10,4), %r10
movq (%rsp), %r11 # 8-byte Reload
xorl %r12d, %r12d
jmp .LBB3_4
.p2align 4, 0x90
.LBB3_8: # in Loop: Header=BB3_4 Depth=2
incq %r12
addq $4, %r11
cmpq %rsi, %r12
je .LBB3_9
.LBB3_4: # Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_6 Depth 3
movl $0, (%r10,%r12,4)
testl %ebp, %ebp
jle .LBB3_8
# %bb.5: # %.lr.ph
# in Loop: Header=BB3_4 Depth=2
movss (%r10,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq %r11, %r13
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_4 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r15,%r14,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r13), %xmm1
addss %xmm1, %xmm0
incq %r14
addq %rcx, %r13
cmpq %r14, %rdi
jne .LBB3_6
# %bb.7: # %._crit_edge
# in Loop: Header=BB3_4 Depth=2
movss %xmm0, (%r10,%r12,4)
jmp .LBB3_8
.LBB3_10: # %._crit_edge35
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z12MatrixMulCPUPfiiS_ii, .Lfunc_end3-_Z12MatrixMulCPUPfiiS_ii
.cfi_endproc
# -- End function
.globl _Z13CompareMatrixPfS_ii # -- Begin function _Z13CompareMatrixPfS_ii
.p2align 4, 0x90
.type _Z13CompareMatrixPfS_ii,@function
_Z13CompareMatrixPfS_ii: # @_Z13CompareMatrixPfS_ii
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB4_1
# %bb.3: # %.preheader.lr.ph
movslq %ecx, %rax
movl %edx, %edx
movl %eax, %r8d
shlq $2, %rax
xorps %xmm0, %xmm0
xorl %r9d, %r9d
jmp .LBB4_4
.p2align 4, 0x90
.LBB4_7: # %._crit_edge
# in Loop: Header=BB4_4 Depth=1
incq %r9
addq $4, %rsi
addq $4, %rdi
cmpq %rdx, %r9
je .LBB4_2
.LBB4_4: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_6 Depth 2
testl %ecx, %ecx
jle .LBB4_7
# %bb.5: # %.lr.ph.preheader
# in Loop: Header=BB4_4 Depth=1
movq %r8, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB4_6: # %.lr.ph
# Parent Loop BB4_4 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%r11), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%rsi,%r11), %xmm1
addss %xmm1, %xmm0
addq %rax, %r11
decq %r10
jne .LBB4_6
jmp .LBB4_7
.LBB4_1:
xorps %xmm0, %xmm0
.LBB4_2: # %._crit_edge22
retq
.Lfunc_end4:
.size _Z13CompareMatrixPfS_ii, .Lfunc_end4-_Z13CompareMatrixPfS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z10FillMatrixPfiif
.LCPI5_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI5_1:
.long 0x41200000 # float 10
.text
.globl _Z10FillMatrixPfiif
.p2align 4, 0x90
.type _Z10FillMatrixPfiif,@function
_Z10FillMatrixPfiif: # @_Z10FillMatrixPfiif
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, (%rsp) # 4-byte Spill
movq %rdi, 8(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB5_8
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebp
movl %esi, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl (%rsp), %r12d # 4-byte Reload
xorl %r13d, %r13d
xorps %xmm2, %xmm2
xorl %r14d, %r14d
movss %xmm0, 4(%rsp) # 4-byte Spill
jmp .LBB5_2
.p2align 4, 0x90
.LBB5_7: # %._crit_edge
# in Loop: Header=BB5_2 Depth=1
incq %r14
addl %ebp, %r13d
cmpq 16(%rsp), %r14 # 8-byte Folded Reload
je .LBB5_8
.LBB5_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_4 Depth 2
cmpl $0, (%rsp) # 4-byte Folded Reload
jle .LBB5_7
# %bb.3: # %.lr.ph
# in Loop: Header=BB5_2 Depth=1
movl %r13d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r15d, %r15d
jmp .LBB5_4
.p2align 4, 0x90
.LBB5_6: # in Loop: Header=BB5_4 Depth=2
movss %xmm1, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r12
je .LBB5_7
.LBB5_4: # Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
ucomiss %xmm0, %xmm2
movaps %xmm0, %xmm1
jbe .LBB5_6
# %bb.5: # in Loop: Header=BB5_4 Depth=2
callq rand
xorps %xmm2, %xmm2
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss .LCPI5_0(%rip), %xmm1
mulss .LCPI5_1(%rip), %xmm1
jmp .LBB5_6
.LBB5_8: # %._crit_edge19
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z10FillMatrixPfiif, .Lfunc_end5-_Z10FillMatrixPfiif
.cfi_endproc
# -- End function
.globl _Z11PrintMatrixPfii # -- Begin function _Z11PrintMatrixPfii
.p2align 4, 0x90
.type _Z11PrintMatrixPfii,@function
_Z11PrintMatrixPfii: # @_Z11PrintMatrixPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, 4(%rsp) # 4-byte Spill
movq %rdi, 8(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB6_6
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebp
movl %esi, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl 4(%rsp), %r12d # 4-byte Reload
xorl %r13d, %r13d
xorl %r14d, %r14d
jmp .LBB6_2
.p2align 4, 0x90
.LBB6_5: # %._crit_edge
# in Loop: Header=BB6_2 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebp, %r13d
cmpq 16(%rsp), %r14 # 8-byte Folded Reload
je .LBB6_6
.LBB6_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB6_4 Depth 2
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB6_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB6_2 Depth=1
movl %r13d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_4: # Parent Loop BB6_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r12
jne .LBB6_4
jmp .LBB6_5
.LBB6_6: # %._crit_edge13
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z11PrintMatrixPfii, .Lfunc_end6-_Z11PrintMatrixPfii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
xorl %eax, %eax
movq %rbx, %rcx
.p2align 4, 0x90
.LBB7_1: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB7_2 Depth 2
xorl %edx, %edx
.p2align 4, 0x90
.LBB7_2: # Parent Loop BB7_1 Depth=1
# => This Inner Loop Header: Depth=2
movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000
incq %rdx
cmpq $1024, %rdx # imm = 0x400
jne .LBB7_2
# %bb.3: # %._crit_edge.i
# in Loop: Header=BB7_1 Depth=1
incq %rax
addq $4096, %rcx # imm = 0x1000
cmpq $1024, %rax # imm = 0x400
jne .LBB7_1
# %bb.4: # %.preheader.i19.preheader
xorl %eax, %eax
movq %r14, %rcx
.p2align 4, 0x90
.LBB7_5: # %.preheader.i19
# =>This Loop Header: Depth=1
# Child Loop BB7_6 Depth 2
xorl %edx, %edx
.p2align 4, 0x90
.LBB7_6: # Parent Loop BB7_5 Depth=1
# => This Inner Loop Header: Depth=2
movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000
incq %rdx
cmpq $1024, %rdx # imm = 0x400
jne .LBB7_6
# %bb.7: # %._crit_edge.i24
# in Loop: Header=BB7_5 Depth=1
incq %rax
addq $4096, %rcx # imm = 0x1000
cmpq $1024, %rax # imm = 0x400
jne .LBB7_5
# %bb.8: # %_Z10FillMatrixPfiif.exit27
movq %rbx, %rdi
movl $1024, %esi # imm = 0x400
movl $1024, %edx # imm = 0x400
movq %r14, %rcx
movl $1024, %r8d # imm = 0x400
movl $1024, %r9d # imm = 0x400
callq _Z14MatrixMultGPU0PfiiS_ii
movq %rax, %r15
movq %rbx, %rdi
callq _ZdaPv
movq %r14, %rdi
callq _ZdaPv
movq %r15, %rdi
callq _ZdlPv
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
# Module constructor (run from .init_array): registers the embedded GPU fat
# binary with the HIP runtime (once, guarded by __hip_gpubin_handle), registers
# both kernels by mangled name, and installs __hip_module_dtor via atexit.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# register the fat binary only on first invocation
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
# register kMatrixMul0 (16 zero bytes on the stack are the unused dim/size out-params)
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kMatrixMul0PfS_iiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# register kMatrixMul1<64>
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# schedule the matching unregister at process exit
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (registered via atexit): unregisters the fat binary if it
# was registered and clears the cached handle so a second call is a no-op.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11kMatrixMul0PfS_iiS_ii,@object # @_Z11kMatrixMul0PfS_iiS_ii
.section .rodata,"a",@progbits
.globl _Z11kMatrixMul0PfS_iiS_ii
.p2align 3, 0x0
_Z11kMatrixMul0PfS_iiS_ii:
.quad _Z26__device_stub__kMatrixMul0PfS_iiS_ii
.size _Z11kMatrixMul0PfS_iiS_ii, 8
.type _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,@object # @_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.section .rodata._Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,"aG",@progbits,_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.weak _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.p2align 3, 0x0
_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii:
.quad _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.size _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "calculation Time:%f ms\n"
.size .L.str, 24
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%f,"
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11kMatrixMul0PfS_iiS_ii"
.size .L__unnamed_1, 26
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii"
.size .L__unnamed_2, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__kMatrixMul0PfS_iiS_ii
.addrsig_sym _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11kMatrixMul0PfS_iiS_ii
.addrsig_sym _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000499f5_00000000-6_matrixmult.cudafe1.cpp"
.text
#APP
#NO_APP
# CUDA (gcc/cudafe) host-side launch stub for kMatrixMul1<64>.
# Spills the register-passed kernel arguments, builds the argument pointer
# array at 112(%rsp), pops the launch configuration, and calls
# cudaLaunchKernel. Protected by a stack canary (%fs:40).
.type _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii, @function
_ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii:
.LFB2093:
.cfi_startproc
subq $184, %rsp
.cfi_def_cfa_offset 192
# spill the six register arguments
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
# install stack-canary at 168(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# argument pointer array: six spilled args plus 192(%rsp) for the stack arg
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
# default grid/block dims (all ones) before popping the real configuration
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration
testl %eax, %eax
je .L5
.L1:
# verify stack canary before returning
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
# stream and shared-mem bytes go on the stack for cudaLaunchKernel
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2093:
.size _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii, .-_ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii
# Host trampoline with the kernel's own mangled name: re-pushes the single
# stack-passed argument and tail-forwards to the static launch stub. Its
# address is what gets registered with the runtime and passed to
# cudaLaunchKernel. Weak/COMDAT because the template may be instantiated in
# several translation units.
.section .text._Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,"axG",@progbits,_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.weak _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.type _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, @function
_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii:
.LFB2142:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
# copy the caller's stack argument into this frame and call the stub
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2142:
.size _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, .-_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.text
# Teardown helper generated by cudafe: unregisters the module's fat binary
# handle with the CUDA runtime at program exit.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2069:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2069:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "calculation Time:%f ms\n"
.text
.globl _Z14MatrixMultGPU0PfiiS_ii
.type _Z14MatrixMultGPU0PfiiS_ii, @function
_Z14MatrixMultGPU0PfiiS_ii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 16(%rsp)
movl %esi, %r13d
movl %edx, 8(%rsp)
movq %rcx, 24(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, %r12d
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
movslq %r13d, %rbp
movslq %r12d, %rbx
movq %rbp, %r15
imulq %rbx, %r15
leaq 0(,%r15,4), %r14
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movslq 8(%rsp), %rax
imulq %rax, %rbp
salq $2, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movslq 12(%rsp), %rax
imulq %rax, %rbx
salq $2, %rbx
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq 16(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl $64, 80(%rsp)
movl $64, 84(%rsp)
movl $1, 88(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %r12d, %xmm0
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC6(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L12
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC3(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L12:
pxor %xmm0, %xmm0
cvtsi2ssl %r13d, %xmm0
mulss .LC0(%rip), %xmm0
movaps %xmm0, %xmm4
movss .LC6(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC1(%rip), %xmm5
ucomiss %xmm2, %xmm5
jbe .L13
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm4
movss .LC3(%rip), %xmm5
andps %xmm5, %xmm4
addss %xmm2, %xmm4
andnps %xmm0, %xmm3
orps %xmm3, %xmm4
.L13:
cvttss2sil %xmm1, %eax
movl %eax, 92(%rsp)
cvttss2sil %xmm4, %eax
movl %eax, 96(%rsp)
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L14:
movabsq $2305843009213693950, %rax
cmpq %r15, %rax
jb .L15
movq %r14, %rdi
call _Znam@PLT
movq %rax, %rbx
movl $2, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 36(%rsp)
leaq 36(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L21
movq %rbx, %rax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq %r12
.cfi_def_cfa_offset 192
movl 28(%rsp), %r9d
movq 72(%rsp), %r8
movl 24(%rsp), %ecx
movl %r13d, %edx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _ZL49__device_stub__Z11kMatrixMul1ILi64EEvPfS0_iiS0_iiPfS_iiS_ii
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L14
.L15:
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L16
call __stack_chk_fail@PLT
.L16:
call __cxa_throw_bad_array_new_length@PLT
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z14MatrixMultGPU0PfiiS_ii, .-_Z14MatrixMultGPU0PfiiS_ii
.globl _Z12MatrixMulCPUPfiiS_ii
.type _Z12MatrixMulCPUPfiiS_ii, @function
_Z12MatrixMulCPUPfiiS_ii:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %edx, %ebp
movq %rcx, 8(%rsp)
movl %esi, %eax
imull %r9d, %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L23
movq %rdi, %r13
movl %esi, %r12d
movl %r8d, %r15d
movl %r9d, %ebx
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %r14
testl %r12d, %r12d
jle .L22
movslq %r15d, %rsi
salq $2, %rsi
movl $0, %edx
movl $0, %eax
movslq %ebp, %r15
movq %r14, %rdi
movq %r15, %rcx
jmp .L25
.L23:
call __cxa_throw_bad_array_new_length@PLT
.L29:
movslq %edx, %r8
leaq 0(,%r8,4), %r15
leaq (%rdi,%r15), %r9
movq 8(%rsp), %r11
addq %r13, %r15
addq %rcx, %r8
leaq 0(%r13,%r8,4), %r8
movl $0, %r10d
movl %eax, (%rsp)
movl %edx, 4(%rsp)
.L28:
movq %r9, %r14
movl $0x00000000, (%r9)
testl %ebp, %ebp
jle .L26
movq %r11, %rdx
movq %r15, %rax
pxor %xmm1, %xmm1
.L27:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rsi, %rdx
cmpq %r8, %rax
jne .L27
movss %xmm1, (%r14)
.L26:
addl $1, %r10d
addq $4, %r9
addq $4, %r11
cmpl %r10d, %ebx
jne .L28
movl (%rsp), %eax
movl 4(%rsp), %edx
.L30:
addl $1, %eax
addl %r12d, %edx
cmpl %eax, %r12d
je .L33
.L25:
testl %ebx, %ebx
jg .L29
jmp .L30
.L33:
movq %rdi, %r14
.L22:
movq %r14, %rax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z12MatrixMulCPUPfiiS_ii, .-_Z12MatrixMulCPUPfiiS_ii
# CompareMatrix(float* mat1 /*rdi*/, float* mat2 /*rsi*/, int m1 /*edx*/,
#               int m2 /*ecx*/) -> float in xmm0.
# Accumulates mat1[m2*y+x] - mat2[m2*y+x] over x in [0,m1), y in [0,m2);
# the inner loop strides by m2*4 bytes (r8) from base offset x*4 (rax).
.globl _Z13CompareMatrixPfS_ii
.type _Z13CompareMatrixPfS_ii, @function
_Z13CompareMatrixPfS_ii:
.LFB2063:
.cfi_startproc
endbr64
# m1 <= 0: return 0.0f immediately
testl %edx, %edx
jle .L41
movslq %edx, %r10
movslq %ecx, %r8
salq $2, %r8
movl $0, %r9d
pxor %xmm1, %xmm1
jmp .L37
.L38:
# inner loop: xmm1 += mat1[off] - mat2[off]; off advances by m2 floats
movss (%rdi,%rax), %xmm0
subss (%rsi,%rax), %xmm0
addss %xmm0, %xmm1
addl $1, %edx
addq %r8, %rax
cmpl %edx, %ecx
jne .L38
.L40:
# outer loop: next x (r9), until r9 == m1 (r10)
addq $1, %r9
cmpq %r10, %r9
je .L35
.L37:
leaq 0(,%r9,4), %rax
movl $0, %edx
# skip inner loop entirely when m2 <= 0
testl %ecx, %ecx
jg .L38
jmp .L40
.L41:
pxor %xmm1, %xmm1
.L35:
movaps %xmm1, %xmm0
ret
.cfi_endproc
.LFE2063:
.size _Z13CompareMatrixPfS_ii, .-_Z13CompareMatrixPfS_ii
.globl _Z10FillMatrixPfiif
.type _Z10FillMatrixPfiif, @function
_Z10FillMatrixPfiif:
.LFB2064:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %edx, 12(%rsp)
testl %esi, %esi
jle .L44
movq %rdi, %r13
movl %esi, %r15d
movd %xmm0, %ebp
movl %edx, %r12d
movl $0, 8(%rsp)
movl $0, %r14d
jmp .L46
.L58:
call rand@PLT
movl %eax, %edx
movslq %ebx, %rax
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
mulss .LC7(%rip), %xmm0
mulss .LC8(%rip), %xmm0
movss %xmm0, 0(%r13,%rax,4)
.L49:
addl $1, %ebx
cmpl %r12d, %ebx
je .L51
.L50:
pxor %xmm1, %xmm1
movd %ebp, %xmm2
comiss %xmm2, %xmm1
ja .L58
movslq %ebx, %rax
movl %ebp, 0(%r13,%rax,4)
jmp .L49
.L51:
addl $1, %r14d
addl %r15d, %r12d
addl %r15d, 8(%rsp)
cmpl %r14d, %r15d
je .L44
.L46:
movl 8(%rsp), %ebx
cmpl $0, 12(%rsp)
jg .L50
jmp .L51
.L44:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _Z10FillMatrixPfiif, .-_Z10FillMatrixPfiif
.section .rodata.str1.1
.LC9:
.string "%f,"
.LC10:
.string "\n"
.text
.globl _Z11PrintMatrixPfii
.type _Z11PrintMatrixPfii, @function
_Z11PrintMatrixPfii:
.LFB2065:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 16(%rsp)
movl %edx, 12(%rsp)
testl %esi, %esi
jle .L59
movl %esi, %r15d
movl $0, %r14d
movl $0, %r13d
movl %edx, %eax
cltq
movq %rax, 24(%rsp)
leaq .LC9(%rip), %r12
jmp .L61
.L63:
movslq %r14d, %rax
movq 16(%rsp), %rcx
leaq (%rcx,%rax,4), %rbx
movq 24(%rsp), %rdx
addq %rdx, %rax
leaq (%rcx,%rax,4), %rbp
.L62:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L62
.L64:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addl %r15d, %r14d
cmpl %r13d, %r15d
je .L59
.L61:
cmpl $0, 12(%rsp)
jg .L63
jmp .L64
.L59:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _Z11PrintMatrixPfii, .-_Z11PrintMatrixPfii
.globl main
.type main, @function
main:
.LFB2066:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
movss .LC3(%rip), %xmm0
movl $1024, %edx
movl $1024, %esi
movq %rbp, %rdi
call _Z10FillMatrixPfiif
movss .LC3(%rip), %xmm0
movl $1024, %edx
movl $1024, %esi
movq %rbx, %rdi
call _Z10FillMatrixPfiif
movl $1024, %r9d
movl $1024, %r8d
movq %rbx, %rcx
movl $1024, %edx
movl $1024, %esi
movq %rbp, %rdi
call _Z14MatrixMultGPU0PfiiS_ii
movq %rax, %r12
movq %rbp, %rdi
call _ZdaPv@PLT
movq %rbx, %rdi
call _ZdaPv@PLT
testq %r12, %r12
je .L68
movl $4, %esi
movq %r12, %rdi
call _ZdlPvm@PLT
.L68:
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size main, .-main
.globl _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii
.type _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii, @function
_Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii:
.LFB2091:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L74
.L70:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L75
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L74:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11kMatrixMul0PfS_iiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L70
.L75:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2091:
.size _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii, .-_Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii
.globl _Z11kMatrixMul0PfS_iiS_ii
.type _Z11kMatrixMul0PfS_iiS_ii, @function
_Z11kMatrixMul0PfS_iiS_ii:
.LFB2092:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z39__device_stub__Z11kMatrixMul0PfS_iiS_iiPfS_iiS_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _Z11kMatrixMul0PfS_iiS_ii, .-_Z11kMatrixMul0PfS_iiS_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC11:
.string "_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii"
.section .rodata.str1.1
.LC12:
.string "_Z11kMatrixMul0PfS_iiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2096:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kMatrixMul0PfS_iiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1015021568
.align 4
.LC1:
.long 1258291200
.align 4
.LC3:
.long 1065353216
.align 4
.LC6:
.long 2147483647
.align 4
.LC7:
.long 805306368
.align 4
.LC8:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrixmult.hip"
.globl _Z26__device_stub__kMatrixMul0PfS_iiS_ii # -- Begin function _Z26__device_stub__kMatrixMul0PfS_iiS_ii
.p2align 4, 0x90
.type _Z26__device_stub__kMatrixMul0PfS_iiS_ii,@function
_Z26__device_stub__kMatrixMul0PfS_iiS_ii: # @_Z26__device_stub__kMatrixMul0PfS_iiS_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11kMatrixMul0PfS_iiS_ii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z26__device_stub__kMatrixMul0PfS_iiS_ii, .Lfunc_end0-_Z26__device_stub__kMatrixMul0PfS_iiS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14MatrixMultGPU0PfiiS_ii
.LCPI1_0:
.long 0x3c800000 # float 0.015625
.text
.globl _Z14MatrixMultGPU0PfiiS_ii
.p2align 4, 0x90
.type _Z14MatrixMultGPU0PfiiS_ii,@function
_Z14MatrixMultGPU0PfiiS_ii: # @_Z14MatrixMultGPU0PfiiS_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r15d
movl %r8d, %r13d
movq %rcx, 80(%rsp) # 8-byte Spill
movl %edx, %r12d
movl %esi, %ebx
movq %rdi, 72(%rsp) # 8-byte Spill
leaq 64(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movslq %ebx, %rax
movq %rax, 88(%rsp) # 8-byte Spill
leaq (,%rax,4), %rbp
movslq %r15d, %r14
movq %rbp, %rsi
imulq %r14, %rsi
leaq 32(%rsp), %rdi
movq %rsi, 96(%rsp) # 8-byte Spill
callq hipMalloc
movl %r12d, 40(%rsp) # 4-byte Spill
movslq %r12d, %r12
imulq %rbp, %r12
leaq 24(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movl %r13d, 44(%rsp) # 4-byte Spill
movslq %r13d, %rbp
imulq %r14, %rbp
shlq $2, %rbp
leaq 16(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 72(%rsp), %rsi # 8-byte Reload
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq 80(%rsp), %rsi # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
cvtsi2ss %r14d, %xmm0
mulss .LCPI1_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r14d
xorps %xmm0, %xmm0
cvtsi2ssl 88(%rsp), %xmm0 # 4-byte Folded Reload
mulss .LCPI1_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %edi
shlq $32, %rdi
orq %r14, %rdi
movabsq $274877907008, %rdx # imm = 0x4000000040
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 168(%rsp)
movq %rcx, 160(%rsp)
movl %ebx, 60(%rsp)
movl 40(%rsp), %eax # 4-byte Reload
movl %eax, 56(%rsp)
movq %rdx, 152(%rsp)
movl 44(%rsp), %eax # 4-byte Reload
movl %eax, 52(%rsp)
movl %r15d, 48(%rsp)
leaq 168(%rsp), %rax
movq %rax, 176(%rsp)
leaq 160(%rsp), %rax
movq %rax, 184(%rsp)
leaq 60(%rsp), %rax
movq %rax, 192(%rsp)
leaq 56(%rsp), %rax
movq %rax, 200(%rsp)
leaq 152(%rsp), %rax
movq %rax, 208(%rsp)
leaq 52(%rsp), %rax
movq %rax, 216(%rsp)
leaq 48(%rsp), %rax
movq %rax, 224(%rsp)
leaq 136(%rsp), %rdi
leaq 120(%rsp), %rsi
leaq 112(%rsp), %rdx
leaq 104(%rsp), %rcx
callq __hipPopCallConfiguration
movq 136(%rsp), %rsi
movl 144(%rsp), %edx
movq 120(%rsp), %rcx
movl 128(%rsp), %r8d
leaq 176(%rsp), %r9
movl $_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, %edi
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
imull %ebx, %r15d
movslq %r15d, %rax
leaq (,%rax,4), %rcx
testl %eax, %eax
movq $-1, %rdi
cmovnsq %rcx, %rdi
callq _Znam
movq %rax, %rbx
movq 32(%rsp), %rsi
movq %rax, %rdi
movq 96(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movl $0, 176(%rsp)
movq 64(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 176(%rsp), %rdi
callq hipEventElapsedTime
movss 176(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z14MatrixMultGPU0PfiiS_ii, .Lfunc_end1-_Z14MatrixMultGPU0PfiiS_ii
.cfi_endproc
# -- End function
.section .text._Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii,"axG",@progbits,_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.weak _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii # -- Begin function _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.p2align 4, 0x90
.type _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii,@function
_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii: # @_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii, .Lfunc_end2-_Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.cfi_endproc
# -- End function
.text
.globl _Z12MatrixMulCPUPfiiS_ii # -- Begin function _Z12MatrixMulCPUPfiiS_ii
.p2align 4, 0x90
.type _Z12MatrixMulCPUPfiiS_ii,@function
_Z12MatrixMulCPUPfiiS_ii: # @_Z12MatrixMulCPUPfiiS_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebx
movl %r8d, %r12d
movq %rcx, (%rsp) # 8-byte Spill
movl %edx, %ebp
movl %esi, %r13d
movq %rdi, %r15
movl %r9d, %eax
imull %esi, %eax
cltq
leaq (,%rax,4), %rcx
testl %eax, %eax
movq $-1, %rdi
cmovnsq %rcx, %rdi
callq _Znam
testl %r13d, %r13d
jle .LBB3_10
# %bb.1: # %.preheader.lr.ph
movslq %r12d, %rcx
movl %r13d, %edx
movl %ebx, %esi
movl %ebp, %edi
leaq (,%rdx,4), %r8
shlq $2, %rcx
xorl %r9d, %r9d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_9: # %._crit_edge33
# in Loop: Header=BB3_2 Depth=1
incq %r9
addq %r8, %r15
cmpq %rdx, %r9
je .LBB3_10
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
# Child Loop BB3_6 Depth 3
testl %ebx, %ebx
jle .LBB3_9
# %bb.3: # %.lr.ph32
# in Loop: Header=BB3_2 Depth=1
movq %r9, %r10
imulq %rdx, %r10
leaq (%rax,%r10,4), %r10
movq (%rsp), %r11 # 8-byte Reload
xorl %r12d, %r12d
jmp .LBB3_4
.p2align 4, 0x90
.LBB3_8: # in Loop: Header=BB3_4 Depth=2
incq %r12
addq $4, %r11
cmpq %rsi, %r12
je .LBB3_9
.LBB3_4: # Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_6 Depth 3
movl $0, (%r10,%r12,4)
testl %ebp, %ebp
jle .LBB3_8
# %bb.5: # %.lr.ph
# in Loop: Header=BB3_4 Depth=2
movss (%r10,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq %r11, %r13
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_4 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r15,%r14,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r13), %xmm1
addss %xmm1, %xmm0
incq %r14
addq %rcx, %r13
cmpq %r14, %rdi
jne .LBB3_6
# %bb.7: # %._crit_edge
# in Loop: Header=BB3_4 Depth=2
movss %xmm0, (%r10,%r12,4)
jmp .LBB3_8
.LBB3_10: # %._crit_edge35
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z12MatrixMulCPUPfiiS_ii, .Lfunc_end3-_Z12MatrixMulCPUPfiiS_ii
.cfi_endproc
# -- End function
.globl _Z13CompareMatrixPfS_ii # -- Begin function _Z13CompareMatrixPfS_ii
.p2align 4, 0x90
.type _Z13CompareMatrixPfS_ii,@function
_Z13CompareMatrixPfS_ii: # @_Z13CompareMatrixPfS_ii
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB4_1
# %bb.3: # %.preheader.lr.ph
movslq %ecx, %rax
movl %edx, %edx
movl %eax, %r8d
shlq $2, %rax
xorps %xmm0, %xmm0
xorl %r9d, %r9d
jmp .LBB4_4
.p2align 4, 0x90
.LBB4_7: # %._crit_edge
# in Loop: Header=BB4_4 Depth=1
incq %r9
addq $4, %rsi
addq $4, %rdi
cmpq %rdx, %r9
je .LBB4_2
.LBB4_4: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_6 Depth 2
testl %ecx, %ecx
jle .LBB4_7
# %bb.5: # %.lr.ph.preheader
# in Loop: Header=BB4_4 Depth=1
movq %r8, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB4_6: # %.lr.ph
# Parent Loop BB4_4 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%r11), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%rsi,%r11), %xmm1
addss %xmm1, %xmm0
addq %rax, %r11
decq %r10
jne .LBB4_6
jmp .LBB4_7
.LBB4_1:
xorps %xmm0, %xmm0
.LBB4_2: # %._crit_edge22
retq
.Lfunc_end4:
.size _Z13CompareMatrixPfS_ii, .Lfunc_end4-_Z13CompareMatrixPfS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z10FillMatrixPfiif
.LCPI5_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI5_1:
.long 0x41200000 # float 10
.text
.globl _Z10FillMatrixPfiif
.p2align 4, 0x90
.type _Z10FillMatrixPfiif,@function
_Z10FillMatrixPfiif: # @_Z10FillMatrixPfiif
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, (%rsp) # 4-byte Spill
movq %rdi, 8(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB5_8
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebp
movl %esi, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl (%rsp), %r12d # 4-byte Reload
xorl %r13d, %r13d
xorps %xmm2, %xmm2
xorl %r14d, %r14d
movss %xmm0, 4(%rsp) # 4-byte Spill
jmp .LBB5_2
.p2align 4, 0x90
.LBB5_7: # %._crit_edge
# in Loop: Header=BB5_2 Depth=1
incq %r14
addl %ebp, %r13d
cmpq 16(%rsp), %r14 # 8-byte Folded Reload
je .LBB5_8
.LBB5_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_4 Depth 2
cmpl $0, (%rsp) # 4-byte Folded Reload
jle .LBB5_7
# %bb.3: # %.lr.ph
# in Loop: Header=BB5_2 Depth=1
movl %r13d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r15d, %r15d
jmp .LBB5_4
.p2align 4, 0x90
.LBB5_6: # in Loop: Header=BB5_4 Depth=2
movss %xmm1, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r12
je .LBB5_7
.LBB5_4: # Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
ucomiss %xmm0, %xmm2
movaps %xmm0, %xmm1
jbe .LBB5_6
# %bb.5: # in Loop: Header=BB5_4 Depth=2
callq rand
xorps %xmm2, %xmm2
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss .LCPI5_0(%rip), %xmm1
mulss .LCPI5_1(%rip), %xmm1
jmp .LBB5_6
.LBB5_8: # %._crit_edge19
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z10FillMatrixPfiif, .Lfunc_end5-_Z10FillMatrixPfiif
.cfi_endproc
# -- End function
.globl _Z11PrintMatrixPfii # -- Begin function _Z11PrintMatrixPfii
.p2align 4, 0x90
.type _Z11PrintMatrixPfii,@function
_Z11PrintMatrixPfii: # @_Z11PrintMatrixPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, 4(%rsp) # 4-byte Spill
movq %rdi, 8(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB6_6
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebp
movl %esi, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl 4(%rsp), %r12d # 4-byte Reload
xorl %r13d, %r13d
xorl %r14d, %r14d
jmp .LBB6_2
.p2align 4, 0x90
.LBB6_5: # %._crit_edge
# in Loop: Header=BB6_2 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebp, %r13d
cmpq 16(%rsp), %r14 # 8-byte Folded Reload
je .LBB6_6
.LBB6_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB6_4 Depth 2
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB6_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB6_2 Depth=1
movl %r13d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_4: # Parent Loop BB6_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r12
jne .LBB6_4
jmp .LBB6_5
.LBB6_6: # %._crit_edge13
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z11PrintMatrixPfii, .Lfunc_end6-_Z11PrintMatrixPfii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
xorl %eax, %eax
movq %rbx, %rcx
.p2align 4, 0x90
.LBB7_1: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB7_2 Depth 2
xorl %edx, %edx
.p2align 4, 0x90
.LBB7_2: # Parent Loop BB7_1 Depth=1
# => This Inner Loop Header: Depth=2
movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000
incq %rdx
cmpq $1024, %rdx # imm = 0x400
jne .LBB7_2
# %bb.3: # %._crit_edge.i
# in Loop: Header=BB7_1 Depth=1
incq %rax
addq $4096, %rcx # imm = 0x1000
cmpq $1024, %rax # imm = 0x400
jne .LBB7_1
# %bb.4: # %.preheader.i19.preheader
xorl %eax, %eax
movq %r14, %rcx
.p2align 4, 0x90
.LBB7_5: # %.preheader.i19
# =>This Loop Header: Depth=1
# Child Loop BB7_6 Depth 2
xorl %edx, %edx
.p2align 4, 0x90
.LBB7_6: # Parent Loop BB7_5 Depth=1
# => This Inner Loop Header: Depth=2
movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000
incq %rdx
cmpq $1024, %rdx # imm = 0x400
jne .LBB7_6
# %bb.7: # %._crit_edge.i24
# in Loop: Header=BB7_5 Depth=1
incq %rax
addq $4096, %rcx # imm = 0x1000
cmpq $1024, %rax # imm = 0x400
jne .LBB7_5
# %bb.8: # %_Z10FillMatrixPfiif.exit27
movq %rbx, %rdi
movl $1024, %esi # imm = 0x400
movl $1024, %edx # imm = 0x400
movq %r14, %rcx
movl $1024, %r8d # imm = 0x400
movl $1024, %r9d # imm = 0x400
callq _Z14MatrixMultGPU0PfiiS_ii
movq %rax, %r15
movq %rbx, %rdi
callq _ZdaPv
movq %r14, %rdi
callq _ZdaPv
movq %r15, %rdi
callq _ZdlPv
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kMatrixMul0PfS_iiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11kMatrixMul0PfS_iiS_ii,@object # @_Z11kMatrixMul0PfS_iiS_ii
.section .rodata,"a",@progbits
.globl _Z11kMatrixMul0PfS_iiS_ii
.p2align 3, 0x0
_Z11kMatrixMul0PfS_iiS_ii:
.quad _Z26__device_stub__kMatrixMul0PfS_iiS_ii
.size _Z11kMatrixMul0PfS_iiS_ii, 8
.type _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,@object # @_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.section .rodata._Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,"aG",@progbits,_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii,comdat
.weak _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.p2align 3, 0x0
_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii:
.quad _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.size _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "calculation Time:%f ms\n"
.size .L.str, 24
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%f,"
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11kMatrixMul0PfS_iiS_ii"
.size .L__unnamed_1, 26
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii"
.size .L__unnamed_2, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__kMatrixMul0PfS_iiS_ii
.addrsig_sym _Z26__device_stub__kMatrixMul1ILi64EEvPfS0_iiS0_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11kMatrixMul0PfS_iiS_ii
.addrsig_sym _Z11kMatrixMul1ILi64EEvPfS0_iiS0_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void square( int * d_in){
int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
cudaMalloc((void**) &d_in, ARRAY_BYTES);
// cudaMalloc((void*) &totalSum, sizeof(float));
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_in);
// cudaMemcpy(ans, totalSum, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%f\n",ans);
cudaFree(d_in);
return 0;
} | code for sm_80
Function : _Z6squarePi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0020*/ BPT.TRAP 0x1 ; /* 0x000000040000795c */
/* 0x000fea0000300000 */
/*0030*/ BRA 0x30; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void square( int * d_in){
int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
cudaMalloc((void**) &d_in, ARRAY_BYTES);
// cudaMalloc((void*) &totalSum, sizeof(float));
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_in);
// cudaMemcpy(ans, totalSum, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%f\n",ans);
cudaFree(d_in);
return 0;
} | .file "tmpxft_0011fdb2_00000000-6_atomics.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z6squarePiPi
.type _Z25__device_stub__Z6squarePiPi, @function
_Z25__device_stub__Z6squarePiPi:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6squarePi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z25__device_stub__Z6squarePiPi, .-_Z25__device_stub__Z6squarePiPi
.globl _Z6squarePi
.type _Z6squarePi, @function
_Z6squarePi:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z6squarePiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6squarePi, .-_Z6squarePi
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
.L12:
movl %eax, 32(%rsp,%rax,4)
addq $1, %rax
cmpq $64, %rax
jne .L12
movq %rsp, %rdi
movl $256, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rsi
movl $1, %ecx
movl $256, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $64, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq (%rsp), %rdi
call cudaFree@PLT
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L18
movl $0, %eax
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movq (%rsp), %rdi
call _Z25__device_stub__Z6squarePiPi
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6squarePi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6squarePi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void square( int * d_in){
int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
cudaMalloc((void**) &d_in, ARRAY_BYTES);
// cudaMalloc((void*) &totalSum, sizeof(float));
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_in);
// cudaMemcpy(ans, totalSum, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%f\n",ans);
cudaFree(d_in);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void square( int * d_in){
int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
hipMalloc((void**) &d_in, ARRAY_BYTES);
// cudaMalloc((void*) &totalSum, sizeof(float));
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_in);
// cudaMemcpy(ans, totalSum, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%f\n",ans);
hipFree(d_in);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void square( int * d_in){
int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
hipMalloc((void**) &d_in, ARRAY_BYTES);
// cudaMalloc((void*) &totalSum, sizeof(float));
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_in);
// cudaMemcpy(ans, totalSum, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%f\n",ans);
hipFree(d_in);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6squarePi
.globl _Z6squarePi
.p2align 8
.type _Z6squarePi,@function
_Z6squarePi:
s_load_b64 s[0:1], s[0:1], 0x4
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s0, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s0, s0, s1
v_mul_lo_u32 v2, s0, v1
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u32_u24 v2, v3, s1, v2
v_add_lshl_u32 v0, v2, v0, 2
v_cmpx_eq_u32_e32 0, v1
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v1, 0
ds_store_b32 v0, v1
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
ds_store_b32 v0, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6squarePi
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 1
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6squarePi, .Lfunc_end0-_Z6squarePi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6squarePi
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: _Z6squarePi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void square( int * d_in){
int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
hipMalloc((void**) &d_in, ARRAY_BYTES);
// cudaMalloc((void*) &totalSum, sizeof(float));
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_in);
// cudaMemcpy(ans, totalSum, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%f\n",ans);
hipFree(d_in);
return 0;
} | .text
.file "atomics.hip"
.globl _Z21__device_stub__squarePi # -- Begin function _Z21__device_stub__squarePi
.p2align 4, 0x90
.type _Z21__device_stub__squarePi,@function
_Z21__device_stub__squarePi: # @_Z21__device_stub__squarePi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z6squarePi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z21__device_stub__squarePi, .Lfunc_end0-_Z21__device_stub__squarePi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $344, %rsp # imm = 0x158
.cfi_def_cfa_offset 352
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 80(%rsp,%rax,4)
incq %rax
cmpq $64, %rax
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
movq 8(%rsp), %rdi
leaq 80(%rsp), %rsi
movl $256, %edx # imm = 0x100
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 63(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6squarePi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $344, %rsp # imm = 0x158
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6squarePi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6squarePi,@object # @_Z6squarePi
.section .rodata,"a",@progbits
.globl _Z6squarePi
.p2align 3, 0x0
_Z6squarePi:
.quad _Z21__device_stub__squarePi
.size _Z6squarePi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6squarePi"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__squarePi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6squarePi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6squarePi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0020*/ BPT.TRAP 0x1 ; /* 0x000000040000795c */
/* 0x000fea0000300000 */
/*0030*/ BRA 0x30; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6squarePi
.globl _Z6squarePi
.p2align 8
.type _Z6squarePi,@function
_Z6squarePi:
s_load_b64 s[0:1], s[0:1], 0x4
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s0, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s0, s0, s1
v_mul_lo_u32 v2, s0, v1
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u32_u24 v2, v3, s1, v2
v_add_lshl_u32 v0, v2, v0, 2
v_cmpx_eq_u32_e32 0, v1
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v1, 0
ds_store_b32 v0, v1
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
ds_store_b32 v0, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6squarePi
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 1
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6squarePi, .Lfunc_end0-_Z6squarePi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6squarePi
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: _Z6squarePi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0011fdb2_00000000-6_atomics.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z6squarePiPi
.type _Z25__device_stub__Z6squarePiPi, @function
_Z25__device_stub__Z6squarePiPi:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6squarePi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z25__device_stub__Z6squarePiPi, .-_Z25__device_stub__Z6squarePiPi
.globl _Z6squarePi
.type _Z6squarePi, @function
_Z6squarePi:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z6squarePiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6squarePi, .-_Z6squarePi
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
.L12:
movl %eax, 32(%rsp,%rax,4)
addq $1, %rax
cmpq $64, %rax
jne .L12
movq %rsp, %rdi
movl $256, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rsi
movl $1, %ecx
movl $256, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $64, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq (%rsp), %rdi
call cudaFree@PLT
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L18
movl $0, %eax
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movq (%rsp), %rdi
call _Z25__device_stub__Z6squarePiPi
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6squarePi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6squarePi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "atomics.hip"
.globl _Z21__device_stub__squarePi # -- Begin function _Z21__device_stub__squarePi
.p2align 4, 0x90
.type _Z21__device_stub__squarePi,@function
_Z21__device_stub__squarePi: # @_Z21__device_stub__squarePi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z6squarePi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z21__device_stub__squarePi, .Lfunc_end0-_Z21__device_stub__squarePi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $344, %rsp # imm = 0x158
.cfi_def_cfa_offset 352
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 80(%rsp,%rax,4)
incq %rax
cmpq $64, %rax
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $256, %esi # imm = 0x100
callq hipMalloc
movq 8(%rsp), %rdi
leaq 80(%rsp), %rsi
movl $256, %edx # imm = 0x100
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 63(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6squarePi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $344, %rsp # imm = 0x158
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6squarePi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6squarePi,@object # @_Z6squarePi
.section .rodata,"a",@progbits
.globl _Z6squarePi
.p2align 3, 0x0
_Z6squarePi:
.quad _Z21__device_stub__squarePi
.size _Z6squarePi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6squarePi"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__squarePi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6squarePi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // A shared memory matrix multiplication program
#include "stdio.h"
#include "stdlib.h"
// Keep the SIZE evenly divisible by TILE_WIDTH
#define SIZE 512
#define TILE_WIDTH 16
// kernels that are called by another kernel use the __device__ identifier
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
// this returns the address of the first element in a sub-matrix
// when using this sub-matrix you have to jump ahead SIZE elements per row
float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
return subMatrix;
}
// matrix multiplication kernel definition
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
// Here we call another kernel from within a kernel.
// Note that we do not need to pass any grid or block
// information between the <<< >>> symbols.
float * Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes on element of Csub
// by accumulating results into Csub
float Cvalue = 0.0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Iterate through n sub_matrices, where n depends on the size of the tile and matrix
int sub_matrices_per_row = gridDim.x;
int i;
for ( i = 0; i < sub_matrices_per_row; i++ )
{
// Get sub-matrices
// Block threads will work on these sub-matrices
float * Asub = GetSubMatrix(A, blockRow, i);
float * Bsub = GetSubMatrix(B, i, blockCol);
// Shared memory used to store Asub and Bsub
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
// Load Asub and Bsub from device memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[ row*SIZE + col ];
Bs[row][col] = Bsub[ row*SIZE + col ];
// Sync threads to make sure sub-matrices are completely loaded into shared memory
// Remember, this only syncs threads within a block
__syncthreads();
int j;
// Multiply Asub and Bsub together, using fast shared memory for data access
for ( j = 0; j < TILE_WIDTH ; j++ )
Cvalue += As[row][j] * Bs[j][col];
// Sync threads to ensure the preceding computation is done before loading
// two new sub-matrices of A and B in the next iteration
__syncthreads();
}
Csub[ row*SIZE + col ] = Cvalue;
}
int main(int argc, char ** argv)
{
float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
float * d_A, * d_B, * d_C;
// initialize host matrices with arbitrary data
int i;
for (i=0;i<SIZE*SIZE;i++) {
h_A[i] = (float)i;
h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
h_C[i] = 0.0;
}
// allocate space on device
size_t size = SIZE*SIZE*sizeof(float);
cudaMalloc(&d_A,size);
cudaMalloc(&d_B,size);
cudaMalloc(&d_C,size);
//copy data to device
cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); // 2d block
int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
dim3 blocksPerGrid(blocks,blocks); // 2d grid
// invoke the kernel here
MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C);
// copy results back to host
cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost);
// Free up device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
} | .file "tmpxft_0019743a_00000000-6_mat_multiply.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12GetSubMatrixPfii
.type _Z12GetSubMatrixPfii, @function
_Z12GetSubMatrixPfii:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z12GetSubMatrixPfii, .-_Z12GetSubMatrixPfii
.globl _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
.type _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_, @function
_Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixMulKernelPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_, .-_Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
.globl _Z15MatrixMulKernelPfS_S_
.type _Z15MatrixMulKernelPfS_S_, @function
_Z15MatrixMulKernelPfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z15MatrixMulKernelPfS_S_, .-_Z15MatrixMulKernelPfS_S_
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
leaq -3145728(%rsp), %r11
.cfi_def_cfa 11, 3145736
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $72, %rsp
.cfi_def_cfa_offset 3145808
movq %fs:40, %rax
movq %rax, 3145784(%rsp)
xorl %eax, %eax
movss .LC0(%rip), %xmm3
movss .LC1(%rip), %xmm2
.L14:
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
movss %xmm1, 48(%rsp,%rax,4)
movaps %xmm3, %xmm0
subss %xmm1, %xmm0
subss %xmm2, %xmm0
movss %xmm0, 1048624(%rsp,%rax,4)
movl $0x00000000, 2097200(%rsp,%rax,4)
addq $1, %rax
cmpq $262144, %rax
jne .L14
movq %rsp, %rdi
movl $1048576, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $1048576, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $1048576, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $1048576, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 1048624(%rsp), %rsi
movl $1, %ecx
movl $1048576, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 2097200(%rsp), %rsi
movl $1, %ecx
movl $1048576, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $1, 32(%rsp)
movl $32, 36(%rsp)
movl $32, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
leaq 2097200(%rsp), %rdi
movl $2, %ecx
movl $1048576, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 3145784(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $3145800, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "_Z15MatrixMulKernelPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulKernelPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1216348160
.align 4
.LC1:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // A shared memory matrix multiplication program
#include "stdio.h"
#include "stdlib.h"
// Keep the SIZE evenly divisible by TILE_WIDTH
#define SIZE 512
#define TILE_WIDTH 16
// kernels that are called by another kernel use the __device__ identifier
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
// this returns the address of the first element in a sub-matrix
// when using this sub-matrix you have to jump ahead SIZE elements per row
float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
return subMatrix;
}
// matrix multiplication kernel definition
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
// Here we call another kernel from within a kernel.
// Note that we do not need to pass any grid or block
// information between the <<< >>> symbols.
float * Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes on element of Csub
// by accumulating results into Csub
float Cvalue = 0.0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Iterate through n sub_matrices, where n depends on the size of the tile and matrix
int sub_matrices_per_row = gridDim.x;
int i;
for ( i = 0; i < sub_matrices_per_row; i++ )
{
// Get sub-matrices
// Block threads will work on these sub-matrices
float * Asub = GetSubMatrix(A, blockRow, i);
float * Bsub = GetSubMatrix(B, i, blockCol);
// Shared memory used to store Asub and Bsub
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
// Load Asub and Bsub from device memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[ row*SIZE + col ];
Bs[row][col] = Bsub[ row*SIZE + col ];
// Sync threads to make sure sub-matrices are completely loaded into shared memory
// Remember, this only syncs threads within a block
__syncthreads();
int j;
// Multiply Asub and Bsub together, using fast shared memory for data access
for ( j = 0; j < TILE_WIDTH ; j++ )
Cvalue += As[row][j] * Bs[j][col];
// Sync threads to ensure the preceding computation is done before loading
// two new sub-matrices of A and B in the next iteration
__syncthreads();
}
Csub[ row*SIZE + col ] = Cvalue;
}
int main(int argc, char ** argv)
{
float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
float * d_A, * d_B, * d_C;
// initialize host matrices with arbitrary data
int i;
for (i=0;i<SIZE*SIZE;i++) {
h_A[i] = (float)i;
h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
h_C[i] = 0.0;
}
// allocate space on device
size_t size = SIZE*SIZE*sizeof(float);
cudaMalloc(&d_A,size);
cudaMalloc(&d_B,size);
cudaMalloc(&d_C,size);
//copy data to device
cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); // 2d block
int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
dim3 blocksPerGrid(blocks,blocks); // 2d grid
// invoke the kernel here
MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C);
// copy results back to host
cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost);
// Free up device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
} | // A shared memory matrix multiplication program
#include <hip/hip_runtime.h>
#include "stdio.h"
#include "stdlib.h"
// Keep the SIZE evenly divisible by TILE_WIDTH
#define SIZE 512
#define TILE_WIDTH 16
// kernels that are called by another kernel use the __device__ identifier
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
// this returns the address of the first element in a sub-matrix
// when using this sub-matrix you have to jump ahead SIZE elements per row
float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
return subMatrix;
}
// matrix multiplication kernel definition
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
// Here we call another kernel from within a kernel.
// Note that we do not need to pass any grid or block
// information between the <<< >>> symbols.
float * Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes on element of Csub
// by accumulating results into Csub
float Cvalue = 0.0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Iterate through n sub_matrices, where n depends on the size of the tile and matrix
int sub_matrices_per_row = gridDim.x;
int i;
for ( i = 0; i < sub_matrices_per_row; i++ )
{
// Get sub-matrices
// Block threads will work on these sub-matrices
float * Asub = GetSubMatrix(A, blockRow, i);
float * Bsub = GetSubMatrix(B, i, blockCol);
// Shared memory used to store Asub and Bsub
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
// Load Asub and Bsub from device memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[ row*SIZE + col ];
Bs[row][col] = Bsub[ row*SIZE + col ];
// Sync threads to make sure sub-matrices are completely loaded into shared memory
// Remember, this only syncs threads within a block
__syncthreads();
int j;
// Multiply Asub and Bsub together, using fast shared memory for data access
for ( j = 0; j < TILE_WIDTH ; j++ )
Cvalue += As[row][j] * Bs[j][col];
// Sync threads to ensure the preceding computation is done before loading
// two new sub-matrices of A and B in the next iteration
__syncthreads();
}
Csub[ row*SIZE + col ] = Cvalue;
}
int main(int argc, char ** argv)
{
float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
float * d_A, * d_B, * d_C;
// initialize host matrices with arbitrary data
int i;
for (i=0;i<SIZE*SIZE;i++) {
h_A[i] = (float)i;
h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
h_C[i] = 0.0;
}
// allocate space on device
size_t size = SIZE*SIZE*sizeof(float);
hipMalloc(&d_A,size);
hipMalloc(&d_B,size);
hipMalloc(&d_C,size);
//copy data to device
hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,h_B,size,hipMemcpyHostToDevice);
hipMemcpy(d_C,h_C,size,hipMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); // 2d block
int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
dim3 blocksPerGrid(blocks,blocks); // 2d grid
// invoke the kernel here
MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C);
// copy results back to host
hipMemcpy(h_C,d_C,size,hipMemcpyDeviceToHost);
// Free up device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // A shared memory matrix multiplication program
#include <hip/hip_runtime.h>
#include "stdio.h"
#include "stdlib.h"
// Keep the SIZE evenly divisible by TILE_WIDTH
#define SIZE 512
#define TILE_WIDTH 16
// kernels that are called by another kernel use the __device__ identifier
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
// this returns the address of the first element in a sub-matrix
// when using this sub-matrix you have to jump ahead SIZE elements per row
float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
return subMatrix;
}
// matrix multiplication kernel definition
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
// Here we call another kernel from within a kernel.
// Note that we do not need to pass any grid or block
// information between the <<< >>> symbols.
float * Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes on element of Csub
// by accumulating results into Csub
float Cvalue = 0.0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Iterate through n sub_matrices, where n depends on the size of the tile and matrix
int sub_matrices_per_row = gridDim.x;
int i;
for ( i = 0; i < sub_matrices_per_row; i++ )
{
// Get sub-matrices
// Block threads will work on these sub-matrices
float * Asub = GetSubMatrix(A, blockRow, i);
float * Bsub = GetSubMatrix(B, i, blockCol);
// Shared memory used to store Asub and Bsub
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
// Load Asub and Bsub from device memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[ row*SIZE + col ];
Bs[row][col] = Bsub[ row*SIZE + col ];
// Sync threads to make sure sub-matrices are completely loaded into shared memory
// Remember, this only syncs threads within a block
__syncthreads();
int j;
// Multiply Asub and Bsub together, using fast shared memory for data access
for ( j = 0; j < TILE_WIDTH ; j++ )
Cvalue += As[row][j] * Bs[j][col];
// Sync threads to ensure the preceding computation is done before loading
// two new sub-matrices of A and B in the next iteration
__syncthreads();
}
Csub[ row*SIZE + col ] = Cvalue;
}
int main(int argc, char ** argv)
{
float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
float * d_A, * d_B, * d_C;
// initialize host matrices with arbitrary data
int i;
for (i=0;i<SIZE*SIZE;i++) {
h_A[i] = (float)i;
h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
h_C[i] = 0.0;
}
// allocate space on device
size_t size = SIZE*SIZE*sizeof(float);
hipMalloc(&d_A,size);
hipMalloc(&d_B,size);
hipMalloc(&d_C,size);
//copy data to device
hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,h_B,size,hipMemcpyHostToDevice);
hipMemcpy(d_C,h_C,size,hipMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); // 2d block
int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
dim3 blocksPerGrid(blocks,blocks); // 2d grid
// invoke the kernel here
MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C);
// copy results back to host
hipMemcpy(h_C,d_C,size,hipMemcpyDeviceToHost);
// Free up device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15MatrixMulKernelPfS_S_
.globl _Z15MatrixMulKernelPfS_S_
.p2align 8
.type _Z15MatrixMulKernelPfS_S_,@function
_Z15MatrixMulKernelPfS_S_:
s_load_b32 s4, s[0:1], 0x18
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_lshl_b32 s2, s15, 13
s_lshl_b32 s3, s14, 4
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB0_5
s_load_b128 s[8:11], s[0:1], 0x0
v_lshlrev_b32_e32 v2, 9, v1
v_lshlrev_b32_e32 v5, 2, v0
v_lshlrev_b32_e32 v3, 6, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_lshl_u32 v8, v2, v0, 2
v_mov_b32_e32 v2, 0
v_add_nc_u32_e32 v4, v3, v5
v_add_nc_u32_e32 v5, 0x400, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v10, v5, v3
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, s5, s8, v8
v_add_co_ci_u32_e64 v7, null, s9, 0, s5
v_add_co_u32 v8, s5, s10, v8
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v9, null, s11, 0, s5
s_mov_b32 s5, 0
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
s_lshl_b32 s6, s5, 4
s_lshl_b32 s7, s5, 13
s_add_i32 s6, s6, s2
s_add_i32 s8, s7, s3
s_ashr_i32 s7, s6, 31
s_ashr_i32 s9, s8, 31
s_lshl_b64 s[6:7], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_co_u32 v11, vcc_lo, v6, s6
v_add_co_ci_u32_e32 v12, vcc_lo, s7, v7, vcc_lo
s_lshl_b64 s[6:7], s[8:9], 2
v_add_co_u32 v13, vcc_lo, v8, s6
v_add_co_ci_u32_e32 v14, vcc_lo, s7, v9, vcc_lo
global_load_b32 v12, v[11:12], off
global_load_b32 v13, v[13:14], off
v_mov_b32_e32 v11, v5
s_mov_b32 s6, 0
s_waitcnt vmcnt(1)
ds_store_b32 v4, v12
s_waitcnt vmcnt(0)
ds_store_b32 v10, v13
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_3:
v_add_nc_u32_e32 v12, s6, v3
s_add_i32 s6, s6, 4
ds_load_b32 v13, v11
ds_load_b32 v12, v12
v_add_nc_u32_e32 v11, 64, v11
s_cmp_lg_u32 s6, 64
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v2, v12, v13
s_cbranch_scc1 .LBB0_3
s_add_i32 s5, s5, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s5, s4
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_2
s_branch .LBB0_6
.LBB0_5:
v_mov_b32_e32 v2, 0
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b32_e32 v1, 9, v1
s_add_i32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s3, s2, 31
s_lshl_b64 s[2:3], s[2:3], 2
s_delay_alu instid0(VALU_DEP_1)
v_add_lshl_u32 v0, v1, v0, 2
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v2, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixMulKernelPfS_S_
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15MatrixMulKernelPfS_S_, .Lfunc_end0-_Z15MatrixMulKernelPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixMulKernelPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixMulKernelPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // A shared memory matrix multiplication program
#include <hip/hip_runtime.h>
#include "stdio.h"
#include "stdlib.h"
// Keep the SIZE evenly divisible by TILE_WIDTH
#define SIZE 512
#define TILE_WIDTH 16
// kernels that are called by another kernel use the __device__ identifier
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
// this returns the address of the first element in a sub-matrix
// when using this sub-matrix you have to jump ahead SIZE elements per row
float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
return subMatrix;
}
// matrix multiplication kernel definition
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
// Here we call another kernel from within a kernel.
// Note that we do not need to pass any grid or block
// information between the <<< >>> symbols.
float * Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes on element of Csub
// by accumulating results into Csub
float Cvalue = 0.0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Iterate through n sub_matrices, where n depends on the size of the tile and matrix
int sub_matrices_per_row = gridDim.x;
int i;
for ( i = 0; i < sub_matrices_per_row; i++ )
{
// Get sub-matrices
// Block threads will work on these sub-matrices
float * Asub = GetSubMatrix(A, blockRow, i);
float * Bsub = GetSubMatrix(B, i, blockCol);
// Shared memory used to store Asub and Bsub
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
// Load Asub and Bsub from device memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[ row*SIZE + col ];
Bs[row][col] = Bsub[ row*SIZE + col ];
// Sync threads to make sure sub-matrices are completely loaded into shared memory
// Remember, this only syncs threads within a block
__syncthreads();
int j;
// Multiply Asub and Bsub together, using fast shared memory for data access
for ( j = 0; j < TILE_WIDTH ; j++ )
Cvalue += As[row][j] * Bs[j][col];
// Sync threads to ensure the preceding computation is done before loading
// two new sub-matrices of A and B in the next iteration
__syncthreads();
}
Csub[ row*SIZE + col ] = Cvalue;
}
int main(int argc, char ** argv)
{
float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
float * d_A, * d_B, * d_C;
// initialize host matrices with arbitrary data
int i;
for (i=0;i<SIZE*SIZE;i++) {
h_A[i] = (float)i;
h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
h_C[i] = 0.0;
}
// allocate space on device
size_t size = SIZE*SIZE*sizeof(float);
hipMalloc(&d_A,size);
hipMalloc(&d_B,size);
hipMalloc(&d_C,size);
//copy data to device
hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,h_B,size,hipMemcpyHostToDevice);
hipMemcpy(d_C,h_C,size,hipMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); // 2d block
int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
dim3 blocksPerGrid(blocks,blocks); // 2d grid
// invoke the kernel here
MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C);
// copy results back to host
hipMemcpy(h_C,d_C,size,hipMemcpyDeviceToHost);
// Free up device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
} | .text
.file "mat_multiply.hip"
.globl _Z30__device_stub__MatrixMulKernelPfS_S_ # -- Begin function _Z30__device_stub__MatrixMulKernelPfS_S_
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulKernelPfS_S_,@function
_Z30__device_stub__MatrixMulKernelPfS_S_: # @_Z30__device_stub__MatrixMulKernelPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z30__device_stub__MatrixMulKernelPfS_S_, .Lfunc_end0-_Z30__device_stub__MatrixMulKernelPfS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x48800000 # float 262144
.LCPI1_1:
.long 0xbf800000 # float -1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $3145856, %rsp # imm = 0x300080
.cfi_def_cfa_offset 3145872
.cfi_offset %rbx, -16
leaq 128(%rsp), %rdi
xorl %ebx, %ebx
movl $1048576, %edx # imm = 0x100000
xorl %esi, %esi
callq memset@PLT
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
movss %xmm2, 2097280(%rsp,%rbx,4)
movaps %xmm0, %xmm3
subss %xmm2, %xmm3
addss %xmm1, %xmm3
movss %xmm3, 1048704(%rsp,%rbx,4)
incq %rbx
cmpq $262144, %rbx # imm = 0x40000
jne .LBB1_1
# %bb.2:
leaq 16(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
movq %rsp, %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
movq 16(%rsp), %rdi
leaq 2097280(%rsp), %rsi
movl $1048576, %edx # imm = 0x100000
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 1048704(%rsp), %rsi
movl $1048576, %edx # imm = 0x100000
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 128(%rsp), %rsi
movl $1048576, %edx # imm = 0x100000
movl $1, %ecx
callq hipMemcpy
movabsq $137438953504, %rdi # imm = 0x2000000020
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 128(%rsp), %rdi
movl $1048576, %edx # imm = 0x100000
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $3145856, %rsp # imm = 0x300080
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixMulKernelPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15MatrixMulKernelPfS_S_,@object # @_Z15MatrixMulKernelPfS_S_
.section .rodata,"a",@progbits
.globl _Z15MatrixMulKernelPfS_S_
.p2align 3, 0x0
_Z15MatrixMulKernelPfS_S_:
.quad _Z30__device_stub__MatrixMulKernelPfS_S_
.size _Z15MatrixMulKernelPfS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15MatrixMulKernelPfS_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulKernelPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulKernelPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019743a_00000000-6_mat_multiply.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12GetSubMatrixPfii
.type _Z12GetSubMatrixPfii, @function
_Z12GetSubMatrixPfii:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z12GetSubMatrixPfii, .-_Z12GetSubMatrixPfii
.globl _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
.type _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_, @function
_Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixMulKernelPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_, .-_Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
.globl _Z15MatrixMulKernelPfS_S_
.type _Z15MatrixMulKernelPfS_S_, @function
_Z15MatrixMulKernelPfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z15MatrixMulKernelPfS_S_, .-_Z15MatrixMulKernelPfS_S_
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
leaq -3145728(%rsp), %r11
.cfi_def_cfa 11, 3145736
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $72, %rsp
.cfi_def_cfa_offset 3145808
movq %fs:40, %rax
movq %rax, 3145784(%rsp)
xorl %eax, %eax
movss .LC0(%rip), %xmm3
movss .LC1(%rip), %xmm2
.L14:
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
movss %xmm1, 48(%rsp,%rax,4)
movaps %xmm3, %xmm0
subss %xmm1, %xmm0
subss %xmm2, %xmm0
movss %xmm0, 1048624(%rsp,%rax,4)
movl $0x00000000, 2097200(%rsp,%rax,4)
addq $1, %rax
cmpq $262144, %rax
jne .L14
movq %rsp, %rdi
movl $1048576, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $1048576, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $1048576, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $1048576, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 1048624(%rsp), %rsi
movl $1, %ecx
movl $1048576, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 2097200(%rsp), %rsi
movl $1, %ecx
movl $1048576, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $1, 32(%rsp)
movl $32, 36(%rsp)
movl $32, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
leaq 2097200(%rsp), %rdi
movl $2, %ecx
movl $1048576, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 3145784(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $3145800, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z39__device_stub__Z15MatrixMulKernelPfS_S_PfS_S_
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "_Z15MatrixMulKernelPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulKernelPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1216348160
.align 4
.LC1:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mat_multiply.hip"
.globl _Z30__device_stub__MatrixMulKernelPfS_S_ # -- Begin function _Z30__device_stub__MatrixMulKernelPfS_S_
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulKernelPfS_S_,@function
_Z30__device_stub__MatrixMulKernelPfS_S_: # @_Z30__device_stub__MatrixMulKernelPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z30__device_stub__MatrixMulKernelPfS_S_, .Lfunc_end0-_Z30__device_stub__MatrixMulKernelPfS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x48800000 # float 262144
.LCPI1_1:
.long 0xbf800000 # float -1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $3145856, %rsp # imm = 0x300080
.cfi_def_cfa_offset 3145872
.cfi_offset %rbx, -16
leaq 128(%rsp), %rdi
xorl %ebx, %ebx
movl $1048576, %edx # imm = 0x100000
xorl %esi, %esi
callq memset@PLT
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
movss %xmm2, 2097280(%rsp,%rbx,4)
movaps %xmm0, %xmm3
subss %xmm2, %xmm3
addss %xmm1, %xmm3
movss %xmm3, 1048704(%rsp,%rbx,4)
incq %rbx
cmpq $262144, %rbx # imm = 0x40000
jne .LBB1_1
# %bb.2:
leaq 16(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
movq %rsp, %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
movq 16(%rsp), %rdi
leaq 2097280(%rsp), %rsi
movl $1048576, %edx # imm = 0x100000
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 1048704(%rsp), %rsi
movl $1048576, %edx # imm = 0x100000
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 128(%rsp), %rsi
movl $1048576, %edx # imm = 0x100000
movl $1, %ecx
callq hipMemcpy
movabsq $137438953504, %rdi # imm = 0x2000000020
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15MatrixMulKernelPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 128(%rsp), %rdi
movl $1048576, %edx # imm = 0x100000
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $3145856, %rsp # imm = 0x300080
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixMulKernelPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15MatrixMulKernelPfS_S_,@object # @_Z15MatrixMulKernelPfS_S_
.section .rodata,"a",@progbits
.globl _Z15MatrixMulKernelPfS_S_
.p2align 3, 0x0
_Z15MatrixMulKernelPfS_S_:
.quad _Z30__device_stub__MatrixMulKernelPfS_S_
.size _Z15MatrixMulKernelPfS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15MatrixMulKernelPfS_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulKernelPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulKernelPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void _setPrecisionKernel(float* data, size_t size, int* precision)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= size) return;
int prec = precision[idx];
int mul = 1;
while(prec--) mul *= 10;
data[idx] = (float)(int)(data[idx]*mul);
data[idx] /= mul;
} | code for sm_80
Function : _Z19_setPrecisionKernelPfmPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fc80003f06070 */
/*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x16c], PT, P0 ; /* 0x00005b00ff007a0c */
/* 0x000fda0003f06100 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.SHL.U32 R4, R0, 0x4, RZ ; /* 0x0000000400047824 */
/* 0x000fe200078e00ff */
/*0080*/ SHF.R.U32.HI R5, RZ, 0x1e, R0 ; /* 0x0000001eff057819 */
/* 0x000fe20000011600 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00a0*/ IADD3 R2, P0, R4, c[0x0][0x170], RZ ; /* 0x00005c0004027a10 */
/* 0x000fc80007f1e0ff */
/*00b0*/ IADD3.X R3, R5, c[0x0][0x174], RZ, P0, !PT ; /* 0x00005d0005037a10 */
/* 0x000fca00007fe4ff */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ BSSY B0, 0x3a0 ; /* 0x000002c000007945 */
/* 0x000fe20003800000 */
/*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff007424 */
/* 0x000fe200078e00ff */
/*00f0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x004fda0003f05270 */
/*0100*/ @!P0 BRA 0x390 ; /* 0x0000028000008947 */
/* 0x000fea0003800000 */
/*0110*/ IADD3 R0, R2.reuse, -0x1, RZ ; /* 0xffffffff02007810 */
/* 0x040fe20007ffe0ff */
/*0120*/ BSSY B1, 0x300 ; /* 0x000001d000017945 */
/* 0x000fe20003800000 */
/*0130*/ LOP3.LUT R3, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302037812 */
/* 0x000fe400078ec0ff */
/*0140*/ ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe20003f06070 */
/*0150*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fd800078e00ff */
/*0160*/ @!P0 BRA 0x2f0 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*0170*/ IMAD.IADD R2, R2, 0x1, -R3 ; /* 0x0000000102027824 */
/* 0x000fe400078e0a03 */
/*0180*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fc600078e00ff */
/*0190*/ ISETP.GT.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f04270 */
/*01a0*/ @!P0 BRA 0x2b0 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*01b0*/ ISETP.GT.AND P1, PT, R2, 0xc, PT ; /* 0x0000000c0200780c */
/* 0x000fe20003f24270 */
/*01c0*/ BSSY B2, 0x250 ; /* 0x0000008000027945 */
/* 0x000fe20003800000 */
/*01d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01e0*/ @!P1 BRA 0x240 ; /* 0x0000005000009947 */
/* 0x000fea0003800000 */
/*01f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0200*/ IADD3 R2, R2, -0x10, RZ ; /* 0xfffffff002027810 */
/* 0x000fe20007ffe0ff */
/*0210*/ IMAD R0, R0, 0x6fc10000, RZ ; /* 0x6fc1000000007824 */
/* 0x000fc600078e02ff */
/*0220*/ ISETP.GT.AND P1, PT, R2, 0xc, PT ; /* 0x0000000c0200780c */
/* 0x000fda0003f24270 */
/*0230*/ @P1 BRA 0x200 ; /* 0xffffffc000001947 */
/* 0x000fea000383ffff */
/*0240*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0250*/ ISETP.GT.AND P1, PT, R2, 0x4, PT ; /* 0x000000040200780c */
/* 0x000fda0003f24270 */
/*0260*/ @P1 PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000181c */
/* 0x000fe20003f0e170 */
/*0270*/ @P1 IMAD R0, R0, 0x5f5e100, RZ ; /* 0x05f5e10000001824 */
/* 0x000fe200078e02ff */
/*0280*/ @P1 IADD3 R2, R2, -0x8, RZ ; /* 0xfffffff802021810 */
/* 0x000fd60007ffe0ff */
/*0290*/ ISETP.NE.OR P0, PT, R2, RZ, P0 ; /* 0x000000ff0200720c */
/* 0x000fda0000705670 */
/*02a0*/ @!P0 BRA 0x2f0 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*02b0*/ IADD3 R2, R2, -0x4, RZ ; /* 0xfffffffc02027810 */
/* 0x000fe20007ffe0ff */
/*02c0*/ IMAD R0, R0, 0x2710, RZ ; /* 0x0000271000007824 */
/* 0x000fc600078e02ff */
/*02d0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05270 */
/*02e0*/ @P0 BRA 0x2b0 ; /* 0xffffffc000000947 */
/* 0x000fea000383ffff */
/*02f0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0300*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fe20003f05270 */
/*0310*/ BSSY B1, 0x380 ; /* 0x0000006000017945 */
/* 0x000fd80003800000 */
/*0320*/ @!P0 BRA 0x370 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*0330*/ IADD3 R3, R3, -0x1, RZ ; /* 0xffffffff03037810 */
/* 0x000fe20007ffe0ff */
/*0340*/ IMAD R0, R0, 0xa, RZ ; /* 0x0000000a00007824 */
/* 0x000fc600078e02ff */
/*0350*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*0360*/ @P0 BRA 0x330 ; /* 0xffffffc000000947 */
/* 0x000fea000383ffff */
/*0370*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0380*/ I2F R0, R0 ; /* 0x0000000000007306 */
/* 0x000e240000201400 */
/*0390*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03a0*/ IADD3 R2, P0, R4, c[0x0][0x160], RZ ; /* 0x0000580004027a10 */
/* 0x000fc80007f1e0ff */
/*03b0*/ IADD3.X R3, R5, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590005037a10 */
/* 0x000fca00007fe4ff */
/*03c0*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea2000c1e1900 */
/*03d0*/ MUFU.RCP R7, R0 ; /* 0x0000000000077308 */
/* 0x001e220000001000 */
/*03e0*/ BSSY B0, 0x4d0 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*03f0*/ FFMA R4, R7, -R0, 1 ; /* 0x3f80000007047423 */
/* 0x001fc80000000800 */
/*0400*/ FFMA R4, R7, R4, R7 ; /* 0x0000000407047223 */
/* 0x000fe40000000007 */
/*0410*/ FMUL R5, R5, R0 ; /* 0x0000000005057220 */
/* 0x004fcc0000400000 */
/*0420*/ F2I.TRUNC.NTZ R5, R5 ; /* 0x0000000500057305 */
/* 0x000e30000020f100 */
/*0430*/ I2F R9, R5 ; /* 0x0000000500097306 */
/* 0x001e300000201400 */
/*0440*/ FCHK P0, R9, R0 ; /* 0x0000000009007302 */
/* 0x001e220000000000 */
/*0450*/ FFMA R7, R9, R4, RZ ; /* 0x0000000409077223 */
/* 0x000fc800000000ff */
/*0460*/ FFMA R6, R7, -R0, R9 ; /* 0x8000000007067223 */
/* 0x000fc80000000009 */
/*0470*/ FFMA R7, R4, R6, R7 ; /* 0x0000000604077223 */
/* 0x000fe20000000007 */
/*0480*/ @!P0 BRA 0x4c0 ; /* 0x0000003000008947 */
/* 0x001fea0003800000 */
/*0490*/ IMAD.MOV.U32 R6, RZ, RZ, R0 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0000 */
/*04a0*/ MOV R0, 0x4c0 ; /* 0x000004c000007802 */
/* 0x000fe40000000f00 */
/*04b0*/ CALL.REL.NOINC 0x4f0 ; /* 0x0000003000007944 */
/* 0x000fea0003c00000 */
/*04c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*04e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04f0*/ SHF.R.U32.HI R5, RZ, 0x17, R6 ; /* 0x00000017ff057819 */
/* 0x000fe20000011606 */
/*0500*/ BSSY B1, 0xb50 ; /* 0x0000064000017945 */
/* 0x000fe20003800000 */
/*0510*/ SHF.R.U32.HI R4, RZ, 0x17, R9.reuse ; /* 0x00000017ff047819 */
/* 0x100fe40000011609 */
/*0520*/ LOP3.LUT R13, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff050d7812 */
/* 0x000fe200078ec0ff */
/*0530*/ IMAD.MOV.U32 R5, RZ, RZ, R9 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0009 */
/*0540*/ LOP3.LUT R10, R4, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff040a7812 */
/* 0x000fe400078ec0ff */
/*0550*/ IADD3 R11, R13, -0x1, RZ ; /* 0xffffffff0d0b7810 */
/* 0x000fc40007ffe0ff */
/*0560*/ IADD3 R8, R10, -0x1, RZ ; /* 0xffffffff0a087810 */
/* 0x000fe40007ffe0ff */
/*0570*/ ISETP.GT.U32.AND P0, PT, R11, 0xfd, PT ; /* 0x000000fd0b00780c */
/* 0x000fc80003f04070 */
/*0580*/ ISETP.GT.U32.OR P0, PT, R8, 0xfd, P0 ; /* 0x000000fd0800780c */
/* 0x000fda0000704470 */
/*0590*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff078224 */
/* 0x000fe200078e00ff */
/*05a0*/ @!P0 BRA 0x730 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*05b0*/ FSETP.GTU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fe20003f3c200 */
/*05c0*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0009 */
/*05d0*/ FSETP.GTU.FTZ.AND P0, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */
/* 0x000fc80003f1c200 */
/*05e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*05f0*/ @P0 BRA 0xb30 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0600*/ LOP3.LUT P0, RZ, R6, 0x7fffffff, R5, 0xc8, !PT ; /* 0x7fffffff06ff7812 */
/* 0x000fda000780c805 */
/*0610*/ @!P0 BRA 0xb10 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0620*/ FSETP.NEU.FTZ.AND P2, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fe40003f5d200 */
/*0630*/ FSETP.NEU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fe40003f3d200 */
/*0640*/ FSETP.NEU.FTZ.AND P0, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fd60003f1d200 */
/*0650*/ @!P1 BRA !P2, 0xb10 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*0660*/ LOP3.LUT P2, RZ, R5, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff05ff7812 */
/* 0x000fc8000784c0ff */
/*0670*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*0680*/ @P1 BRA 0xaf0 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*0690*/ LOP3.LUT P1, RZ, R6, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff06ff7812 */
/* 0x000fc8000782c0ff */
/*06a0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*06b0*/ @P0 BRA 0xac0 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*06c0*/ ISETP.GE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f06270 */
/*06d0*/ ISETP.GE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fd60003f26270 */
/*06e0*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff070224 */
/* 0x000fe400078e00ff */
/*06f0*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, -0x40 ; /* 0xffffffc0ff078424 */
/* 0x000fe400078e00ff */
/*0700*/ @!P0 FFMA R5, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004058823 */
/* 0x000fe400000000ff */
/*0710*/ @!P1 FFMA R6, R6, 1.84467440737095516160e+19, RZ ; /* 0x5f80000006069823 */
/* 0x000fe200000000ff */
/*0720*/ @!P1 IADD3 R7, R7, 0x40, RZ ; /* 0x0000004007079810 */
/* 0x000fe40007ffe0ff */
/*0730*/ LEA R9, R13, 0xc0800000, 0x17 ; /* 0xc08000000d097811 */
/* 0x000fe200078eb8ff */
/*0740*/ BSSY B2, 0xab0 ; /* 0x0000036000027945 */
/* 0x000fe80003800000 */
/*0750*/ IMAD.IADD R9, R6, 0x1, -R9 ; /* 0x0000000106097824 */
/* 0x000fe200078e0a09 */
/*0760*/ IADD3 R6, R10, -0x7f, RZ ; /* 0xffffff810a067810 */
/* 0x000fc60007ffe0ff */
/*0770*/ MUFU.RCP R4, R9 ; /* 0x0000000900047308 */
/* 0x000e220000001000 */
/*0780*/ FADD.FTZ R8, -R9, -RZ ; /* 0x800000ff09087221 */
/* 0x000fe40000010100 */
/*0790*/ IMAD R5, R6.reuse, -0x800000, R5 ; /* 0xff80000006057824 */
/* 0x040fe200078e0205 */
/*07a0*/ IADD3 R6, R6, 0x7f, -R13 ; /* 0x0000007f06067810 */
/* 0x000fca0007ffe80d */
/*07b0*/ IMAD.IADD R6, R6, 0x1, R7 ; /* 0x0000000106067824 */
/* 0x000fe400078e0207 */
/*07c0*/ FFMA R11, R4, R8, 1 ; /* 0x3f800000040b7423 */
/* 0x001fc80000000008 */
/*07d0*/ FFMA R10, R4, R11, R4 ; /* 0x0000000b040a7223 */
/* 0x000fc80000000004 */
/*07e0*/ FFMA R4, R5, R10, RZ ; /* 0x0000000a05047223 */
/* 0x000fc800000000ff */
/*07f0*/ FFMA R11, R8, R4, R5 ; /* 0x00000004080b7223 */
/* 0x000fc80000000005 */
/*0800*/ FFMA R11, R10, R11, R4 ; /* 0x0000000b0a0b7223 */
/* 0x000fc80000000004 */
/*0810*/ FFMA R8, R8, R11, R5 ; /* 0x0000000b08087223 */
/* 0x000fc80000000005 */
/*0820*/ FFMA R4, R10, R8, R11 ; /* 0x000000080a047223 */
/* 0x000fca000000000b */
/*0830*/ SHF.R.U32.HI R5, RZ, 0x17, R4 ; /* 0x00000017ff057819 */
/* 0x000fc80000011604 */
/*0840*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */
/* 0x000fca00078ec0ff */
/*0850*/ IMAD.IADD R9, R5, 0x1, R6 ; /* 0x0000000105097824 */
/* 0x000fca00078e0206 */
/*0860*/ IADD3 R5, R9, -0x1, RZ ; /* 0xffffffff09057810 */
/* 0x000fc80007ffe0ff */
/*0870*/ ISETP.GE.U32.AND P0, PT, R5, 0xfe, PT ; /* 0x000000fe0500780c */
/* 0x000fda0003f06070 */
/*0880*/ @!P0 BRA 0xa90 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*0890*/ ISETP.GT.AND P0, PT, R9, 0xfe, PT ; /* 0x000000fe0900780c */
/* 0x000fda0003f04270 */
/*08a0*/ @P0 BRA 0xa60 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*08b0*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fda0003f06270 */
/*08c0*/ @P0 BRA 0xaa0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*08d0*/ ISETP.GE.AND P0, PT, R9, -0x18, PT ; /* 0xffffffe80900780c */
/* 0x000fe40003f06270 */
/*08e0*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */
/* 0x000fd600078ec0ff */
/*08f0*/ @!P0 BRA 0xaa0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0900*/ FFMA.RZ R5, R10.reuse, R8.reuse, R11.reuse ; /* 0x000000080a057223 */
/* 0x1c0fe2000000c00b */
/*0910*/ ISETP.NE.AND P2, PT, R9.reuse, RZ, PT ; /* 0x000000ff0900720c */
/* 0x040fe20003f45270 */
/*0920*/ FFMA.RM R6, R10.reuse, R8.reuse, R11.reuse ; /* 0x000000080a067223 */
/* 0x1c0fe2000000400b */
/*0930*/ ISETP.NE.AND P1, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe40003f25270 */
/*0940*/ LOP3.LUT R7, R5, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff05077812 */
/* 0x000fe200078ec0ff */
/*0950*/ FFMA.RP R5, R10, R8, R11 ; /* 0x000000080a057223 */
/* 0x000fe2000000800b */
/*0960*/ IADD3 R8, R9, 0x20, RZ ; /* 0x0000002009087810 */
/* 0x000fe20007ffe0ff */
/*0970*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a09 */
/*0980*/ LOP3.LUT R7, R7, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000007077812 */
/* 0x000fe400078efcff */
/*0990*/ FSETP.NEU.FTZ.AND P0, PT, R5, R6, PT ; /* 0x000000060500720b */
/* 0x000fc40003f1d000 */
/*09a0*/ SHF.L.U32 R8, R7, R8, RZ ; /* 0x0000000807087219 */
/* 0x000fe400000006ff */
/*09b0*/ SEL R6, R9, RZ, P2 ; /* 0x000000ff09067207 */
/* 0x000fe40001000000 */
/*09c0*/ ISETP.NE.AND P1, PT, R8, RZ, P1 ; /* 0x000000ff0800720c */
/* 0x000fe40000f25270 */
/*09d0*/ SHF.R.U32.HI R6, RZ, R6, R7 ; /* 0x00000006ff067219 */
/* 0x000fe40000011607 */
/*09e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703570 */
/*09f0*/ SHF.R.U32.HI R8, RZ, 0x1, R6 ; /* 0x00000001ff087819 */
/* 0x000fc40000011606 */
/*0a00*/ SEL R5, RZ, 0x1, !P0 ; /* 0x00000001ff057807 */
/* 0x000fc80004000000 */
/*0a10*/ LOP3.LUT R5, R5, 0x1, R8, 0xf8, !PT ; /* 0x0000000105057812 */
/* 0x000fc800078ef808 */
/*0a20*/ LOP3.LUT R5, R5, R6, RZ, 0xc0, !PT ; /* 0x0000000605057212 */
/* 0x000fca00078ec0ff */
/*0a30*/ IMAD.IADD R5, R8, 0x1, R5 ; /* 0x0000000108057824 */
/* 0x000fca00078e0205 */
/*0a40*/ LOP3.LUT R4, R5, R4, RZ, 0xfc, !PT ; /* 0x0000000405047212 */
/* 0x000fe200078efcff */
/*0a50*/ BRA 0xaa0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0a60*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */
/* 0x000fc800078ec0ff */
/*0a70*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */
/* 0x000fe200078efcff */
/*0a80*/ BRA 0xaa0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0a90*/ IMAD R4, R6, 0x800000, R4 ; /* 0x0080000006047824 */
/* 0x000fe400078e0204 */
/*0aa0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0ab0*/ BRA 0xb40 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*0ac0*/ LOP3.LUT R4, R6, 0x80000000, R5, 0x48, !PT ; /* 0x8000000006047812 */
/* 0x000fc800078e4805 */
/*0ad0*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */
/* 0x000fe200078efcff */
/*0ae0*/ BRA 0xb40 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0af0*/ LOP3.LUT R4, R6, 0x80000000, R5, 0x48, !PT ; /* 0x8000000006047812 */
/* 0x000fe200078e4805 */
/*0b00*/ BRA 0xb40 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0b10*/ MUFU.RSQ R4, -QNAN ; /* 0xffc0000000047908 */
/* 0x000e220000001400 */
/*0b20*/ BRA 0xb40 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0b30*/ FADD.FTZ R4, R4, R6 ; /* 0x0000000604047221 */
/* 0x000fe40000010000 */
/*0b40*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0b50*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */
/* 0x001fe400078e0004 */
/*0b60*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*0b70*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*0b80*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff47004007950 */
/* 0x000fea0003c3ffff */
/*0b90*/ BRA 0xb90; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0ba0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0be0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void _setPrecisionKernel(float* data, size_t size, int* precision)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= size) return;
int prec = precision[idx];
int mul = 1;
while(prec--) mul *= 10;
data[idx] = (float)(int)(data[idx]*mul);
data[idx] /= mul;
} | .file "tmpxft_0016fd16_00000000-6__setPrecisionKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi
.type _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi, @function
_Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19_setPrecisionKernelPfmPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi, .-_Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi
.globl _Z19_setPrecisionKernelPfmPi
.type _Z19_setPrecisionKernelPfmPi, @function
_Z19_setPrecisionKernelPfmPi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19_setPrecisionKernelPfmPi, .-_Z19_setPrecisionKernelPfmPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z19_setPrecisionKernelPfmPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19_setPrecisionKernelPfmPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void _setPrecisionKernel(float* data, size_t size, int* precision)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= size) return;
int prec = precision[idx];
int mul = 1;
while(prec--) mul *= 10;
data[idx] = (float)(int)(data[idx]*mul);
data[idx] /= mul;
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _setPrecisionKernel(float* data, size_t size, int* precision)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= size) return;
int prec = precision[idx];
int mul = 1;
while(prec--) mul *= 10;
data[idx] = (float)(int)(data[idx]*mul);
data[idx] /= mul;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _setPrecisionKernel(float* data, size_t size, int* precision)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= size) return;
int prec = precision[idx];
int mul = 1;
while(prec--) mul *= 10;
data[idx] = (float)(int)(data[idx]*mul);
data[idx] /= mul;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19_setPrecisionKernelPfmPi
.globl _Z19_setPrecisionKernelPfmPi
.p2align 8
.type _Z19_setPrecisionKernelPfmPi,@function
_Z19_setPrecisionKernelPfmPi:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_6
s_load_b64 s[2:3], s[0:1], 0x10
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_mov_b32_e32 v0, 1.0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_mov_b32 s3, 0
s_mov_b32 s2, exec_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 0, v3
s_cbranch_execz .LBB0_5
s_mov_b32 s4, 1
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s4, s4, 10
v_dual_mov_b32 v0, s4 :: v_dual_add_nc_u32 v3, -1, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_eq_u32_e32 vcc_lo, 0, v3
s_or_b32 s3, vcc_lo, s3
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s3
v_cvt_f32_i32_e32 v0, v0
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_load_b32 v3, v[1:2], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f32_e32 v3, v3
v_cvt_f32_i32_e32 v3, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f32 v4, null, v0, v0, v3
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v5, v6, v5
v_div_scale_f32 v6, vcc_lo, v3, v0, v3
v_mul_f32_e32 v7, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v8, -v4, v7, v6
v_fmac_f32_e32 v7, v8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v4, -v4, v7, v6
v_div_fmas_f32 v4, v4, v5, v7
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f32 v0, v4, v0, v3
global_store_b32 v[1:2], v0, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19_setPrecisionKernelPfmPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19_setPrecisionKernelPfmPi, .Lfunc_end0-_Z19_setPrecisionKernelPfmPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19_setPrecisionKernelPfmPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19_setPrecisionKernelPfmPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _setPrecisionKernel(float* data, size_t size, int* precision)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= size) return;
int prec = precision[idx];
int mul = 1;
while(prec--) mul *= 10;
data[idx] = (float)(int)(data[idx]*mul);
data[idx] /= mul;
} | .text
.file "_setPrecisionKernel.hip"
.globl _Z34__device_stub___setPrecisionKernelPfmPi # -- Begin function _Z34__device_stub___setPrecisionKernelPfmPi
.p2align 4, 0x90
.type _Z34__device_stub___setPrecisionKernelPfmPi,@function
_Z34__device_stub___setPrecisionKernelPfmPi: # @_Z34__device_stub___setPrecisionKernelPfmPi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19_setPrecisionKernelPfmPi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z34__device_stub___setPrecisionKernelPfmPi, .Lfunc_end0-_Z34__device_stub___setPrecisionKernelPfmPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19_setPrecisionKernelPfmPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19_setPrecisionKernelPfmPi,@object # @_Z19_setPrecisionKernelPfmPi
.section .rodata,"a",@progbits
.globl _Z19_setPrecisionKernelPfmPi
.p2align 3, 0x0
_Z19_setPrecisionKernelPfmPi:
.quad _Z34__device_stub___setPrecisionKernelPfmPi
.size _Z19_setPrecisionKernelPfmPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19_setPrecisionKernelPfmPi"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub___setPrecisionKernelPfmPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19_setPrecisionKernelPfmPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z19_setPrecisionKernelPfmPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fc80003f06070 */
/*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x16c], PT, P0 ; /* 0x00005b00ff007a0c */
/* 0x000fda0003f06100 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.SHL.U32 R4, R0, 0x4, RZ ; /* 0x0000000400047824 */
/* 0x000fe200078e00ff */
/*0080*/ SHF.R.U32.HI R5, RZ, 0x1e, R0 ; /* 0x0000001eff057819 */
/* 0x000fe20000011600 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00a0*/ IADD3 R2, P0, R4, c[0x0][0x170], RZ ; /* 0x00005c0004027a10 */
/* 0x000fc80007f1e0ff */
/*00b0*/ IADD3.X R3, R5, c[0x0][0x174], RZ, P0, !PT ; /* 0x00005d0005037a10 */
/* 0x000fca00007fe4ff */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ BSSY B0, 0x3a0 ; /* 0x000002c000007945 */
/* 0x000fe20003800000 */
/*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff007424 */
/* 0x000fe200078e00ff */
/*00f0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x004fda0003f05270 */
/*0100*/ @!P0 BRA 0x390 ; /* 0x0000028000008947 */
/* 0x000fea0003800000 */
/*0110*/ IADD3 R0, R2.reuse, -0x1, RZ ; /* 0xffffffff02007810 */
/* 0x040fe20007ffe0ff */
/*0120*/ BSSY B1, 0x300 ; /* 0x000001d000017945 */
/* 0x000fe20003800000 */
/*0130*/ LOP3.LUT R3, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302037812 */
/* 0x000fe400078ec0ff */
/*0140*/ ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe20003f06070 */
/*0150*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fd800078e00ff */
/*0160*/ @!P0 BRA 0x2f0 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*0170*/ IMAD.IADD R2, R2, 0x1, -R3 ; /* 0x0000000102027824 */
/* 0x000fe400078e0a03 */
/*0180*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fc600078e00ff */
/*0190*/ ISETP.GT.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f04270 */
/*01a0*/ @!P0 BRA 0x2b0 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*01b0*/ ISETP.GT.AND P1, PT, R2, 0xc, PT ; /* 0x0000000c0200780c */
/* 0x000fe20003f24270 */
/*01c0*/ BSSY B2, 0x250 ; /* 0x0000008000027945 */
/* 0x000fe20003800000 */
/*01d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01e0*/ @!P1 BRA 0x240 ; /* 0x0000005000009947 */
/* 0x000fea0003800000 */
/*01f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0200*/ IADD3 R2, R2, -0x10, RZ ; /* 0xfffffff002027810 */
/* 0x000fe20007ffe0ff */
/*0210*/ IMAD R0, R0, 0x6fc10000, RZ ; /* 0x6fc1000000007824 */
/* 0x000fc600078e02ff */
/*0220*/ ISETP.GT.AND P1, PT, R2, 0xc, PT ; /* 0x0000000c0200780c */
/* 0x000fda0003f24270 */
/*0230*/ @P1 BRA 0x200 ; /* 0xffffffc000001947 */
/* 0x000fea000383ffff */
/*0240*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0250*/ ISETP.GT.AND P1, PT, R2, 0x4, PT ; /* 0x000000040200780c */
/* 0x000fda0003f24270 */
/*0260*/ @P1 PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000181c */
/* 0x000fe20003f0e170 */
/*0270*/ @P1 IMAD R0, R0, 0x5f5e100, RZ ; /* 0x05f5e10000001824 */
/* 0x000fe200078e02ff */
/*0280*/ @P1 IADD3 R2, R2, -0x8, RZ ; /* 0xfffffff802021810 */
/* 0x000fd60007ffe0ff */
/*0290*/ ISETP.NE.OR P0, PT, R2, RZ, P0 ; /* 0x000000ff0200720c */
/* 0x000fda0000705670 */
/*02a0*/ @!P0 BRA 0x2f0 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*02b0*/ IADD3 R2, R2, -0x4, RZ ; /* 0xfffffffc02027810 */
/* 0x000fe20007ffe0ff */
/*02c0*/ IMAD R0, R0, 0x2710, RZ ; /* 0x0000271000007824 */
/* 0x000fc600078e02ff */
/*02d0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05270 */
/*02e0*/ @P0 BRA 0x2b0 ; /* 0xffffffc000000947 */
/* 0x000fea000383ffff */
/*02f0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0300*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fe20003f05270 */
/*0310*/ BSSY B1, 0x380 ; /* 0x0000006000017945 */
/* 0x000fd80003800000 */
/*0320*/ @!P0 BRA 0x370 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*0330*/ IADD3 R3, R3, -0x1, RZ ; /* 0xffffffff03037810 */
/* 0x000fe20007ffe0ff */
/*0340*/ IMAD R0, R0, 0xa, RZ ; /* 0x0000000a00007824 */
/* 0x000fc600078e02ff */
/*0350*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*0360*/ @P0 BRA 0x330 ; /* 0xffffffc000000947 */
/* 0x000fea000383ffff */
/*0370*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0380*/ I2F R0, R0 ; /* 0x0000000000007306 */
/* 0x000e240000201400 */
/*0390*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03a0*/ IADD3 R2, P0, R4, c[0x0][0x160], RZ ; /* 0x0000580004027a10 */
/* 0x000fc80007f1e0ff */
/*03b0*/ IADD3.X R3, R5, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590005037a10 */
/* 0x000fca00007fe4ff */
/*03c0*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea2000c1e1900 */
/*03d0*/ MUFU.RCP R7, R0 ; /* 0x0000000000077308 */
/* 0x001e220000001000 */
/*03e0*/ BSSY B0, 0x4d0 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*03f0*/ FFMA R4, R7, -R0, 1 ; /* 0x3f80000007047423 */
/* 0x001fc80000000800 */
/*0400*/ FFMA R4, R7, R4, R7 ; /* 0x0000000407047223 */
/* 0x000fe40000000007 */
/*0410*/ FMUL R5, R5, R0 ; /* 0x0000000005057220 */
/* 0x004fcc0000400000 */
/*0420*/ F2I.TRUNC.NTZ R5, R5 ; /* 0x0000000500057305 */
/* 0x000e30000020f100 */
/*0430*/ I2F R9, R5 ; /* 0x0000000500097306 */
/* 0x001e300000201400 */
/*0440*/ FCHK P0, R9, R0 ; /* 0x0000000009007302 */
/* 0x001e220000000000 */
/*0450*/ FFMA R7, R9, R4, RZ ; /* 0x0000000409077223 */
/* 0x000fc800000000ff */
/*0460*/ FFMA R6, R7, -R0, R9 ; /* 0x8000000007067223 */
/* 0x000fc80000000009 */
/*0470*/ FFMA R7, R4, R6, R7 ; /* 0x0000000604077223 */
/* 0x000fe20000000007 */
/*0480*/ @!P0 BRA 0x4c0 ; /* 0x0000003000008947 */
/* 0x001fea0003800000 */
/*0490*/ IMAD.MOV.U32 R6, RZ, RZ, R0 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0000 */
/*04a0*/ MOV R0, 0x4c0 ; /* 0x000004c000007802 */
/* 0x000fe40000000f00 */
/*04b0*/ CALL.REL.NOINC 0x4f0 ; /* 0x0000003000007944 */
/* 0x000fea0003c00000 */
/*04c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04d0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*04e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04f0*/ SHF.R.U32.HI R5, RZ, 0x17, R6 ; /* 0x00000017ff057819 */
/* 0x000fe20000011606 */
/*0500*/ BSSY B1, 0xb50 ; /* 0x0000064000017945 */
/* 0x000fe20003800000 */
/*0510*/ SHF.R.U32.HI R4, RZ, 0x17, R9.reuse ; /* 0x00000017ff047819 */
/* 0x100fe40000011609 */
/*0520*/ LOP3.LUT R13, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff050d7812 */
/* 0x000fe200078ec0ff */
/*0530*/ IMAD.MOV.U32 R5, RZ, RZ, R9 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0009 */
/*0540*/ LOP3.LUT R10, R4, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff040a7812 */
/* 0x000fe400078ec0ff */
/*0550*/ IADD3 R11, R13, -0x1, RZ ; /* 0xffffffff0d0b7810 */
/* 0x000fc40007ffe0ff */
/*0560*/ IADD3 R8, R10, -0x1, RZ ; /* 0xffffffff0a087810 */
/* 0x000fe40007ffe0ff */
/*0570*/ ISETP.GT.U32.AND P0, PT, R11, 0xfd, PT ; /* 0x000000fd0b00780c */
/* 0x000fc80003f04070 */
/*0580*/ ISETP.GT.U32.OR P0, PT, R8, 0xfd, P0 ; /* 0x000000fd0800780c */
/* 0x000fda0000704470 */
/*0590*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff078224 */
/* 0x000fe200078e00ff */
/*05a0*/ @!P0 BRA 0x730 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*05b0*/ FSETP.GTU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fe20003f3c200 */
/*05c0*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0009 */
/*05d0*/ FSETP.GTU.FTZ.AND P0, PT, |R9|, +INF , PT ; /* 0x7f8000000900780b */
/* 0x000fc80003f1c200 */
/*05e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*05f0*/ @P0 BRA 0xb30 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0600*/ LOP3.LUT P0, RZ, R6, 0x7fffffff, R5, 0xc8, !PT ; /* 0x7fffffff06ff7812 */
/* 0x000fda000780c805 */
/*0610*/ @!P0 BRA 0xb10 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0620*/ FSETP.NEU.FTZ.AND P2, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fe40003f5d200 */
/*0630*/ FSETP.NEU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fe40003f3d200 */
/*0640*/ FSETP.NEU.FTZ.AND P0, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fd60003f1d200 */
/*0650*/ @!P1 BRA !P2, 0xb10 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*0660*/ LOP3.LUT P2, RZ, R5, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff05ff7812 */
/* 0x000fc8000784c0ff */
/*0670*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*0680*/ @P1 BRA 0xaf0 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*0690*/ LOP3.LUT P1, RZ, R6, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff06ff7812 */
/* 0x000fc8000782c0ff */
/*06a0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*06b0*/ @P0 BRA 0xac0 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*06c0*/ ISETP.GE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f06270 */
/*06d0*/ ISETP.GE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fd60003f26270 */
/*06e0*/ @P0 IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff070224 */
/* 0x000fe400078e00ff */
/*06f0*/ @!P0 IMAD.MOV.U32 R7, RZ, RZ, -0x40 ; /* 0xffffffc0ff078424 */
/* 0x000fe400078e00ff */
/*0700*/ @!P0 FFMA R5, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004058823 */
/* 0x000fe400000000ff */
/*0710*/ @!P1 FFMA R6, R6, 1.84467440737095516160e+19, RZ ; /* 0x5f80000006069823 */
/* 0x000fe200000000ff */
/*0720*/ @!P1 IADD3 R7, R7, 0x40, RZ ; /* 0x0000004007079810 */
/* 0x000fe40007ffe0ff */
/*0730*/ LEA R9, R13, 0xc0800000, 0x17 ; /* 0xc08000000d097811 */
/* 0x000fe200078eb8ff */
/*0740*/ BSSY B2, 0xab0 ; /* 0x0000036000027945 */
/* 0x000fe80003800000 */
/*0750*/ IMAD.IADD R9, R6, 0x1, -R9 ; /* 0x0000000106097824 */
/* 0x000fe200078e0a09 */
/*0760*/ IADD3 R6, R10, -0x7f, RZ ; /* 0xffffff810a067810 */
/* 0x000fc60007ffe0ff */
/*0770*/ MUFU.RCP R4, R9 ; /* 0x0000000900047308 */
/* 0x000e220000001000 */
/*0780*/ FADD.FTZ R8, -R9, -RZ ; /* 0x800000ff09087221 */
/* 0x000fe40000010100 */
/*0790*/ IMAD R5, R6.reuse, -0x800000, R5 ; /* 0xff80000006057824 */
/* 0x040fe200078e0205 */
/*07a0*/ IADD3 R6, R6, 0x7f, -R13 ; /* 0x0000007f06067810 */
/* 0x000fca0007ffe80d */
/*07b0*/ IMAD.IADD R6, R6, 0x1, R7 ; /* 0x0000000106067824 */
/* 0x000fe400078e0207 */
/*07c0*/ FFMA R11, R4, R8, 1 ; /* 0x3f800000040b7423 */
/* 0x001fc80000000008 */
/*07d0*/ FFMA R10, R4, R11, R4 ; /* 0x0000000b040a7223 */
/* 0x000fc80000000004 */
/*07e0*/ FFMA R4, R5, R10, RZ ; /* 0x0000000a05047223 */
/* 0x000fc800000000ff */
/*07f0*/ FFMA R11, R8, R4, R5 ; /* 0x00000004080b7223 */
/* 0x000fc80000000005 */
/*0800*/ FFMA R11, R10, R11, R4 ; /* 0x0000000b0a0b7223 */
/* 0x000fc80000000004 */
/*0810*/ FFMA R8, R8, R11, R5 ; /* 0x0000000b08087223 */
/* 0x000fc80000000005 */
/*0820*/ FFMA R4, R10, R8, R11 ; /* 0x000000080a047223 */
/* 0x000fca000000000b */
/*0830*/ SHF.R.U32.HI R5, RZ, 0x17, R4 ; /* 0x00000017ff057819 */
/* 0x000fc80000011604 */
/*0840*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */
/* 0x000fca00078ec0ff */
/*0850*/ IMAD.IADD R9, R5, 0x1, R6 ; /* 0x0000000105097824 */
/* 0x000fca00078e0206 */
/*0860*/ IADD3 R5, R9, -0x1, RZ ; /* 0xffffffff09057810 */
/* 0x000fc80007ffe0ff */
/*0870*/ ISETP.GE.U32.AND P0, PT, R5, 0xfe, PT ; /* 0x000000fe0500780c */
/* 0x000fda0003f06070 */
/*0880*/ @!P0 BRA 0xa90 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*0890*/ ISETP.GT.AND P0, PT, R9, 0xfe, PT ; /* 0x000000fe0900780c */
/* 0x000fda0003f04270 */
/*08a0*/ @P0 BRA 0xa60 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*08b0*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fda0003f06270 */
/*08c0*/ @P0 BRA 0xaa0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*08d0*/ ISETP.GE.AND P0, PT, R9, -0x18, PT ; /* 0xffffffe80900780c */
/* 0x000fe40003f06270 */
/*08e0*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */
/* 0x000fd600078ec0ff */
/*08f0*/ @!P0 BRA 0xaa0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0900*/ FFMA.RZ R5, R10.reuse, R8.reuse, R11.reuse ; /* 0x000000080a057223 */
/* 0x1c0fe2000000c00b */
/*0910*/ ISETP.NE.AND P2, PT, R9.reuse, RZ, PT ; /* 0x000000ff0900720c */
/* 0x040fe20003f45270 */
/*0920*/ FFMA.RM R6, R10.reuse, R8.reuse, R11.reuse ; /* 0x000000080a067223 */
/* 0x1c0fe2000000400b */
/*0930*/ ISETP.NE.AND P1, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe40003f25270 */
/*0940*/ LOP3.LUT R7, R5, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff05077812 */
/* 0x000fe200078ec0ff */
/*0950*/ FFMA.RP R5, R10, R8, R11 ; /* 0x000000080a057223 */
/* 0x000fe2000000800b */
/*0960*/ IADD3 R8, R9, 0x20, RZ ; /* 0x0000002009087810 */
/* 0x000fe20007ffe0ff */
/*0970*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a09 */
/*0980*/ LOP3.LUT R7, R7, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000007077812 */
/* 0x000fe400078efcff */
/*0990*/ FSETP.NEU.FTZ.AND P0, PT, R5, R6, PT ; /* 0x000000060500720b */
/* 0x000fc40003f1d000 */
/*09a0*/ SHF.L.U32 R8, R7, R8, RZ ; /* 0x0000000807087219 */
/* 0x000fe400000006ff */
/*09b0*/ SEL R6, R9, RZ, P2 ; /* 0x000000ff09067207 */
/* 0x000fe40001000000 */
/*09c0*/ ISETP.NE.AND P1, PT, R8, RZ, P1 ; /* 0x000000ff0800720c */
/* 0x000fe40000f25270 */
/*09d0*/ SHF.R.U32.HI R6, RZ, R6, R7 ; /* 0x00000006ff067219 */
/* 0x000fe40000011607 */
/*09e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703570 */
/*09f0*/ SHF.R.U32.HI R8, RZ, 0x1, R6 ; /* 0x00000001ff087819 */
/* 0x000fc40000011606 */
/*0a00*/ SEL R5, RZ, 0x1, !P0 ; /* 0x00000001ff057807 */
/* 0x000fc80004000000 */
/*0a10*/ LOP3.LUT R5, R5, 0x1, R8, 0xf8, !PT ; /* 0x0000000105057812 */
/* 0x000fc800078ef808 */
/*0a20*/ LOP3.LUT R5, R5, R6, RZ, 0xc0, !PT ; /* 0x0000000605057212 */
/* 0x000fca00078ec0ff */
/*0a30*/ IMAD.IADD R5, R8, 0x1, R5 ; /* 0x0000000108057824 */
/* 0x000fca00078e0205 */
/*0a40*/ LOP3.LUT R4, R5, R4, RZ, 0xfc, !PT ; /* 0x0000000405047212 */
/* 0x000fe200078efcff */
/*0a50*/ BRA 0xaa0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0a60*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */
/* 0x000fc800078ec0ff */
/*0a70*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */
/* 0x000fe200078efcff */
/*0a80*/ BRA 0xaa0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0a90*/ IMAD R4, R6, 0x800000, R4 ; /* 0x0080000006047824 */
/* 0x000fe400078e0204 */
/*0aa0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0ab0*/ BRA 0xb40 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*0ac0*/ LOP3.LUT R4, R6, 0x80000000, R5, 0x48, !PT ; /* 0x8000000006047812 */
/* 0x000fc800078e4805 */
/*0ad0*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */
/* 0x000fe200078efcff */
/*0ae0*/ BRA 0xb40 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0af0*/ LOP3.LUT R4, R6, 0x80000000, R5, 0x48, !PT ; /* 0x8000000006047812 */
/* 0x000fe200078e4805 */
/*0b00*/ BRA 0xb40 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0b10*/ MUFU.RSQ R4, -QNAN ; /* 0xffc0000000047908 */
/* 0x000e220000001400 */
/*0b20*/ BRA 0xb40 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0b30*/ FADD.FTZ R4, R4, R6 ; /* 0x0000000604047221 */
/* 0x000fe40000010000 */
/*0b40*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0b50*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */
/* 0x001fe400078e0004 */
/*0b60*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*0b70*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*0b80*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff47004007950 */
/* 0x000fea0003c3ffff */
/*0b90*/ BRA 0xb90; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0ba0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0be0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19_setPrecisionKernelPfmPi
.globl _Z19_setPrecisionKernelPfmPi
.p2align 8
.type _Z19_setPrecisionKernelPfmPi,@function
_Z19_setPrecisionKernelPfmPi:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_6
s_load_b64 s[2:3], s[0:1], 0x10
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_mov_b32_e32 v0, 1.0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_mov_b32 s3, 0
s_mov_b32 s2, exec_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 0, v3
s_cbranch_execz .LBB0_5
s_mov_b32 s4, 1
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s4, s4, 10
v_dual_mov_b32 v0, s4 :: v_dual_add_nc_u32 v3, -1, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_eq_u32_e32 vcc_lo, 0, v3
s_or_b32 s3, vcc_lo, s3
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s3
v_cvt_f32_i32_e32 v0, v0
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_load_b32 v3, v[1:2], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f32_e32 v3, v3
v_cvt_f32_i32_e32 v3, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f32 v4, null, v0, v0, v3
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v5, v6, v5
v_div_scale_f32 v6, vcc_lo, v3, v0, v3
v_mul_f32_e32 v7, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v8, -v4, v7, v6
v_fmac_f32_e32 v7, v8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v4, -v4, v7, v6
v_div_fmas_f32 v4, v4, v5, v7
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f32 v0, v4, v0, v3
global_store_b32 v[1:2], v0, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19_setPrecisionKernelPfmPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19_setPrecisionKernelPfmPi, .Lfunc_end0-_Z19_setPrecisionKernelPfmPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19_setPrecisionKernelPfmPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19_setPrecisionKernelPfmPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016fd16_00000000-6__setPrecisionKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi
.type _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi, @function
_Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19_setPrecisionKernelPfmPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi, .-_Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi
.globl _Z19_setPrecisionKernelPfmPi
.type _Z19_setPrecisionKernelPfmPi, @function
_Z19_setPrecisionKernelPfmPi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z19_setPrecisionKernelPfmPiPfmPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19_setPrecisionKernelPfmPi, .-_Z19_setPrecisionKernelPfmPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z19_setPrecisionKernelPfmPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19_setPrecisionKernelPfmPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "_setPrecisionKernel.hip"
.globl _Z34__device_stub___setPrecisionKernelPfmPi # -- Begin function _Z34__device_stub___setPrecisionKernelPfmPi
.p2align 4, 0x90
.type _Z34__device_stub___setPrecisionKernelPfmPi,@function
_Z34__device_stub___setPrecisionKernelPfmPi: # @_Z34__device_stub___setPrecisionKernelPfmPi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19_setPrecisionKernelPfmPi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z34__device_stub___setPrecisionKernelPfmPi, .Lfunc_end0-_Z34__device_stub___setPrecisionKernelPfmPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19_setPrecisionKernelPfmPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19_setPrecisionKernelPfmPi,@object # @_Z19_setPrecisionKernelPfmPi
.section .rodata,"a",@progbits
.globl _Z19_setPrecisionKernelPfmPi
.p2align 3, 0x0
_Z19_setPrecisionKernelPfmPi:
.quad _Z34__device_stub___setPrecisionKernelPfmPi
.size _Z19_setPrecisionKernelPfmPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19_setPrecisionKernelPfmPi"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub___setPrecisionKernelPfmPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19_setPrecisionKernelPfmPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h> | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h> | .file "tmpxft_001bbb56_00000000-6_cubemap_texture.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h> | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h> |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h> | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h> | .text
.file "cubemap_texture.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001bbb56_00000000-6_cubemap_texture.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cubemap_texture.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //#define REARRANGED_DOMAIN
__global__ void _interpolate_from_vertices_to_edges(
int N,
double* vertex_values,
double* edge_values)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3= k*3;
#endif
double q0, q1, q2;
//for (k=0; k<N; k++) {
#ifndef REARRANGED_DOMAIN
q0 = vertex_values[k3 + 0];
q1 = vertex_values[k3 + 1];
q2 = vertex_values[k3 + 2];
edge_values[k3 + 0] = 0.5*(q1+q2);
edge_values[k3 + 1] = 0.5*(q0+q2);
edge_values[k3 + 2] = 0.5*(q0+q1);
#else
q0 = vertex_values[k];
q1 = vertex_values[k + N];
q2 = vertex_values[k + 2*N];
edge_values[k] = 0.5*(q1+q2);
edge_values[k + N] = 0.5*(q0+q2);
edge_values[k + 2*N] = 0.5*(q0+q1);
#endif
//}
} | code for sm_80
Function : _Z35_interpolate_from_vertices_to_edgesiPdS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0030*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e680000002200 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000ea20000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fc800078e0205 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0207 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R13, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0d7435 */
/* 0x000fe200000001ff */
/*00b0*/ LEA R0, R0, R0, 0x1 ; /* 0x0000000000007211 */
/* 0x000fe200078e08ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R13, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e020d */
/*00e0*/ LDG.E.64 R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ea8000c1e1b00 */
/*00f0*/ LDG.E.64 R6, [R2.64+0x10] ; /* 0x0000100402067981 */
/* 0x000ea8000c1e1b00 */
/*0100*/ LDG.E.64 R8, [R2.64+0x8] ; /* 0x0000080402087981 */
/* 0x000ee2000c1e1b00 */
/*0110*/ DADD R10, R4, R6 ; /* 0x00000000040a7229 */
/* 0x004e080000000006 */
/*0120*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x008e480000000008 */
/*0130*/ DADD R8, R4, R8 ; /* 0x0000000004087229 */
/* 0x0005e40000000008 */
/*0140*/ IMAD.WIDE R4, R0, R13, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x004fe400078e020d */
/*0150*/ DMUL R10, R10, 0.5 ; /* 0x3fe000000a0a7828 */
/* 0x001e080000000000 */
/*0160*/ DMUL R6, R6, 0.5 ; /* 0x3fe0000006067828 */
/* 0x002e460000000000 */
/*0170*/ STG.E.64 [R4.64+0x8], R10 ; /* 0x0000080a04007986 */
/* 0x001fe2000c101b04 */
/*0180*/ DMUL R8, R8, 0.5 ; /* 0x3fe0000008087828 */
/* 0x000e060000000000 */
/*0190*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x002fe8000c101b04 */
/*01a0*/ STG.E.64 [R4.64+0x10], R8 ; /* 0x0000100804007986 */
/* 0x001fe2000c101b04 */
/*01b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01c0*/ BRA 0x1c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //#define REARRANGED_DOMAIN
__global__ void _interpolate_from_vertices_to_edges(
int N,
double* vertex_values,
double* edge_values)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3= k*3;
#endif
double q0, q1, q2;
//for (k=0; k<N; k++) {
#ifndef REARRANGED_DOMAIN
q0 = vertex_values[k3 + 0];
q1 = vertex_values[k3 + 1];
q2 = vertex_values[k3 + 2];
edge_values[k3 + 0] = 0.5*(q1+q2);
edge_values[k3 + 1] = 0.5*(q0+q2);
edge_values[k3 + 2] = 0.5*(q0+q1);
#else
q0 = vertex_values[k];
q1 = vertex_values[k + N];
q2 = vertex_values[k + 2*N];
edge_values[k] = 0.5*(q1+q2);
edge_values[k + N] = 0.5*(q0+q2);
edge_values[k + 2*N] = 0.5*(q0+q1);
#endif
//}
} | .file "tmpxft_000df930_00000000-6_interpolate_from_vertices_to_edges.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_
.type _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_, @function
_Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z35_interpolate_from_vertices_to_edgesiPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_, .-_Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_
.globl _Z35_interpolate_from_vertices_to_edgesiPdS_
.type _Z35_interpolate_from_vertices_to_edgesiPdS_, @function
_Z35_interpolate_from_vertices_to_edgesiPdS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z35_interpolate_from_vertices_to_edgesiPdS_, .-_Z35_interpolate_from_vertices_to_edgesiPdS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z35_interpolate_from_vertices_to_edgesiPdS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z35_interpolate_from_vertices_to_edgesiPdS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //#define REARRANGED_DOMAIN
__global__ void _interpolate_from_vertices_to_edges(
int N,
double* vertex_values,
double* edge_values)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3= k*3;
#endif
double q0, q1, q2;
//for (k=0; k<N; k++) {
#ifndef REARRANGED_DOMAIN
q0 = vertex_values[k3 + 0];
q1 = vertex_values[k3 + 1];
q2 = vertex_values[k3 + 2];
edge_values[k3 + 0] = 0.5*(q1+q2);
edge_values[k3 + 1] = 0.5*(q0+q2);
edge_values[k3 + 2] = 0.5*(q0+q1);
#else
q0 = vertex_values[k];
q1 = vertex_values[k + N];
q2 = vertex_values[k + 2*N];
edge_values[k] = 0.5*(q1+q2);
edge_values[k + N] = 0.5*(q0+q2);
edge_values[k + 2*N] = 0.5*(q0+q1);
#endif
//}
} | #include <hip/hip_runtime.h>
//#define REARRANGED_DOMAIN
__global__ void _interpolate_from_vertices_to_edges(
int N,
double* vertex_values,
double* edge_values)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3= k*3;
#endif
double q0, q1, q2;
//for (k=0; k<N; k++) {
#ifndef REARRANGED_DOMAIN
q0 = vertex_values[k3 + 0];
q1 = vertex_values[k3 + 1];
q2 = vertex_values[k3 + 2];
edge_values[k3 + 0] = 0.5*(q1+q2);
edge_values[k3 + 1] = 0.5*(q0+q2);
edge_values[k3 + 2] = 0.5*(q0+q1);
#else
q0 = vertex_values[k];
q1 = vertex_values[k + N];
q2 = vertex_values[k + 2*N];
edge_values[k] = 0.5*(q1+q2);
edge_values[k + N] = 0.5*(q0+q2);
edge_values[k + 2*N] = 0.5*(q0+q1);
#endif
//}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
//#define REARRANGED_DOMAIN
__global__ void _interpolate_from_vertices_to_edges(
int N,
double* vertex_values,
double* edge_values)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3= k*3;
#endif
double q0, q1, q2;
//for (k=0; k<N; k++) {
#ifndef REARRANGED_DOMAIN
q0 = vertex_values[k3 + 0];
q1 = vertex_values[k3 + 1];
q2 = vertex_values[k3 + 2];
edge_values[k3 + 0] = 0.5*(q1+q2);
edge_values[k3 + 1] = 0.5*(q0+q2);
edge_values[k3 + 2] = 0.5*(q0+q1);
#else
q0 = vertex_values[k];
q1 = vertex_values[k + N];
q2 = vertex_values[k + 2*N];
edge_values[k] = 0.5*(q1+q2);
edge_values[k + N] = 0.5*(q0+q2);
edge_values[k + 2*N] = 0.5*(q0+q1);
#endif
//}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z35_interpolate_from_vertices_to_edgesiPdS_
.globl _Z35_interpolate_from_vertices_to_edgesiPdS_
.p2align 8
.type _Z35_interpolate_from_vertices_to_edgesiPdS_,@function
_Z35_interpolate_from_vertices_to_edgesiPdS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x18
s_load_b32 s3, s[0:1], 0x24
v_bfe_u32 v1, v0, 10, 10
s_load_b32 s4, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_lshr_b32 s5, s3, 16
s_add_i32 s2, s2, s14
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s2, s5, v[1:2]
v_and_b32_e32 v3, 0x3ff, v0
s_and_b32 s2, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, v2, s2, v[3:4]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s4, v0
s_cbranch_execz .LBB0_2
v_lshl_add_u32 v0, v0, 1, v0
s_load_b128 s[0:3], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v0, 8
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v12, vcc_lo, v0, 16
v_add_co_ci_u32_e32 v13, vcc_lo, 0, v1, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v10
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v11, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v12
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v13, vcc_lo
v_add_co_u32 v6, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v1, vcc_lo
s_clause 0x2
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
global_load_b64 v[6:7], v[6:7], off
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[8:9], v[2:3], v[4:5]
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[6:7], v[4:5]
v_add_f64 v[2:3], v[6:7], v[2:3]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f64 v[6:7], v[8:9], 0.5
v_mul_f64 v[4:5], v[4:5], 0.5
s_delay_alu instid0(VALU_DEP_3)
v_mul_f64 v[2:3], v[2:3], 0.5
v_add_co_u32 v8, vcc_lo, s2, v10
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v11, vcc_lo
v_add_co_u32 v10, vcc_lo, s2, v12
v_add_co_ci_u32_e32 v11, vcc_lo, s3, v13, vcc_lo
s_clause 0x2
global_store_b64 v[0:1], v[6:7], off
global_store_b64 v[8:9], v[4:5], off
global_store_b64 v[10:11], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z35_interpolate_from_vertices_to_edgesiPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z35_interpolate_from_vertices_to_edgesiPdS_, .Lfunc_end0-_Z35_interpolate_from_vertices_to_edgesiPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z35_interpolate_from_vertices_to_edgesiPdS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z35_interpolate_from_vertices_to_edgesiPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
//#define REARRANGED_DOMAIN
__global__ void _interpolate_from_vertices_to_edges(
int N,
double* vertex_values,
double* edge_values)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3= k*3;
#endif
double q0, q1, q2;
//for (k=0; k<N; k++) {
#ifndef REARRANGED_DOMAIN
q0 = vertex_values[k3 + 0];
q1 = vertex_values[k3 + 1];
q2 = vertex_values[k3 + 2];
edge_values[k3 + 0] = 0.5*(q1+q2);
edge_values[k3 + 1] = 0.5*(q0+q2);
edge_values[k3 + 2] = 0.5*(q0+q1);
#else
q0 = vertex_values[k];
q1 = vertex_values[k + N];
q2 = vertex_values[k + 2*N];
edge_values[k] = 0.5*(q1+q2);
edge_values[k + N] = 0.5*(q0+q2);
edge_values[k + 2*N] = 0.5*(q0+q1);
#endif
//}
} | .text
.file "interpolate_from_vertices_to_edges.hip"
.globl _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_ # -- Begin function _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.p2align 4, 0x90
.type _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_,@function
_Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_: # @_Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z35_interpolate_from_vertices_to_edgesiPdS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_, .Lfunc_end0-_Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z35_interpolate_from_vertices_to_edgesiPdS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z35_interpolate_from_vertices_to_edgesiPdS_,@object # @_Z35_interpolate_from_vertices_to_edgesiPdS_
.section .rodata,"a",@progbits
.globl _Z35_interpolate_from_vertices_to_edgesiPdS_
.p2align 3, 0x0
_Z35_interpolate_from_vertices_to_edgesiPdS_:
.quad _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.size _Z35_interpolate_from_vertices_to_edgesiPdS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z35_interpolate_from_vertices_to_edgesiPdS_"
.size .L__unnamed_1, 45
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z35_interpolate_from_vertices_to_edgesiPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z35_interpolate_from_vertices_to_edgesiPdS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0030*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e680000002200 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000ea20000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fc800078e0205 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0207 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R13, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0d7435 */
/* 0x000fe200000001ff */
/*00b0*/ LEA R0, R0, R0, 0x1 ; /* 0x0000000000007211 */
/* 0x000fe200078e08ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R13, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e020d */
/*00e0*/ LDG.E.64 R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ea8000c1e1b00 */
/*00f0*/ LDG.E.64 R6, [R2.64+0x10] ; /* 0x0000100402067981 */
/* 0x000ea8000c1e1b00 */
/*0100*/ LDG.E.64 R8, [R2.64+0x8] ; /* 0x0000080402087981 */
/* 0x000ee2000c1e1b00 */
/*0110*/ DADD R10, R4, R6 ; /* 0x00000000040a7229 */
/* 0x004e080000000006 */
/*0120*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x008e480000000008 */
/*0130*/ DADD R8, R4, R8 ; /* 0x0000000004087229 */
/* 0x0005e40000000008 */
/*0140*/ IMAD.WIDE R4, R0, R13, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x004fe400078e020d */
/*0150*/ DMUL R10, R10, 0.5 ; /* 0x3fe000000a0a7828 */
/* 0x001e080000000000 */
/*0160*/ DMUL R6, R6, 0.5 ; /* 0x3fe0000006067828 */
/* 0x002e460000000000 */
/*0170*/ STG.E.64 [R4.64+0x8], R10 ; /* 0x0000080a04007986 */
/* 0x001fe2000c101b04 */
/*0180*/ DMUL R8, R8, 0.5 ; /* 0x3fe0000008087828 */
/* 0x000e060000000000 */
/*0190*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x002fe8000c101b04 */
/*01a0*/ STG.E.64 [R4.64+0x10], R8 ; /* 0x0000100804007986 */
/* 0x001fe2000c101b04 */
/*01b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01c0*/ BRA 0x1c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z35_interpolate_from_vertices_to_edgesiPdS_
.globl _Z35_interpolate_from_vertices_to_edgesiPdS_
.p2align 8
.type _Z35_interpolate_from_vertices_to_edgesiPdS_,@function
_Z35_interpolate_from_vertices_to_edgesiPdS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x18
s_load_b32 s3, s[0:1], 0x24
v_bfe_u32 v1, v0, 10, 10
s_load_b32 s4, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_lshr_b32 s5, s3, 16
s_add_i32 s2, s2, s14
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s2, s5, v[1:2]
v_and_b32_e32 v3, 0x3ff, v0
s_and_b32 s2, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, v2, s2, v[3:4]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s4, v0
s_cbranch_execz .LBB0_2
v_lshl_add_u32 v0, v0, 1, v0
s_load_b128 s[0:3], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v0, 8
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v12, vcc_lo, v0, 16
v_add_co_ci_u32_e32 v13, vcc_lo, 0, v1, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v10
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v11, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v12
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v13, vcc_lo
v_add_co_u32 v6, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v1, vcc_lo
s_clause 0x2
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
global_load_b64 v[6:7], v[6:7], off
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[8:9], v[2:3], v[4:5]
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[6:7], v[4:5]
v_add_f64 v[2:3], v[6:7], v[2:3]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f64 v[6:7], v[8:9], 0.5
v_mul_f64 v[4:5], v[4:5], 0.5
s_delay_alu instid0(VALU_DEP_3)
v_mul_f64 v[2:3], v[2:3], 0.5
v_add_co_u32 v8, vcc_lo, s2, v10
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v11, vcc_lo
v_add_co_u32 v10, vcc_lo, s2, v12
v_add_co_ci_u32_e32 v11, vcc_lo, s3, v13, vcc_lo
s_clause 0x2
global_store_b64 v[0:1], v[6:7], off
global_store_b64 v[8:9], v[4:5], off
global_store_b64 v[10:11], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z35_interpolate_from_vertices_to_edgesiPdS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z35_interpolate_from_vertices_to_edgesiPdS_, .Lfunc_end0-_Z35_interpolate_from_vertices_to_edgesiPdS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z35_interpolate_from_vertices_to_edgesiPdS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z35_interpolate_from_vertices_to_edgesiPdS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000df930_00000000-6_interpolate_from_vertices_to_edges.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_
.type _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_, @function
_Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z35_interpolate_from_vertices_to_edgesiPdS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_, .-_Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_
.globl _Z35_interpolate_from_vertices_to_edgesiPdS_
.type _Z35_interpolate_from_vertices_to_edgesiPdS_, @function
_Z35_interpolate_from_vertices_to_edgesiPdS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z58__device_stub__Z35_interpolate_from_vertices_to_edgesiPdS_iPdS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z35_interpolate_from_vertices_to_edgesiPdS_, .-_Z35_interpolate_from_vertices_to_edgesiPdS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z35_interpolate_from_vertices_to_edgesiPdS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z35_interpolate_from_vertices_to_edgesiPdS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "interpolate_from_vertices_to_edges.hip"
.globl _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_ # -- Begin function _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.p2align 4, 0x90
.type _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_,@function
_Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_: # @_Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z35_interpolate_from_vertices_to_edgesiPdS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_, .Lfunc_end0-_Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z35_interpolate_from_vertices_to_edgesiPdS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z35_interpolate_from_vertices_to_edgesiPdS_,@object # @_Z35_interpolate_from_vertices_to_edgesiPdS_
.section .rodata,"a",@progbits
.globl _Z35_interpolate_from_vertices_to_edgesiPdS_
.p2align 3, 0x0
_Z35_interpolate_from_vertices_to_edgesiPdS_:
.quad _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.size _Z35_interpolate_from_vertices_to_edgesiPdS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z35_interpolate_from_vertices_to_edgesiPdS_"
.size .L__unnamed_1, 45
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z50__device_stub___interpolate_from_vertices_to_edgesiPdS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z35_interpolate_from_vertices_to_edgesiPdS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
// Getting block index of current kernel
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
cudaMalloc((void **)&d_a, sizeof(int) * N);
cudaMalloc((void **)&d_b, sizeof(int) * N);
cudaMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
// Calling kernels with N blocks and one thread per block, passing device pointers as parameters
gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
cudaThreadSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
system("pause");
return 0;
} | code for sm_80
Function : _Z6gpuAddPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, 0x3e8, R3 ; /* 0x000003e806067824 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R6, 0x98967f, PT ; /* 0x0098967f0600780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
// Getting block index of current kernel
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
cudaMalloc((void **)&d_a, sizeof(int) * N);
cudaMalloc((void **)&d_b, sizeof(int) * N);
cudaMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
// Calling kernels with N blocks and one thread per block, passing device pointers as parameters
gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
cudaThreadSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
system("pause");
return 0;
} | .file "tmpxft_0002beec_00000000-6_Add_gpu.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
.type _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, @function
_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6gpuAddPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
.globl _Z6gpuAddPiS_S_
.type _Z6gpuAddPiS_S_, @function
_Z6gpuAddPiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6gpuAddPiS_S_, .-_Z6gpuAddPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Vector addition on GPU \n"
.LC2:
.string "N = %d \n"
.LC3:
.string "Execute time: %f seconds \n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "The sum of %d element is %d + %d = %d\n"
.section .rodata.str1.1
.LC5:
.string "pause"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $40000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $40000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $40000000, %edi
call malloc@PLT
movq %rax, %r13
movl $1, %eax
.L12:
movl %eax, -4(%rbx,%rax,4)
leal -2(%rax), %edx
movl %edx, -4(%rbp,%rax,4)
addq $1, %rax
cmpq $10000001, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $40000000, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, %r14
movl $1000, 44(%rsp)
movl $1, 48(%rsp)
movl $10000, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaThreadSynchronize@PLT
call clock@PLT
movq %rax, %r12
movl $2, %ecx
movl $40000000, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
subq %r14, %r12
pxor %xmm0, %xmm0
cvtsi2sdq %r12, %xmm0
divsd .LC1(%rip), %xmm0
movq %xmm0, %r12
movl $10000000, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $9999990, %r12d
leaq .LC4(%rip), %r14
.L14:
movl (%rbx,%r12,4), %ecx
movl 0(%r13,%r12,4), %r9d
movl 0(%rbp,%r12,4), %r8d
movl %r12d, %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %r12
cmpq $10000000, %r12
jne .L14
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
leaq .LC5(%rip), %rdi
call system@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z6gpuAddPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z6gpuAddPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
// Getting block index of current kernel
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
cudaMalloc((void **)&d_a, sizeof(int) * N);
cudaMalloc((void **)&d_b, sizeof(int) * N);
cudaMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
// Calling kernels with N blocks and one thread per block, passing device pointers as parameters
gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
cudaThreadSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
system("pause");
return 0;
} | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <hip/hip_runtime.h>
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
// Getting block index of current kernel
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
hipMalloc((void **)&d_a, sizeof(int) * N);
hipMalloc((void **)&d_b, sizeof(int) * N);
hipMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
hipMemcpy(d_a, h_a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N * sizeof(int), hipMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
// Calling kernels with N blocks and one thread per block, passing device pointers as parameters
gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
hipDeviceSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
hipMemcpy(h_c, d_c, N * sizeof(int), hipMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
system("pause");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <hip/hip_runtime.h>
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
// Getting block index of current kernel
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
hipMalloc((void **)&d_a, sizeof(int) * N);
hipMalloc((void **)&d_b, sizeof(int) * N);
hipMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
hipMemcpy(d_a, h_a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N * sizeof(int), hipMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
// Calling kernels with N blocks and one thread per block, passing device pointers as parameters
gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
hipDeviceSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
hipMemcpy(h_c, d_c, N * sizeof(int), hipMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
system("pause");
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6gpuAddPiS_S_
.globl _Z6gpuAddPiS_S_
.p2align 8
.type _Z6gpuAddPiS_S_,@function
_Z6gpuAddPiS_S_:
v_mad_u64_u32 v[1:2], null, s15, 0x3e8, v[0:1]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x989680, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6gpuAddPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6gpuAddPiS_S_, .Lfunc_end0-_Z6gpuAddPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6gpuAddPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6gpuAddPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <hip/hip_runtime.h>
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
// Getting block index of current kernel
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
hipMalloc((void **)&d_a, sizeof(int) * N);
hipMalloc((void **)&d_b, sizeof(int) * N);
hipMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
hipMemcpy(d_a, h_a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N * sizeof(int), hipMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
// Calling kernels with N blocks and one thread per block, passing device pointers as parameters
gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
hipDeviceSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
hipMemcpy(h_c, d_c, N * sizeof(int), hipMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
system("pause");
return 0;
} | .text
.file "Add_gpu.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__gpuAddPiS_S_ # -- Begin function _Z21__device_stub__gpuAddPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__gpuAddPiS_S_,@function
_Z21__device_stub__gpuAddPiS_S_: # @_Z21__device_stub__gpuAddPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6gpuAddPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__gpuAddPiS_S_, .Lfunc_end0-_Z21__device_stub__gpuAddPiS_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %rbx
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %r14
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %r15
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq 1(%rax), %rcx
movl %ecx, (%rbx,%rax,4)
leal -1(%rax), %edx
movl %edx, (%r14,%rax,4)
movq %rcx, %rax
cmpq $10000000, %rcx # imm = 0x989680
jne .LBB1_1
# %bb.2:
leaq 24(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
movq 24(%rsp), %rdi
movl $40000000, %edx # imm = 0x2625A00
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $40000000, %edx # imm = 0x2625A00
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %r12
movabsq $4294968296, %rdx # imm = 0x1000003E8
leaq 9000(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6gpuAddPiS_S_, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
callq clock
movq %rax, %r13
movq 8(%rsp), %rsi
movl $40000000, %edx # imm = 0x2625A00
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
subq %r12, %r13
cvtsi2sd %r13, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movsd %xmm0, 32(%rsp) # 8-byte Spill
movl $.L.str.1, %edi
movl $10000000, %esi # imm = 0x989680
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movsd 32(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movl $9999990, %r12d # imm = 0x989676
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %edx
movl (%r14,%r12,4), %ecx
movl (%r15,%r12,4), %r8d
movl $.L.str.3, %edi
movl %r12d, %esi
xorl %eax, %eax
callq printf
incq %r12
cmpq $10000000, %r12 # imm = 0x989680
jne .LBB1_5
# %bb.6:
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl $.L.str.4, %edi
callq system
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6gpuAddPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6gpuAddPiS_S_,@object # @_Z6gpuAddPiS_S_
.section .rodata,"a",@progbits
.globl _Z6gpuAddPiS_S_
.p2align 3, 0x0
_Z6gpuAddPiS_S_:
.quad _Z21__device_stub__gpuAddPiS_S_
.size _Z6gpuAddPiS_S_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "N = %d \n"
.size .L.str.1, 9
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Execute time: %f seconds \n"
.size .L.str.2, 27
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "The sum of %d element is %d + %d = %d\n"
.size .L.str.3, 39
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "pause"
.size .L.str.4, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6gpuAddPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Vector addition on GPU "
.size .Lstr, 24
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__gpuAddPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6gpuAddPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6gpuAddPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, 0x3e8, R3 ; /* 0x000003e806067824 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R6, 0x98967f, PT ; /* 0x0098967f0600780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6gpuAddPiS_S_
.globl _Z6gpuAddPiS_S_
.p2align 8
.type _Z6gpuAddPiS_S_,@function
_Z6gpuAddPiS_S_:
v_mad_u64_u32 v[1:2], null, s15, 0x3e8, v[0:1]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x989680, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6gpuAddPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6gpuAddPiS_S_, .Lfunc_end0-_Z6gpuAddPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6gpuAddPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6gpuAddPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002beec_00000000-6_Add_gpu.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
.type _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, @function
_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6gpuAddPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
.globl _Z6gpuAddPiS_S_
.type _Z6gpuAddPiS_S_, @function
_Z6gpuAddPiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6gpuAddPiS_S_, .-_Z6gpuAddPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Vector addition on GPU \n"
.LC2:
.string "N = %d \n"
.LC3:
.string "Execute time: %f seconds \n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "The sum of %d element is %d + %d = %d\n"
.section .rodata.str1.1
.LC5:
.string "pause"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $40000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $40000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $40000000, %edi
call malloc@PLT
movq %rax, %r13
movl $1, %eax
.L12:
movl %eax, -4(%rbx,%rax,4)
leal -2(%rax), %edx
movl %edx, -4(%rbp,%rax,4)
addq $1, %rax
cmpq $10000001, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40000000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $40000000, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40000000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, %r14
movl $1000, 44(%rsp)
movl $1, 48(%rsp)
movl $10000, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaThreadSynchronize@PLT
call clock@PLT
movq %rax, %r12
movl $2, %ecx
movl $40000000, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
subq %r14, %r12
pxor %xmm0, %xmm0
cvtsi2sdq %r12, %xmm0
divsd .LC1(%rip), %xmm0
movq %xmm0, %r12
movl $10000000, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $9999990, %r12d
leaq .LC4(%rip), %r14
.L14:
movl (%rbx,%r12,4), %ecx
movl 0(%r13,%r12,4), %r9d
movl 0(%rbp,%r12,4), %r8d
movl %r12d, %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %r12
cmpq $10000000, %r12
jne .L14
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
leaq .LC5(%rip), %rdi
call system@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z29__device_stub__Z6gpuAddPiS_S_PiS_S_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z6gpuAddPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z6gpuAddPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Add_gpu.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__gpuAddPiS_S_ # -- Begin function _Z21__device_stub__gpuAddPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__gpuAddPiS_S_,@function
_Z21__device_stub__gpuAddPiS_S_: # @_Z21__device_stub__gpuAddPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6gpuAddPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__gpuAddPiS_S_, .Lfunc_end0-_Z21__device_stub__gpuAddPiS_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %rbx
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %r14
movl $40000000, %edi # imm = 0x2625A00
callq malloc
movq %rax, %r15
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq 1(%rax), %rcx
movl %ecx, (%rbx,%rax,4)
leal -1(%rax), %edx
movl %edx, (%r14,%rax,4)
movq %rcx, %rax
cmpq $10000000, %rcx # imm = 0x989680
jne .LBB1_1
# %bb.2:
leaq 24(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40000000, %esi # imm = 0x2625A00
callq hipMalloc
movq 24(%rsp), %rdi
movl $40000000, %edx # imm = 0x2625A00
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $40000000, %edx # imm = 0x2625A00
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %r12
movabsq $4294968296, %rdx # imm = 0x1000003E8
leaq 9000(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6gpuAddPiS_S_, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
callq clock
movq %rax, %r13
movq 8(%rsp), %rsi
movl $40000000, %edx # imm = 0x2625A00
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
subq %r12, %r13
cvtsi2sd %r13, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movsd %xmm0, 32(%rsp) # 8-byte Spill
movl $.L.str.1, %edi
movl $10000000, %esi # imm = 0x989680
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movsd 32(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movl $9999990, %r12d # imm = 0x989676
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %edx
movl (%r14,%r12,4), %ecx
movl (%r15,%r12,4), %r8d
movl $.L.str.3, %edi
movl %r12d, %esi
xorl %eax, %eax
callq printf
incq %r12
cmpq $10000000, %r12 # imm = 0x989680
jne .LBB1_5
# %bb.6:
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl $.L.str.4, %edi
callq system
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6gpuAddPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6gpuAddPiS_S_,@object # @_Z6gpuAddPiS_S_
.section .rodata,"a",@progbits
.globl _Z6gpuAddPiS_S_
.p2align 3, 0x0
_Z6gpuAddPiS_S_:
.quad _Z21__device_stub__gpuAddPiS_S_
.size _Z6gpuAddPiS_S_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "N = %d \n"
.size .L.str.1, 9
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Execute time: %f seconds \n"
.size .L.str.2, 27
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "The sum of %d element is %d + %d = %d\n"
.size .L.str.3, 39
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "pause"
.size .L.str.4, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6gpuAddPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Vector addition on GPU "
.size .Lstr, 24
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__gpuAddPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6gpuAddPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} | code for sm_80
Function : _Z16kernelInitNablaWPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0003 */
/*0090*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} | .file "tmpxft_0001a24f_00000000-6_kernelInitNablaW.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z16kernelInitNablaWPfiPfi
.type _Z37__device_stub__Z16kernelInitNablaWPfiPfi, @function
_Z37__device_stub__Z16kernelInitNablaWPfiPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z16kernelInitNablaWPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z16kernelInitNablaWPfiPfi, .-_Z37__device_stub__Z16kernelInitNablaWPfiPfi
.globl _Z16kernelInitNablaWPfi
.type _Z16kernelInitNablaWPfi, @function
_Z16kernelInitNablaWPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z16kernelInitNablaWPfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16kernelInitNablaWPfi, .-_Z16kernelInitNablaWPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16kernelInitNablaWPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16kernelInitNablaWPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16kernelInitNablaWPfi
.globl _Z16kernelInitNablaWPfi
.p2align 8
.type _Z16kernelInitNablaWPfi,@function
_Z16kernelInitNablaWPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16kernelInitNablaWPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16kernelInitNablaWPfi, .Lfunc_end0-_Z16kernelInitNablaWPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16kernelInitNablaWPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16kernelInitNablaWPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernelInitNablaW(float *nabla_w,int tws) {
if ((blockIdx.x*blockDim.x+threadIdx.x)<tws) {
nabla_w[blockIdx.x*blockDim.x+threadIdx.x]=0.0;
}
} | .text
.file "kernelInitNablaW.hip"
.globl _Z31__device_stub__kernelInitNablaWPfi # -- Begin function _Z31__device_stub__kernelInitNablaWPfi
.p2align 4, 0x90
.type _Z31__device_stub__kernelInitNablaWPfi,@function
_Z31__device_stub__kernelInitNablaWPfi: # @_Z31__device_stub__kernelInitNablaWPfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z16kernelInitNablaWPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z31__device_stub__kernelInitNablaWPfi, .Lfunc_end0-_Z31__device_stub__kernelInitNablaWPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16kernelInitNablaWPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16kernelInitNablaWPfi,@object # @_Z16kernelInitNablaWPfi
.section .rodata,"a",@progbits
.globl _Z16kernelInitNablaWPfi
.p2align 3, 0x0
_Z16kernelInitNablaWPfi:
.quad _Z31__device_stub__kernelInitNablaWPfi
.size _Z16kernelInitNablaWPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16kernelInitNablaWPfi"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__kernelInitNablaWPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16kernelInitNablaWPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16kernelInitNablaWPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0003 */
/*0090*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16kernelInitNablaWPfi
.globl _Z16kernelInitNablaWPfi
.p2align 8
.type _Z16kernelInitNablaWPfi,@function
_Z16kernelInitNablaWPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16kernelInitNablaWPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16kernelInitNablaWPfi, .Lfunc_end0-_Z16kernelInitNablaWPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16kernelInitNablaWPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16kernelInitNablaWPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0001a24f_00000000-6_kernelInitNablaW.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z16kernelInitNablaWPfiPfi
.type _Z37__device_stub__Z16kernelInitNablaWPfiPfi, @function
_Z37__device_stub__Z16kernelInitNablaWPfiPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z16kernelInitNablaWPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z16kernelInitNablaWPfiPfi, .-_Z37__device_stub__Z16kernelInitNablaWPfiPfi
.globl _Z16kernelInitNablaWPfi
.type _Z16kernelInitNablaWPfi, @function
_Z16kernelInitNablaWPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z16kernelInitNablaWPfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16kernelInitNablaWPfi, .-_Z16kernelInitNablaWPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16kernelInitNablaWPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16kernelInitNablaWPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernelInitNablaW.hip"
.globl _Z31__device_stub__kernelInitNablaWPfi # -- Begin function _Z31__device_stub__kernelInitNablaWPfi
.p2align 4, 0x90
.type _Z31__device_stub__kernelInitNablaWPfi,@function
_Z31__device_stub__kernelInitNablaWPfi: # @_Z31__device_stub__kernelInitNablaWPfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z16kernelInitNablaWPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z31__device_stub__kernelInitNablaWPfi, .Lfunc_end0-_Z31__device_stub__kernelInitNablaWPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16kernelInitNablaWPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16kernelInitNablaWPfi,@object # @_Z16kernelInitNablaWPfi
.section .rodata,"a",@progbits
.globl _Z16kernelInitNablaWPfi
.p2align 3, 0x0
_Z16kernelInitNablaWPfi:
.quad _Z31__device_stub__kernelInitNablaWPfi
.size _Z16kernelInitNablaWPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16kernelInitNablaWPfi"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__kernelInitNablaWPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16kernelInitNablaWPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void binPacking (float *bins, float *items, float cap, int length)
{
int x = 0;
for(int i = 0; i < length; i++)
{
x = 0;
if(items[i] > cap)
{
printf ("Element %f je veci od kapaciteta spremnika koji je %f. PREKID!\n", items[i], cap);
break;
}
if(bins[0 * length + x] >= items[i])
{
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
else
{
while(bins[0 * length + x] < items[i])
{
x+=1;
}
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
}
} | code for sm_80
Function : _Z10binPackingPfS_fi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff097624 */
/* 0x000fe200078e00ff */
/*0020*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */
/* 0x000fc80007ffe0ff */
/*0030*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fda0003f06270 */
/*0040*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0050*/ SHF.R.S32.HI R2, RZ, 0x1f, R9 ; /* 0x0000001fff027819 */
/* 0x000fe20000011409 */
/*0060*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */
/* 0x000fe200078e00ff */
/*0070*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f1e0ff */
/*0080*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0090*/ SHF.L.U64.HI R19, R9.reuse, 0x2, R2 ; /* 0x0000000209137819 */
/* 0x040fe20000010202 */
/*00a0*/ IMAD.WIDE R2, R9, R4, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x000fe200078e0204 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00c0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe400000e06ff */
/*00d0*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff117424 */
/* 0x001fc800078e00ff */
/*00e0*/ IMAD.WIDE R4, R0, R17, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0211 */
/*00f0*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea4000c1e1900 */
/*0100*/ FSETP.GT.AND P0, PT, R8, c[0x0][0x170], PT ; /* 0x00005c0008007a0b */
/* 0x004fda0003f04000 */
/*0110*/ @P0 BRA 0x300 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*0120*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff0a7624 */
/* 0x000fe400078e00ff */
/*0130*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff0b7624 */
/* 0x000fca00078e00ff */
/*0140*/ LDG.E R13, [R10.64] ; /* 0x000000040a0d7981 */
/* 0x000ea4000c1e1900 */
/*0150*/ FSETP.GE.AND P0, PT, R13, R8, PT ; /* 0x000000080d00720b */
/* 0x004fda0003f06000 */
/*0160*/ @!P0 BRA 0x1e0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0170*/ FADD R13, -R8, R13 ; /* 0x0000000d080d7221 */
/* 0x000fca0000000100 */
/*0180*/ STG.E [R10.64], R13 ; /* 0x0000000d0a007986 */
/* 0x0001e8000c101904 */
/*0190*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*01a0*/ LDG.E R8, [R2.64] ; /* 0x0000000402087981 */
/* 0x000ea4000c1e1900 */
/*01b0*/ FADD R15, R8, R5 ; /* 0x00000005080f7221 */
/* 0x004fca0000000000 */
/*01c0*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e2000c101904 */
/*01d0*/ BRA 0x2c0 ; /* 0x000000e000007947 */
/* 0x000fea0003800000 */
/*01e0*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x000fc800078e00ff */
/*01f0*/ IMAD.WIDE R12, R10, R17, c[0x0][0x160] ; /* 0x000058000a0c7625 */
/* 0x000fca00078e0211 */
/*0200*/ LDG.E R15, [R12.64] ; /* 0x000000040c0f7981 */
/* 0x000ea2000c1e1900 */
/*0210*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fe40007ffe0ff */
/*0220*/ FSETP.GEU.AND P0, PT, R15, R8, PT ; /* 0x000000080f00720b */
/* 0x004fda0003f0e000 */
/*0230*/ @!P0 BRA 0x1f0 ; /* 0xffffffb000008947 */
/* 0x000fea000383ffff */
/*0240*/ LEA R10, P0, R9, R12, 0x2 ; /* 0x0000000c090a7211 */
/* 0x000fe200078010ff */
/*0250*/ FADD R15, -R8, R15 ; /* 0x0000000f080f7221 */
/* 0x000fc80000000100 */
/*0260*/ IMAD.X R11, R13, 0x1, R19, P0 ; /* 0x000000010d0b7824 */
/* 0x000fe200000e0613 */
/*0270*/ STG.E [R12.64], R15 ; /* 0x0000000f0c007986 */
/* 0x0001e8000c101904 */
/*0280*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0290*/ LDG.E R8, [R10.64] ; /* 0x000000040a087981 */
/* 0x000ea4000c1e1900 */
/*02a0*/ FADD R17, R8, R5 ; /* 0x0000000508117221 */
/* 0x004fca0000000000 */
/*02b0*/ STG.E [R10.64], R17 ; /* 0x000000110a007986 */
/* 0x0001e4000c101904 */
/*02c0*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x000fc80007ffe0ff */
/*02d0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f06270 */
/*02e0*/ @!P0 BRA 0xd0 ; /* 0xfffffde000008947 */
/* 0x000fea000383ffff */
/*02f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0300*/ F2F.F64.F32 R8, R8 ; /* 0x0000000800087310 */
/* 0x000fe20000201800 */
/*0310*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0320*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe400078e00ff */
/*0330*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0340*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0000660000000a00 */
/*0350*/ F2F.F64.F32 R10, c[0x0][0x170] ; /* 0x00005c00000a7b10 */
/* 0x000ea40000201800 */
/*0360*/ STL.128 [R1], R8 ; /* 0x0000000801007387 */
/* 0x0041e40000100c00 */
/*0370*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x001fe20000000000 */
/*0380*/ MOV R11, 0x3f0 ; /* 0x000003f0000b7802 */
/* 0x000fc40000000f00 */
/*0390*/ MOV R20, 0x370 ; /* 0x0000037000147802 */
/* 0x000fe40000000f00 */
/*03a0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*03b0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*03c0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*03d0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*03e0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*03f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0400*/ BRA 0x400; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void binPacking (float *bins, float *items, float cap, int length)
{
int x = 0;
for(int i = 0; i < length; i++)
{
x = 0;
if(items[i] > cap)
{
printf ("Element %f je veci od kapaciteta spremnika koji je %f. PREKID!\n", items[i], cap);
break;
}
if(bins[0 * length + x] >= items[i])
{
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
else
{
while(bins[0 * length + x] < items[i])
{
x+=1;
}
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
}
} | .file "tmpxft_00060c51_00000000-6_firstFitAlgorithm.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z10binPackingPfS_fiPfS_fi
.type _Z34__device_stub__Z10binPackingPfS_fiPfS_fi, @function
_Z34__device_stub__Z10binPackingPfS_fiPfS_fi:
.LFB2081:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10binPackingPfS_fi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z34__device_stub__Z10binPackingPfS_fiPfS_fi, .-_Z34__device_stub__Z10binPackingPfS_fiPfS_fi
.globl _Z10binPackingPfS_fi
.type _Z10binPackingPfS_fi, @function
_Z10binPackingPfS_fi:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z10binPackingPfS_fiPfS_fi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z10binPackingPfS_fi, .-_Z10binPackingPfS_fi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10binPackingPfS_fi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10binPackingPfS_fi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void binPacking (float *bins, float *items, float cap, int length)
{
int x = 0;
for(int i = 0; i < length; i++)
{
x = 0;
if(items[i] > cap)
{
printf ("Element %f je veci od kapaciteta spremnika koji je %f. PREKID!\n", items[i], cap);
break;
}
if(bins[0 * length + x] >= items[i])
{
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
else
{
while(bins[0 * length + x] < items[i])
{
x+=1;
}
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
}
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void binPacking (float *bins, float *items, float cap, int length)
{
int x = 0;
for(int i = 0; i < length; i++)
{
x = 0;
if(items[i] > cap)
{
printf ("Element %f je veci od kapaciteta spremnika koji je %f. PREKID!\n", items[i], cap);
break;
}
if(bins[0 * length + x] >= items[i])
{
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
else
{
while(bins[0 * length + x] < items[i])
{
x+=1;
}
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void binPacking (float *bins, float *items, float cap, int length)
{
int x = 0;
for(int i = 0; i < length; i++)
{
x = 0;
if(items[i] > cap)
{
printf ("Element %f je veci od kapaciteta spremnika koji je %f. PREKID!\n", items[i], cap);
break;
}
if(bins[0 * length + x] >= items[i])
{
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
else
{
while(bins[0 * length + x] < items[i])
{
x+=1;
}
bins[0 * length + x] -= items[i];
bins[1 * length + x] += items[i];
}
}
} | .text
.file "firstFitAlgorithm.hip"
.globl _Z25__device_stub__binPackingPfS_fi # -- Begin function _Z25__device_stub__binPackingPfS_fi
.p2align 4, 0x90
.type _Z25__device_stub__binPackingPfS_fi,@function
_Z25__device_stub__binPackingPfS_fi: # @_Z25__device_stub__binPackingPfS_fi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10binPackingPfS_fi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__binPackingPfS_fi, .Lfunc_end0-_Z25__device_stub__binPackingPfS_fi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10binPackingPfS_fi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10binPackingPfS_fi,@object # @_Z10binPackingPfS_fi
.section .rodata,"a",@progbits
.globl _Z10binPackingPfS_fi
.p2align 3, 0x0
_Z10binPackingPfS_fi:
.quad _Z25__device_stub__binPackingPfS_fi
.size _Z10binPackingPfS_fi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10binPackingPfS_fi"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__binPackingPfS_fi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10binPackingPfS_fi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00060c51_00000000-6_firstFitAlgorithm.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z10binPackingPfS_fiPfS_fi
.type _Z34__device_stub__Z10binPackingPfS_fiPfS_fi, @function
_Z34__device_stub__Z10binPackingPfS_fiPfS_fi:
.LFB2081:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10binPackingPfS_fi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z34__device_stub__Z10binPackingPfS_fiPfS_fi, .-_Z34__device_stub__Z10binPackingPfS_fiPfS_fi
.globl _Z10binPackingPfS_fi
.type _Z10binPackingPfS_fi, @function
_Z10binPackingPfS_fi:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z10binPackingPfS_fiPfS_fi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z10binPackingPfS_fi, .-_Z10binPackingPfS_fi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10binPackingPfS_fi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10binPackingPfS_fi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "firstFitAlgorithm.hip"
.globl _Z25__device_stub__binPackingPfS_fi # -- Begin function _Z25__device_stub__binPackingPfS_fi
.p2align 4, 0x90
.type _Z25__device_stub__binPackingPfS_fi,@function
_Z25__device_stub__binPackingPfS_fi: # @_Z25__device_stub__binPackingPfS_fi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10binPackingPfS_fi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__binPackingPfS_fi, .Lfunc_end0-_Z25__device_stub__binPackingPfS_fi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10binPackingPfS_fi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10binPackingPfS_fi,@object # @_Z10binPackingPfS_fi
.section .rodata,"a",@progbits
.globl _Z10binPackingPfS_fi
.p2align 3, 0x0
_Z10binPackingPfS_fi:
.quad _Z25__device_stub__binPackingPfS_fi
.size _Z10binPackingPfS_fi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10binPackingPfS_fi"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__binPackingPfS_fi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10binPackingPfS_fi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*********************************************************************
* Copyright © 2011-2012,
* Marwan Abdellah: <abdellah.marwan@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
********************************************************************/
/*!
* CUDA : This kernel adds a constant value to the input vector into the
* output vector with length N.
*
* @param devArrayInput
* Input vector.
*
* @param constVal
* Constant value to be added to the input device vector.
*
* @param devArrayOutput
* Sum vector.
*
* @param N
* Vector length.
*
* @author
* Marwan Abdellah <abdellah.marwan@gmail.com>
*
* @date
* Created: August, 2012.
* @date
* Last Update: September, 2012.
*
* @note
* Minimum CUDA version 3.2.
* @note
* Minimum Device Compute Capability 1.0.
*/
template <typename T>
__global__
void Constant_Add_1D_Array_Kernel(T* devArrayInput,
T constVal,
T* devArrayOutput,
int N)
{
int xThreadIdx = threadIdx.x;
int blockWidth = blockDim.x;
int index = blockIdx.x * blockWidth + xThreadIdx;
#ifdef VEC_CHECK
if (index < N)
devArrayOutput[index] = (T) ((T) devArrayInput[index] + (T) constVal);
#else
devArrayOutput[index] = (T) ((T) devArrayInput[index] + (T) constVal);
#endif
} | code for sm_80 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.