system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z15MatrixMulKernel6MatrixS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */
/* 0x000e220000002600 */
/*0020*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff007624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e640000002500 */
/*0060*/ ISETP.GE.AND P0, PT, R0, 0x4, PT ; /* 0x000000040000780c */
/* 0x000fe40003f06270 */
/*0070*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000ea20000002200 */
/*0080*/ SHF.R.S32.HI R0, RZ, 0x1f, R0 ; /* 0x0000001fff007819 */
/* 0x000fc60000011400 */
/*0090*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000ee20000002100 */
/*00a0*/ LEA.HI R0, R0, c[0x0][0x160], RZ, 0x2 ; /* 0x0000580000007a11 */
/* 0x000fcc00078f10ff */
/*00b0*/ @!P0 BRA 0x900 ; /* 0x0000084000008947 */
/* 0x000fea0003800000 */
/*00c0*/ SHF.R.S32.HI R21, RZ, 0x2, R0 ; /* 0x00000002ff157819 */
/* 0x000fe20000011400 */
/*00d0*/ IMAD R18, R3.reuse, c[0x0][0x170], R2.reuse ; /* 0x00005c0003127a24 */
/* 0x14cfe200078e0202 */
/*00e0*/ MOV R19, RZ ; /* 0x000000ff00137202 */
/* 0x000fe20000000f00 */
/*00f0*/ IMAD.SHL.U32 R7, R3, 0x10, RZ ; /* 0x0000001003077824 */
/* 0x000fe200078e00ff */
/*0100*/ IADD3 R6, R21.reuse, -0x1, RZ ; /* 0xffffffff15067810 */
/* 0x040fe20007ffe0ff */
/*0110*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e00ff */
/*0120*/ LOP3.LUT R0, R21, 0x3, RZ, 0xc0, !PT ; /* 0x0000000315007812 */
/* 0x000fe200078ec0ff */
/*0130*/ IMAD R7, R2, 0x4, R7 ; /* 0x0000000402077824 */
/* 0x000fe200078e0207 */
/*0140*/ ISETP.GE.U32.AND P1, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe20003f26070 */
/*0150*/ IMAD R6, R3, c[0x0][0x188], R2 ; /* 0x0000620003067a24 */
/* 0x000fe200078e0202 */
/*0160*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fc40003f05270 */
/*0170*/ SHF.R.S32.HI R16, RZ, 0x1f, R18 ; /* 0x0000001fff107819 */
/* 0x000fe40000011412 */
/*0180*/ SHF.R.S32.HI R17, RZ, 0x1f, R6 ; /* 0x0000001fff117819 */
/* 0x000fce0000011406 */
/*0190*/ @!P1 BRA 0x6e0 ; /* 0x0000054000009947 */
/* 0x000fea0003800000 */
/*01a0*/ IMAD.MOV.U32 R20, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff147624 */
/* 0x000fe200078e00ff */
/*01b0*/ HFMA2.MMA R19, -RZ, RZ, 0, 0 ; /* 0x00000000ff137435 */
/* 0x000fe200000001ff */
/*01c0*/ IADD3 R21, R21, -R0, RZ ; /* 0x8000000015157210 */
/* 0x000fe20007ffe0ff */
/*01d0*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD.SHL.U32 R20, R20, 0x4, RZ ; /* 0x0000000414147824 */
/* 0x000fcc00078e00ff */
/*01f0*/ IMAD R8, R5, c[0x0][0x170], R19 ; /* 0x00005c0005087a24 */
/* 0x001fe400078e0213 */
/*0200*/ IMAD R10, R19, c[0x0][0x188], R4 ; /* 0x00006200130a7a24 */
/* 0x002fc600078e0204 */
/*0210*/ SHF.L.U32 R9, R8, 0x2, RZ ; /* 0x0000000208097819 */
/* 0x000fe200000006ff */
/*0220*/ IMAD.SHL.U32 R10, R10, 0x4, RZ ; /* 0x000000040a0a7824 */
/* 0x000fc600078e00ff */
/*0230*/ IADD3 R11, P1, R18, R9, RZ ; /* 0x00000009120b7210 */
/* 0x000fe40007f3e0ff */
/*0240*/ IADD3 R8, P2, R6, R10, RZ ; /* 0x0000000a06087210 */
/* 0x000fe40007f5e0ff */
/*0250*/ LEA.HI.X.SX32 R14, R9, R16, 0x1, P1 ; /* 0x00000010090e7211 */
/* 0x000fe400008f0eff */
/*0260*/ LEA.HI.X.SX32 R9, R10, R17, 0x1, P2 ; /* 0x000000110a097211 */
/* 0x000fe400010f0eff */
/*0270*/ LEA R22, P1, R11, c[0x0][0x168], 0x2 ; /* 0x00005a000b167a11 */
/* 0x000fe400078210ff */
/*0280*/ LEA R12, P2, R8, c[0x0][0x180], 0x2 ; /* 0x00006000080c7a11 */
/* 0x000fc400078410ff */
/*0290*/ LEA.HI.X R23, R11, c[0x0][0x16c], R14, 0x2, P1 ; /* 0x00005b000b177a11 */
/* 0x000fe400008f140e */
/*02a0*/ LEA.HI.X R13, R8, c[0x0][0x184], R9, 0x2, P2 ; /* 0x00006100080d7a11 */
/* 0x000fc600010f1409 */
/*02b0*/ LDG.E R14, [R22.64] ; /* 0x00000004160e7981 */
/* 0x000ea8000c1e1900 */
/*02c0*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */
/* 0x000ee2000c1e1900 */
/*02d0*/ IMAD.WIDE R24, R20, 0x4, R12 ; /* 0x0000000414187825 */
/* 0x000fc600078e020c */
/*02e0*/ STS [R7], R14 ; /* 0x0000000e07007388 */
/* 0x004fe80000000800 */
/*02f0*/ STS [R7+0x40], R26 ; /* 0x0000401a07007388 */
/* 0x008fe80000000800 */
/*0300*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0310*/ LDS R27, [R2.X4+0x40] ; /* 0x00004000021b7984 */
/* 0x000fe80000004800 */
/*0320*/ LDS.128 R8, [R3.X16] ; /* 0x0000000003087984 */
/* 0x000e28000000cc00 */
/*0330*/ LDS R29, [R2.X4+0x50] ; /* 0x00005000021d7984 */
/* 0x000e620000004800 */
/*0340*/ FFMA R28, R27, R8, R15 ; /* 0x000000081b1c7223 */
/* 0x001fc6000000000f */
/*0350*/ LDS R15, [R2.X4+0x60] ; /* 0x00006000020f7984 */
/* 0x000e280000004800 */
/*0360*/ LDS R8, [R2.X4+0x70] ; /* 0x0000700002087984 */
/* 0x000ea80000004800 */
/*0370*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0380*/ LDG.E R27, [R22.64+0x10] ; /* 0x00001004161b7981 */
/* 0x000ee8000c1e1900 */
/*0390*/ LDG.E R26, [R24.64] ; /* 0x00000004181a7981 */
/* 0x000f22000c1e1900 */
/*03a0*/ FFMA R9, R29, R9, R28 ; /* 0x000000091d097223 */
/* 0x002fc8000000001c */
/*03b0*/ FFMA R9, R15, R10, R9 ; /* 0x0000000a0f097223 */
/* 0x001fc80000000009 */
/*03c0*/ FFMA R11, R8, R11, R9 ; /* 0x0000000b080b7223 */
/* 0x004fe40000000009 */
/*03d0*/ IMAD.WIDE R8, R20, 0x4, R24 ; /* 0x0000000414087825 */
/* 0x000fe200078e0218 */
/*03e0*/ STS [R7], R27 ; /* 0x0000001b07007388 */
/* 0x008fe80000000800 */
/*03f0*/ STS [R7+0x40], R26 ; /* 0x0000401a07007388 */
/* 0x010fe80000000800 */
/*0400*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0410*/ LDS R10, [R2.X4+0x40] ; /* 0x00004000020a7984 */
/* 0x000fe80000004800 */
/*0420*/ LDS.128 R12, [R3.X16] ; /* 0x00000000030c7984 */
/* 0x000e28000000cc00 */
/*0430*/ LDS R28, [R2.X4+0x50] ; /* 0x00005000021c7984 */
/* 0x000e620000004800 */
/*0440*/ FFMA R10, R10, R12, R11 ; /* 0x0000000c0a0a7223 */
/* 0x001fc6000000000b */
/*0450*/ LDS R11, [R2.X4+0x60] ; /* 0x00006000020b7984 */
/* 0x000e280000004800 */
/*0460*/ LDS R12, [R2.X4+0x70] ; /* 0x00007000020c7984 */
/* 0x000ea80000004800 */
/*0470*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0480*/ LDG.E R29, [R22.64+0x20] ; /* 0x00002004161d7981 */
/* 0x000ee8000c1e1900 */
/*0490*/ LDG.E R24, [R8.64] ; /* 0x0000000408187981 */
/* 0x000f22000c1e1900 */
/*04a0*/ FFMA R13, R28, R13, R10 ; /* 0x0000000d1c0d7223 */
/* 0x002fc8000000000a */
/*04b0*/ FFMA R14, R11, R14, R13 ; /* 0x0000000e0b0e7223 */
/* 0x001fc8000000000d */
/*04c0*/ FFMA R15, R12, R15, R14 ; /* 0x0000000f0c0f7223 */
/* 0x004fe2000000000e */
/*04d0*/ STS [R7], R29 ; /* 0x0000001d07007388 */
/* 0x008fe80000000800 */
/*04e0*/ STS [R7+0x40], R24 ; /* 0x0000401807007388 */
/* 0x0101e80000000800 */
/*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0500*/ IMAD.WIDE R24, R20, 0x4, R8 ; /* 0x0000000414187825 */
/* 0x001fca00078e0208 */
/*0510*/ LDS R12, [R2.X4+0x40] ; /* 0x00004000020c7984 */
/* 0x000fe80000004800 */
/*0520*/ LDS R26, [R2.X4+0x50] ; /* 0x00005000021a7984 */
/* 0x000fe80000004800 */
/*0530*/ LDS R27, [R2.X4+0x60] ; /* 0x00006000021b7984 */
/* 0x000fe80000004800 */
/*0540*/ LDS R28, [R2.X4+0x70] ; /* 0x00007000021c7984 */
/* 0x000fe80000004800 */
/*0550*/ LDS.128 R8, [R3.X16] ; /* 0x0000000003087984 */
/* 0x000e28000000cc00 */
/*0560*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0570*/ LDG.E R22, [R22.64+0x30] ; /* 0x0000300416167981 */
/* 0x000ea8000c1e1900 */
/*0580*/ LDG.E R24, [R24.64] ; /* 0x0000000418187981 */
/* 0x000ee2000c1e1900 */
/*0590*/ IADD3 R21, R21, -0x4, RZ ; /* 0xfffffffc15157810 */
/* 0x000fc40007ffe0ff */
/*05a0*/ IADD3 R19, R19, 0x4, RZ ; /* 0x0000000413137810 */
/* 0x000fe40007ffe0ff */
/*05b0*/ ISETP.NE.AND P1, PT, R21, RZ, PT ; /* 0x000000ff1500720c */
/* 0x000fe20003f25270 */
/*05c0*/ FFMA R8, R12, R8, R15 ; /* 0x000000080c087223 */
/* 0x001fc8000000000f */
/*05d0*/ FFMA R26, R26, R9, R8 ; /* 0x000000091a1a7223 */
/* 0x000fc80000000008 */
/*05e0*/ FFMA R10, R27, R10, R26 ; /* 0x0000000a1b0a7223 */
/* 0x000fc8000000001a */
/*05f0*/ FFMA R10, R28, R11, R10 ; /* 0x0000000b1c0a7223 */
/* 0x000fe2000000000a */
/*0600*/ STS [R7], R22 ; /* 0x0000001607007388 */
/* 0x004fe80000000800 */
/*0610*/ STS [R7+0x40], R24 ; /* 0x0000401807007388 */
/* 0x008fe80000000800 */
/*0620*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0630*/ LDS R29, [R2.X4+0x40] ; /* 0x00004000021d7984 */
/* 0x000fe80000004800 */
/*0640*/ LDS.128 R12, [R3.X16] ; /* 0x00000000030c7984 */
/* 0x000e28000000cc00 */
/*0650*/ LDS R23, [R2.X4+0x50] ; /* 0x0000500002177984 */
/* 0x000e680000004800 */
/*0660*/ LDS R9, [R2.X4+0x60] ; /* 0x0000600002097984 */
/* 0x000ea80000004800 */
/*0670*/ LDS R8, [R2.X4+0x70] ; /* 0x0000700002087984 */
/* 0x000ee20000004800 */
/*0680*/ FFMA R10, R29, R12, R10 ; /* 0x0000000c1d0a7223 */
/* 0x001fc8000000000a */
/*0690*/ FFMA R10, R23, R13, R10 ; /* 0x0000000d170a7223 */
/* 0x002fc8000000000a */
/*06a0*/ FFMA R9, R9, R14, R10 ; /* 0x0000000e09097223 */
/* 0x004fc8000000000a */
/*06b0*/ FFMA R15, R8, R15, R9 ; /* 0x0000000f080f7223 */
/* 0x008fe20000000009 */
/*06c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*06d0*/ @P1 BRA 0x1f0 ; /* 0xfffffb1000001947 */
/* 0x000fea000383ffff */
/*06e0*/ @!P0 BRA 0x900 ; /* 0x0000021000008947 */
/* 0x000fea0003800000 */
/*06f0*/ IMAD R14, R19, c[0x0][0x188], R4 ; /* 0x00006200130e7a24 */
/* 0x002fe400078e0204 */
/*0700*/ IMAD R19, R5, c[0x0][0x170], R19 ; /* 0x00005c0005137a24 */
/* 0x001fc600078e0213 */
/*0710*/ SHF.L.U32 R14, R14, 0x2, RZ ; /* 0x000000020e0e7819 */
/* 0x000fe200000006ff */
/*0720*/ IMAD.SHL.U32 R19, R19, 0x4, RZ ; /* 0x0000000413137824 */
/* 0x000fca00078e00ff */
/*0730*/ IADD3 R10, P0, R18, R19, RZ ; /* 0x00000013120a7210 */
/* 0x000fe40007f1e0ff */
/*0740*/ IADD3 R8, P1, R6, R14, RZ ; /* 0x0000000e06087210 */
/* 0x000fe40007f3e0ff */
/*0750*/ LEA.HI.X.SX32 R11, R19, R16, 0x1, P0 ; /* 0x00000010130b7211 */
/* 0x000fe400000f0eff */
/*0760*/ LEA.HI.X.SX32 R9, R14, R17, 0x1, P1 ; /* 0x000000110e097211 */
/* 0x000fe400008f0eff */
/*0770*/ LEA R12, P0, R10, c[0x0][0x168], 0x2 ; /* 0x00005a000a0c7a11 */
/* 0x000fe400078010ff */
/*0780*/ LEA R20, P1, R8, c[0x0][0x180], 0x2 ; /* 0x0000600008147a11 */
/* 0x000fc400078210ff */
/*0790*/ LEA.HI.X R13, R10, c[0x0][0x16c], R11, 0x2, P0 ; /* 0x00005b000a0d7a11 */
/* 0x000fe400000f140b */
/*07a0*/ LEA.HI.X R21, R8, c[0x0][0x184], R9, 0x2, P1 ; /* 0x0000610008157a11 */
/* 0x000fc600008f1409 */
/*07b0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x0000a8000c1e1900 */
/*07c0*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000ee2000c1e1900 */
/*07d0*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fe40007ffe0ff */
/*07e0*/ IADD3 R19, R19, 0x4, RZ ; /* 0x0000000413137810 */
/* 0x000fe40007ffe0ff */
/*07f0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f05270 */
/*0800*/ MOV R13, c[0x0][0x188] ; /* 0x00006200000d7a02 */
/* 0x001fca0000000f00 */
/*0810*/ IMAD R14, R13, 0x4, R14 ; /* 0x000000040d0e7824 */
/* 0x000fe200078e020e */
/*0820*/ STS [R7], R12 ; /* 0x0000000c07007388 */
/* 0x004fe80000000800 */
/*0830*/ STS [R7+0x40], R20 ; /* 0x0000401407007388 */
/* 0x008fe80000000800 */
/*0840*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0850*/ LDS R22, [R2.X4+0x40] ; /* 0x0000400002167984 */
/* 0x000fe80000004800 */
/*0860*/ LDS.128 R8, [R3.X16] ; /* 0x0000000003087984 */
/* 0x000e28000000cc00 */
/*0870*/ LDS R23, [R2.X4+0x50] ; /* 0x0000500002177984 */
/* 0x000e680000004800 */
/*0880*/ LDS R24, [R2.X4+0x60] ; /* 0x0000600002187984 */
/* 0x000ea80000004800 */
/*0890*/ LDS R25, [R2.X4+0x70] ; /* 0x0000700002197984 */
/* 0x000ee20000004800 */
/*08a0*/ FFMA R8, R22, R8, R15 ; /* 0x0000000816087223 */
/* 0x001fc8000000000f */
/*08b0*/ FFMA R9, R23, R9, R8 ; /* 0x0000000917097223 */
/* 0x002fc80000000008 */
/*08c0*/ FFMA R10, R24, R10, R9 ; /* 0x0000000a180a7223 */
/* 0x004fc80000000009 */
/*08d0*/ FFMA R15, R25, R11, R10 ; /* 0x0000000b190f7223 */
/* 0x008fe2000000000a */
/*08e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*08f0*/ @P0 BRA 0x730 ; /* 0xfffffe3000000947 */
/* 0x000fea000383ffff */
/*0900*/ IMAD R4, R5, c[0x0][0x1a0], R4 ; /* 0x0000680005047a24 */
/* 0x003fe400078e0204 */
/*0910*/ IMAD R2, R3, c[0x0][0x1a0], R2 ; /* 0x0000680003027a24 */
/* 0x00cfc600078e0202 */
/*0920*/ SHF.L.U32 R3, R4, 0x2, RZ ; /* 0x0000000204037819 */
/* 0x000fc800000006ff */
/*0930*/ SHF.R.S32.HI R5, RZ, 0x1f, R3 ; /* 0x0000001fff057819 */
/* 0x000fe40000011403 */
/*0940*/ IADD3 R3, P0, R2, R3, RZ ; /* 0x0000000302037210 */
/* 0x000fc80007f1e0ff */
/*0950*/ LEA.HI.X.SX32 R0, R2, R5, 0x1, P0 ; /* 0x0000000502007211 */
/* 0x000fe400000f0eff */
/*0960*/ LEA R2, P0, R3, c[0x0][0x198], 0x2 ; /* 0x0000660003027a11 */
/* 0x000fc800078010ff */
/*0970*/ LEA.HI.X R3, R3, c[0x0][0x19c], R0, 0x2, P0 ; /* 0x0000670003037a11 */
/* 0x000fca00000f1400 */
/*0980*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe2000c101904 */
/*0990*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*09a0*/ BRA 0x9a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*09b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15MatrixMulKernel6MatrixS_S_
.globl _Z15MatrixMulKernel6MatrixS_S_
.p2align 8
.type _Z15MatrixMulKernel6MatrixS_S_,@function
_Z15MatrixMulKernel6MatrixS_S_:
s_clause 0x1
s_load_b32 s5, s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x38
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s5, 4
s_cbranch_scc1 .LBB0_5
s_clause 0x3
s_load_b32 s10, s[0:1], 0x10
s_load_b32 s4, s[0:1], 0x28
s_load_b64 s[6:7], s[0:1], 0x8
s_load_b64 s[8:9], s[0:1], 0x20
v_lshlrev_b32_e32 v2, 2, v0
v_lshlrev_b32_e32 v3, 4, v1
s_ashr_i32 s11, s5, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_lshr_b32 s11, s11, 30
v_add_nc_u32_e32 v4, 64, v2
s_add_i32 s5, s5, s11
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
s_ashr_i32 s5, s5, 2
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[5:6], null, s10, v1, v[0:1]
v_mad_u64_u32 v[7:8], null, s4, v1, v[0:1]
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v8, 31, v7
v_lshlrev_b64 v[9:10], 2, v[5:6]
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v5, v3, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[11:12], 2, v[7:8]
v_add_nc_u32_e32 v6, v4, v3
v_add_co_u32 v7, vcc_lo, s6, v9
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v9, vcc_lo, s8, v11
v_add_co_ci_u32_e32 v10, vcc_lo, s9, v12, vcc_lo
s_mul_i32 s6, s10, s15
s_mov_b32 s7, 0
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
s_mul_i32 s8, s7, s4
s_add_i32 s9, s7, s6
s_add_i32 s10, s8, s14
s_lshl_b32 s8, s9, 2
s_lshl_b32 s10, s10, 2
s_ashr_i32 s9, s8, 31
s_ashr_i32 s11, s10, 31
s_lshl_b64 s[8:9], s[8:9], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_co_u32 v11, vcc_lo, v7, s8
v_add_co_ci_u32_e32 v12, vcc_lo, s9, v8, vcc_lo
s_lshl_b64 s[8:9], s[10:11], 2
v_add_co_u32 v13, vcc_lo, v9, s8
v_add_co_ci_u32_e32 v14, vcc_lo, s9, v10, vcc_lo
global_load_b32 v12, v[11:12], off
global_load_b32 v13, v[13:14], off
v_mov_b32_e32 v11, v4
s_mov_b32 s8, 0
s_waitcnt vmcnt(1)
ds_store_b32 v5, v12
s_waitcnt vmcnt(0)
ds_store_b32 v6, v13
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_3:
v_add_nc_u32_e32 v12, s8, v3
s_add_i32 s8, s8, 4
ds_load_b32 v13, v11
ds_load_b32 v12, v12
v_add_nc_u32_e32 v11, 16, v11
s_cmp_eq_u32 s8, 16
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v2, v12, v13
s_cbranch_scc0 .LBB0_3
s_add_i32 s7, s7, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s7, s5
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_2
.LBB0_5:
s_set_inst_prefetch_distance 0x2
s_load_b32 s0, s[0:1], 0x40
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[3:4], null, s0, v1, v[0:1]
s_mul_i32 s0, s0, s15
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s0, s0, s14
s_lshl_b32 s0, s0, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_ashr_i32 s1, s0, 31
v_ashrrev_i32_e32 v4, 31, v3
s_lshl_b64 s[0:1], s[0:1], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_add_u32 s0, s2, s0
s_addc_u32 s1, s3, s1
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixMulKernel6MatrixS_S_
.amdhsa_group_segment_fixed_size 128
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 72
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15MatrixMulKernel6MatrixS_S_, .Lfunc_end0-_Z15MatrixMulKernel6MatrixS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 24
.value_kind: by_value
- .offset: 24
.size: 24
.value_kind: by_value
- .offset: 48
.size: 24
.value_kind: by_value
.group_segment_fixed_size: 128
.kernarg_segment_align: 8
.kernarg_segment_size: 72
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixMulKernel6MatrixS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixMulKernel6MatrixS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //
// Created by heidies on 7/7/18.
//
#include <cuda_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, const int nx, const int ny){
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * nx + ix;
if(ix < nx && iy < ny)
C[idx] = A[idx] + B[idx];
}
void initialData(float* ip, unsigned long long size){
time_t t;
srand((unsigned)time(&t));
for(unsigned long long i = 0; i < size; ++i){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
int nx = 1 << 14;
int ny = 1 << 14;
unsigned long long size = nx * ny;
size_t nBytes = size * sizeof(float);
float *h_A, *h_B, *h_C;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
initialData(h_A, size);
initialData(h_B, size);
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, nBytes, cudaMemcpyHostToDevice);
int blockdimx = 32;
int blockdimy = 16;
if(argc > 2){
blockdimx = atoi(argv[1]);
blockdimy = atoi(argv[2]);
}
dim3 block(blockdimx, blockdimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
double iStart, iElaps;
iStart = cpuSecond();
sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
iElaps = cpuSecond() - iStart;
cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), " << "(" << block.x << ", " << block.y << ") >>> " <<
"elapsed " << iElaps << " ms" << endl;
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
} | code for sm_80
Function : _Z16sumMatrixOnGPU2DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //
// Created by heidies on 7/7/18.
//
#include <cuda_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, const int nx, const int ny){
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * nx + ix;
if(ix < nx && iy < ny)
C[idx] = A[idx] + B[idx];
}
void initialData(float* ip, unsigned long long size){
time_t t;
srand((unsigned)time(&t));
for(unsigned long long i = 0; i < size; ++i){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
int nx = 1 << 14;
int ny = 1 << 14;
unsigned long long size = nx * ny;
size_t nBytes = size * sizeof(float);
float *h_A, *h_B, *h_C;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
initialData(h_A, size);
initialData(h_B, size);
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, nBytes, cudaMemcpyHostToDevice);
int blockdimx = 32;
int blockdimy = 16;
if(argc > 2){
blockdimx = atoi(argv[1]);
blockdimy = atoi(argv[2]);
}
dim3 block(blockdimx, blockdimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
double iStart, iElaps;
iStart = cpuSecond();
sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
iElaps = cpuSecond() - iStart;
cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), " << "(" << block.x << ", " << block.y << ") >>> " <<
"elapsed " << iElaps << " ms" << endl;
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
} | .file "tmpxft_0005f016_00000000-6_sumMatrixOnGPU2D.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initialDataPfy
.type _Z11initialDataPfy, @function
_Z11initialDataPfy:
.LFB3669:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r12
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
call time@PLT
movl %eax, %edi
call srand@PLT
testq %rbp, %rbp
je .L3
movl $0, %ebx
.L5:
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC0(%rip), %xmm0
movss %xmm0, (%r12,%rbx,4)
addq $1, %rbx
cmpq %rbx, %rbp
jne .L5
.L3:
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z11initialDataPfy, .-_Z11initialDataPfy
.globl _Z9cpuSecondv
.type _Z9cpuSecondv, @function
_Z9cpuSecondv:
.LFB3670:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
mulsd .LC2(%rip), %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size _Z9cpuSecondv, .-_Z9cpuSecondv
.globl _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.type _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, @function
_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii:
.LFB3696:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, .-_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.type _Z16sumMatrixOnGPU2DPfS_S_ii, @function
_Z16sumMatrixOnGPU2DPfS_S_ii:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "sumMatrixOnGPU2D <<< ("
.LC4:
.string ", "
.LC5:
.string "), "
.LC6:
.string "("
.LC7:
.string ") >>> "
.LC8:
.string "elapsed "
.LC9:
.string " ms"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, %ebp
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1073741824, %edi
call malloc@PLT
movq %rax, %r13
movl $1073741824, %edi
call malloc@PLT
movq %rax, %r12
movl $1073741824, %edi
call malloc@PLT
movq %rax, %r15
movq %rax, 16(%rsp)
movl $268435456, %esi
movq %r13, %rdi
call _Z11initialDataPfy
movl $268435456, %esi
movq %r12, %rdi
call _Z11initialDataPfy
leaq 40(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %r13, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %r12, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %r15, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, %r14d
movl $32, %r15d
cmpl $2, %ebp
jg .L28
.L23:
movl %r14d, 8(%rsp)
movl %r15d, 12(%rsp)
movl $1, 72(%rsp)
leal 16383(%r14), %eax
movl $0, %edx
divl %r14d
movl %eax, %ebx
leal 16383(%r15), %eax
movl $0, %edx
divl %r15d
movl %eax, %ebp
movl $1, 84(%rsp)
call _Z9cpuSecondv
movsd %xmm0, 24(%rsp)
movl %ebp, 76(%rsp)
movl %ebx, 80(%rsp)
movl %r15d, 64(%rsp)
movl %r14d, 68(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L24:
call cudaDeviceSynchronize@PLT
call _Z9cpuSecondv
subsd 24(%rsp), %xmm0
movq %xmm0, %r14
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rbp
movq %rbp, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 12(%rsp), %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
movq %rbp, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 8(%rsp), %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC8(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r14, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC9(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r15d
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r14d
jmp .L23
.L29:
movl $16384, %r8d
movl $16384, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
jmp .L24
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z16sumMatrixOnGPU2DPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1092616192
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long -1598689907
.long 1051772663
.align 8
.LC2:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //
// Created by heidies on 7/7/18.
//
#include <cuda_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, const int nx, const int ny){
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * nx + ix;
if(ix < nx && iy < ny)
C[idx] = A[idx] + B[idx];
}
void initialData(float* ip, unsigned long long size){
time_t t;
srand((unsigned)time(&t));
for(unsigned long long i = 0; i < size; ++i){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
int nx = 1 << 14;
int ny = 1 << 14;
unsigned long long size = nx * ny;
size_t nBytes = size * sizeof(float);
float *h_A, *h_B, *h_C;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
initialData(h_A, size);
initialData(h_B, size);
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, nBytes, cudaMemcpyHostToDevice);
int blockdimx = 32;
int blockdimy = 16;
if(argc > 2){
blockdimx = atoi(argv[1]);
blockdimy = atoi(argv[2]);
}
dim3 block(blockdimx, blockdimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
double iStart, iElaps;
iStart = cpuSecond();
sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
iElaps = cpuSecond() - iStart;
cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), " << "(" << block.x << ", " << block.y << ") >>> " <<
"elapsed " << iElaps << " ms" << endl;
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
} | //
// Created by heidies on 7/7/18.
//
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, const int nx, const int ny){
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * nx + ix;
if(ix < nx && iy < ny)
C[idx] = A[idx] + B[idx];
}
void initialData(float* ip, unsigned long long size){
time_t t;
srand((unsigned)time(&t));
for(unsigned long long i = 0; i < size; ++i){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
int nx = 1 << 14;
int ny = 1 << 14;
unsigned long long size = nx * ny;
size_t nBytes = size * sizeof(float);
float *h_A, *h_B, *h_C;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
initialData(h_A, size);
initialData(h_B, size);
float *d_A, *d_B, *d_C;
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, nBytes, hipMemcpyHostToDevice);
int blockdimx = 32;
int blockdimy = 16;
if(argc > 2){
blockdimx = atoi(argv[1]);
blockdimy = atoi(argv[2]);
}
dim3 block(blockdimx, blockdimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
double iStart, iElaps;
iStart = cpuSecond();
sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
iElaps = cpuSecond() - iStart;
cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), " << "(" << block.x << ", " << block.y << ") >>> " <<
"elapsed " << iElaps << " ms" << endl;
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //
// Created by heidies on 7/7/18.
//
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, const int nx, const int ny){
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * nx + ix;
if(ix < nx && iy < ny)
C[idx] = A[idx] + B[idx];
}
void initialData(float* ip, unsigned long long size){
time_t t;
srand((unsigned)time(&t));
for(unsigned long long i = 0; i < size; ++i){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
int nx = 1 << 14;
int ny = 1 << 14;
unsigned long long size = nx * ny;
size_t nBytes = size * sizeof(float);
float *h_A, *h_B, *h_C;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
initialData(h_A, size);
initialData(h_B, size);
float *d_A, *d_B, *d_C;
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, nBytes, hipMemcpyHostToDevice);
int blockdimx = 32;
int blockdimy = 16;
if(argc > 2){
blockdimx = atoi(argv[1]);
blockdimy = atoi(argv[2]);
}
dim3 block(blockdimx, blockdimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
double iStart, iElaps;
iStart = cpuSecond();
sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
iElaps = cpuSecond() - iStart;
cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), " << "(" << block.x << ", " << block.y << ") >>> " <<
"elapsed " << iElaps << " ms" << endl;
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sumMatrixOnGPU2DPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 8
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@function
_Z16sumMatrixOnGPU2DPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[8:11], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1]
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s8, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sumMatrixOnGPU2DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end0-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sumMatrixOnGPU2DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16sumMatrixOnGPU2DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //
// Created by heidies on 7/7/18.
//
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, const int nx, const int ny){
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * nx + ix;
if(ix < nx && iy < ny)
C[idx] = A[idx] + B[idx];
}
void initialData(float* ip, unsigned long long size){
time_t t;
srand((unsigned)time(&t));
for(unsigned long long i = 0; i < size; ++i){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
int nx = 1 << 14;
int ny = 1 << 14;
unsigned long long size = nx * ny;
size_t nBytes = size * sizeof(float);
float *h_A, *h_B, *h_C;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
initialData(h_A, size);
initialData(h_B, size);
float *d_A, *d_B, *d_C;
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, nBytes, hipMemcpyHostToDevice);
int blockdimx = 32;
int blockdimy = 16;
if(argc > 2){
blockdimx = atoi(argv[1]);
blockdimy = atoi(argv[2]);
}
dim3 block(blockdimx, blockdimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
double iStart, iElaps;
iStart = cpuSecond();
sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
iElaps = cpuSecond() - iStart;
cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), " << "(" << block.x << ", " << block.y << ") >>> " <<
"elapsed " << iElaps << " ms" << endl;
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
} | .text
.file "sumMatrixOnGPU2D.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii # -- Begin function _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.p2align 4, 0x90
.type _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii,@function
_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii: # @_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end0-_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z11initialDataPfy
.LCPI1_0:
.long 0x41200000 # float 10
.text
.globl _Z11initialDataPfy
.p2align 4, 0x90
.type _Z11initialDataPfy,@function
_Z11initialDataPfy: # @_Z11initialDataPfy
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $16, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
leaq 8(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
testq %rbx, %rbx
je .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI1_0(%rip), %xmm0
movss %xmm0, (%r14,%r15,4)
incq %r15
cmpq %r15, %rbx
jne .LBB1_2
.LBB1_3: # %._crit_edge
addq $16, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initialDataPfy, .Lfunc_end1-_Z11initialDataPfy
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9cpuSecondv
.LCPI2_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI2_1:
.quad 0x408f400000000000 # double 1000
.text
.globl _Z9cpuSecondv
.p2align 4, 0x90
.type _Z9cpuSecondv,@function
_Z9cpuSecondv: # @_Z9cpuSecondv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
addsd %xmm1, %xmm0
mulsd .LCPI2_1(%rip), %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z9cpuSecondv, .Lfunc_end2-_Z9cpuSecondv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x41200000 # float 10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI3_2:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r12
movl %edi, %ebp
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %rbx
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r14
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r13
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq $268435456, %r15 # imm = 0x10000000
jne .LBB3_1
# %bb.2: # %_Z11initialDataPfy.exit
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i40
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%r14,%r15,4)
incq %r15
cmpq $268435456, %r15 # imm = 0x10000000
jne .LBB3_3
# %bb.4: # %_Z11initialDataPfy.exit43
leaq 32(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
cmpl $3, %ebp
movq %r13, 96(%rsp) # 8-byte Spill
jl .LBB3_5
# %bb.6:
movq 8(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movq 16(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movl %r15d, %eax
shlq $32, %r12
orq %rax, %r12
jmp .LBB3_7
.LBB3_5:
movabsq $68719476768, %r12 # imm = 0x1000000020
.LBB3_7:
leal 16383(%r12), %eax
xorl %edx, %edx
divl %r12d
movl %eax, %r15d
movq %r12, %r13
shrq $32, %r13
leal 16383(%r13), %eax
xorl %edx, %edx
divl %r13d
movl %eax, %ebp
movq %rbp, 104(%rsp) # 8-byte Spill
shlq $32, %rbp
orq %r15, %rbp
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI3_1(%rip), %xmm1
addsd %xmm0, %xmm1
mulsd .LCPI3_2(%rip), %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_9
# %bb.8:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 176(%rsp)
movq %rcx, 168(%rsp)
movq %rdx, 160(%rsp)
movl $16384, 44(%rsp) # imm = 0x4000
movl $16384, 40(%rsp) # imm = 0x4000
leaq 176(%rsp), %rax
movq %rax, 48(%rsp)
leaq 168(%rsp), %rax
movq %rax, 56(%rsp)
leaq 160(%rsp), %rax
movq %rax, 64(%rsp)
leaq 44(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rax
movq %rax, 80(%rsp)
leaq 144(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 120(%rsp), %rdx
leaq 112(%rsp), %rcx
callq __hipPopCallConfiguration
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
movq 128(%rsp), %rcx
movl 136(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_9:
callq hipDeviceSynchronize
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI3_1(%rip), %xmm1
addsd %xmm0, %xmm1
mulsd .LCPI3_2(%rip), %xmm1
subsd 8(%rsp), %xmm1 # 8-byte Folded Reload
movsd %xmm1, 8(%rsp) # 8-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $22, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %r15d, %esi
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 104(%rsp), %esi # 4-byte Reload
movq %r15, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.2, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.3, %esi
movl $1, %edx
movq %r15, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %r12d, %esi
movq %r15, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r15, %rdi
movq %r13, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.4, %esi
movl $6, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.5, %esi
movl $8, %edx
movq %r15, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r15, %rdi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r15
movl $.L.str.6, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r15), %rax
movq -24(%rax), %rax
movq 240(%r15,%rax), %r12
testq %r12, %r12
je .LBB3_14
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r12)
movq 96(%rsp), %r13 # 8-byte Reload
je .LBB3_12
# %bb.11:
movzbl 67(%r12), %eax
jmp .LBB3_13
.LBB3_12:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r15, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r13, %rdi
callq free
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_14:
.cfi_def_cfa_offset 240
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@object # @_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 3, 0x0
_Z16sumMatrixOnGPU2DPfS_S_ii:
.quad _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.size _Z16sumMatrixOnGPU2DPfS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "sumMatrixOnGPU2D <<< ("
.size .L.str, 23
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", "
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "), "
.size .L.str.2, 4
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "("
.size .L.str.3, 2
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz ") >>> "
.size .L.str.4, 7
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "elapsed "
.size .L.str.5, 9
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz " ms"
.size .L.str.6, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16sumMatrixOnGPU2DPfS_S_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16sumMatrixOnGPU2DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sumMatrixOnGPU2DPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 8
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@function
_Z16sumMatrixOnGPU2DPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[8:11], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1]
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s8, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sumMatrixOnGPU2DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end0-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sumMatrixOnGPU2DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16sumMatrixOnGPU2DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0005f016_00000000-6_sumMatrixOnGPU2D.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initialDataPfy
.type _Z11initialDataPfy, @function
_Z11initialDataPfy:
.LFB3669:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r12
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
call time@PLT
movl %eax, %edi
call srand@PLT
testq %rbp, %rbp
je .L3
movl $0, %ebx
.L5:
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC0(%rip), %xmm0
movss %xmm0, (%r12,%rbx,4)
addq $1, %rbx
cmpq %rbx, %rbp
jne .L5
.L3:
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z11initialDataPfy, .-_Z11initialDataPfy
.globl _Z9cpuSecondv
.type _Z9cpuSecondv, @function
_Z9cpuSecondv:
.LFB3670:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
mulsd .LC2(%rip), %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size _Z9cpuSecondv, .-_Z9cpuSecondv
.globl _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.type _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, @function
_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii:
.LFB3696:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, .-_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.type _Z16sumMatrixOnGPU2DPfS_S_ii, @function
_Z16sumMatrixOnGPU2DPfS_S_ii:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "sumMatrixOnGPU2D <<< ("
.LC4:
.string ", "
.LC5:
.string "), "
.LC6:
.string "("
.LC7:
.string ") >>> "
.LC8:
.string "elapsed "
.LC9:
.string " ms"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, %ebp
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1073741824, %edi
call malloc@PLT
movq %rax, %r13
movl $1073741824, %edi
call malloc@PLT
movq %rax, %r12
movl $1073741824, %edi
call malloc@PLT
movq %rax, %r15
movq %rax, 16(%rsp)
movl $268435456, %esi
movq %r13, %rdi
call _Z11initialDataPfy
movl $268435456, %esi
movq %r12, %rdi
call _Z11initialDataPfy
leaq 40(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $1073741824, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %r13, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %r12, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1073741824, %edx
movq %r15, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, %r14d
movl $32, %r15d
cmpl $2, %ebp
jg .L28
.L23:
movl %r14d, 8(%rsp)
movl %r15d, 12(%rsp)
movl $1, 72(%rsp)
leal 16383(%r14), %eax
movl $0, %edx
divl %r14d
movl %eax, %ebx
leal 16383(%r15), %eax
movl $0, %edx
divl %r15d
movl %eax, %ebp
movl $1, 84(%rsp)
call _Z9cpuSecondv
movsd %xmm0, 24(%rsp)
movl %ebp, 76(%rsp)
movl %ebx, 80(%rsp)
movl %r15d, 64(%rsp)
movl %r14d, 68(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L24:
call cudaDeviceSynchronize@PLT
call _Z9cpuSecondv
subsd 24(%rsp), %xmm0
movq %xmm0, %r14
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rbp
movq %rbp, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 12(%rsp), %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
movq %rbp, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 8(%rsp), %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC8(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r14, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC9(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r15d
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r14d
jmp .L23
.L29:
movl $16384, %r8d
movl $16384, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
jmp .L24
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z16sumMatrixOnGPU2DPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1092616192
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long -1598689907
.long 1051772663
.align 8
.LC2:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sumMatrixOnGPU2D.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii # -- Begin function _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.p2align 4, 0x90
.type _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii,@function
_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii: # @_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end0-_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z11initialDataPfy
.LCPI1_0:
.long 0x41200000 # float 10
.text
.globl _Z11initialDataPfy
.p2align 4, 0x90
.type _Z11initialDataPfy,@function
_Z11initialDataPfy: # @_Z11initialDataPfy
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $16, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
leaq 8(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
testq %rbx, %rbx
je .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI1_0(%rip), %xmm0
movss %xmm0, (%r14,%r15,4)
incq %r15
cmpq %r15, %rbx
jne .LBB1_2
.LBB1_3: # %._crit_edge
addq $16, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initialDataPfy, .Lfunc_end1-_Z11initialDataPfy
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9cpuSecondv
.LCPI2_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI2_1:
.quad 0x408f400000000000 # double 1000
.text
.globl _Z9cpuSecondv
.p2align 4, 0x90
.type _Z9cpuSecondv,@function
_Z9cpuSecondv: # @_Z9cpuSecondv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
addsd %xmm1, %xmm0
mulsd .LCPI2_1(%rip), %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z9cpuSecondv, .Lfunc_end2-_Z9cpuSecondv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x41200000 # float 10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI3_2:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r12
movl %edi, %ebp
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %rbx
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r14
movl $1073741824, %edi # imm = 0x40000000
callq malloc
movq %rax, %r13
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq $268435456, %r15 # imm = 0x10000000
jne .LBB3_1
# %bb.2: # %_Z11initialDataPfy.exit
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i40
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%r14,%r15,4)
incq %r15
cmpq $268435456, %r15 # imm = 0x10000000
jne .LBB3_3
# %bb.4: # %_Z11initialDataPfy.exit43
leaq 32(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $1073741824, %esi # imm = 0x40000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $1073741824, %edx # imm = 0x40000000
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
cmpl $3, %ebp
movq %r13, 96(%rsp) # 8-byte Spill
jl .LBB3_5
# %bb.6:
movq 8(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movq 16(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movl %r15d, %eax
shlq $32, %r12
orq %rax, %r12
jmp .LBB3_7
.LBB3_5:
movabsq $68719476768, %r12 # imm = 0x1000000020
.LBB3_7:
leal 16383(%r12), %eax
xorl %edx, %edx
divl %r12d
movl %eax, %r15d
movq %r12, %r13
shrq $32, %r13
leal 16383(%r13), %eax
xorl %edx, %edx
divl %r13d
movl %eax, %ebp
movq %rbp, 104(%rsp) # 8-byte Spill
shlq $32, %rbp
orq %r15, %rbp
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI3_1(%rip), %xmm1
addsd %xmm0, %xmm1
mulsd .LCPI3_2(%rip), %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_9
# %bb.8:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 176(%rsp)
movq %rcx, 168(%rsp)
movq %rdx, 160(%rsp)
movl $16384, 44(%rsp) # imm = 0x4000
movl $16384, 40(%rsp) # imm = 0x4000
leaq 176(%rsp), %rax
movq %rax, 48(%rsp)
leaq 168(%rsp), %rax
movq %rax, 56(%rsp)
leaq 160(%rsp), %rax
movq %rax, 64(%rsp)
leaq 44(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rax
movq %rax, 80(%rsp)
leaq 144(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 120(%rsp), %rdx
leaq 112(%rsp), %rcx
callq __hipPopCallConfiguration
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
movq 128(%rsp), %rcx
movl 136(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_9:
callq hipDeviceSynchronize
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI3_1(%rip), %xmm1
addsd %xmm0, %xmm1
mulsd .LCPI3_2(%rip), %xmm1
subsd 8(%rsp), %xmm1 # 8-byte Folded Reload
movsd %xmm1, 8(%rsp) # 8-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $22, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %r15d, %esi
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 104(%rsp), %esi # 4-byte Reload
movq %r15, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.2, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.3, %esi
movl $1, %edx
movq %r15, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl %r12d, %esi
movq %r15, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r15, %rdi
movq %r13, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.4, %esi
movl $6, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.5, %esi
movl $8, %edx
movq %r15, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r15, %rdi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r15
movl $.L.str.6, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r15), %rax
movq -24(%rax), %rax
movq 240(%r15,%rax), %r12
testq %r12, %r12
je .LBB3_14
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r12)
movq 96(%rsp), %r13 # 8-byte Reload
je .LBB3_12
# %bb.11:
movzbl 67(%r12), %eax
jmp .LBB3_13
.LBB3_12:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r15, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r13, %rdi
callq free
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_14:
.cfi_def_cfa_offset 240
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@object # @_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 3, 0x0
_Z16sumMatrixOnGPU2DPfS_S_ii:
.quad _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.size _Z16sumMatrixOnGPU2DPfS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "sumMatrixOnGPU2D <<< ("
.size .L.str, 23
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", "
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "), "
.size .L.str.2, 4
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "("
.size .L.str.3, 2
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz ") >>> "
.size .L.str.4, 7
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "elapsed "
.size .L.str.5, 9
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz " ms"
.size .L.str.6, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16sumMatrixOnGPU2DPfS_S_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>
int main(void){
int xs[3] = {2,3,1};
thrust::device_vector<int> d_xs(xs, xs+3);
thrust::sort_by_key(d_xs.begin(), d_xs.end(), d_xs.begin());
std::cout << d_xs[0] << d_xs[1] << d_xs[2] << std::endl;
return 0;
} | #include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>
int main(void){
int xs[3] = {2,3,1};
thrust::device_vector<int> d_xs(xs, xs+3);
thrust::sort_by_key(d_xs.begin(), d_xs.end(), d_xs.begin());
std::cout << d_xs[0] << d_xs[1] << d_xs[2] << std::endl;
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <iostream>
__global__
void addKernel(int* A_d, int* B_d, int*C_d); //vector addition(device code)
void vecAdd(int* A, int* B, int* C, int n); //loading, transfer, execution(host code)
int main(void){
const int SIZE=5;
int a[SIZE]={1,2,3,4,5};
int b[SIZE]={10,20,30,40,50};
int c[SIZE]={0};
vecAdd(a,b,c,5);
}
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
__global__
void addKernel(int* A_d, int* B_d, int*C_d)
{
// each thread knows its own index
int i = threadIdx.x;
C_d[i] = A_d[i] + B_d[i];
}
void vecAdd(int* A, int* B, int* C, int n)
{
int size = n * sizeof(int);
int* A_d=0;
int* B_d=0;
int* C_d=0;
// Allocate device memory
cudaMalloc((void **) &A_d, size);
cudaMalloc((void **) &B_d, size);
cudaMalloc((void **) &C_d, size);
// Transfer A and B to device memory
cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
//configure grid ==> <<<number of thread blocks within grid, number of threads in each thread block>>>
addKernel<<<1, size>>>(A_d, B_d, C_d);
// Transfer C from device to host
cudaMemcpy(C, C_d, size, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(A_d); cudaFree(B_d); cudaFree (C_d);
} | code for sm_80
Function : _Z9addKernelPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <iostream>
__global__
void addKernel(int* A_d, int* B_d, int*C_d); //vector addition(device code)
void vecAdd(int* A, int* B, int* C, int n); //loading, transfer, execution(host code)
int main(void){
const int SIZE=5;
int a[SIZE]={1,2,3,4,5};
int b[SIZE]={10,20,30,40,50};
int c[SIZE]={0};
vecAdd(a,b,c,5);
}
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
__global__
void addKernel(int* A_d, int* B_d, int*C_d)
{
// each thread knows its own index
int i = threadIdx.x;
C_d[i] = A_d[i] + B_d[i];
}
void vecAdd(int* A, int* B, int* C, int n)
{
int size = n * sizeof(int);
int* A_d=0;
int* B_d=0;
int* C_d=0;
// Allocate device memory
cudaMalloc((void **) &A_d, size);
cudaMalloc((void **) &B_d, size);
cudaMalloc((void **) &C_d, size);
// Transfer A and B to device memory
cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
//configure grid ==> <<<number of thread blocks within grid, number of threads in each thread block>>>
addKernel<<<1, size>>>(A_d, B_d, C_d);
// Transfer C from device to host
cudaMemcpy(C, C_d, size, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(A_d); cudaFree(B_d); cudaFree (C_d);
} | .file "tmpxft_0015dc9f_00000000-6_vecAdd.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9addKernelPiS_S_PiS_S_
.type _Z32__device_stub__Z9addKernelPiS_S_PiS_S_, @function
_Z32__device_stub__Z9addKernelPiS_S_PiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9addKernelPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z32__device_stub__Z9addKernelPiS_S_PiS_S_, .-_Z32__device_stub__Z9addKernelPiS_S_PiS_S_
.globl _Z9addKernelPiS_S_
.type _Z9addKernelPiS_S_, @function
_Z9addKernelPiS_S_:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9addKernelPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z9addKernelPiS_S_, .-_Z9addKernelPiS_S_
.globl _Z6vecAddPiS_S_i
.type _Z6vecAddPiS_S_i, @function
_Z6vecAddPiS_S_i:
.LFB3670:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r14
movq %rsi, %r13
movq %rdx, %r12
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leal 0(,%rcx,4), %ebp
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movslq %ebp, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z32__device_stub__Z9addKernelPiS_S_PiS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size _Z6vecAddPiS_S_i, .-_Z6vecAddPiS_S_i
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1, (%rsp)
movl $2, 4(%rsp)
movl $3, 8(%rsp)
movl $4, 12(%rsp)
movl $5, 16(%rsp)
movl $10, 32(%rsp)
movl $20, 36(%rsp)
movl $30, 40(%rsp)
movl $40, 44(%rsp)
movl $50, 48(%rsp)
pxor %xmm0, %xmm0
movaps %xmm0, 64(%rsp)
movl $0, 80(%rsp)
leaq 64(%rsp), %rdx
leaq 32(%rsp), %rsi
movq %rsp, %rdi
movl $5, %ecx
call _Z6vecAddPiS_S_i
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9addKernelPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addKernelPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <iostream>
__global__
void addKernel(int* A_d, int* B_d, int*C_d); //vector addition(device code)
void vecAdd(int* A, int* B, int* C, int n); //loading, transfer, execution(host code)
int main(void){
const int SIZE=5;
int a[SIZE]={1,2,3,4,5};
int b[SIZE]={10,20,30,40,50};
int c[SIZE]={0};
vecAdd(a,b,c,5);
}
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
__global__
void addKernel(int* A_d, int* B_d, int*C_d)
{
// each thread knows its own index
int i = threadIdx.x;
C_d[i] = A_d[i] + B_d[i];
}
void vecAdd(int* A, int* B, int* C, int n)
{
int size = n * sizeof(int);
int* A_d=0;
int* B_d=0;
int* C_d=0;
// Allocate device memory
cudaMalloc((void **) &A_d, size);
cudaMalloc((void **) &B_d, size);
cudaMalloc((void **) &C_d, size);
// Transfer A and B to device memory
cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
//configure grid ==> <<<number of thread blocks within grid, number of threads in each thread block>>>
addKernel<<<1, size>>>(A_d, B_d, C_d);
// Transfer C from device to host
cudaMemcpy(C, C_d, size, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(A_d); cudaFree(B_d); cudaFree (C_d);
} | #include <hip/hip_runtime.h>
#include <iostream>
__global__
void addKernel(int* A_d, int* B_d, int*C_d); //vector addition(device code)
void vecAdd(int* A, int* B, int* C, int n); //loading, transfer, execution(host code)
int main(void){
const int SIZE=5;
int a[SIZE]={1,2,3,4,5};
int b[SIZE]={10,20,30,40,50};
int c[SIZE]={0};
vecAdd(a,b,c,5);
}
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
__global__
void addKernel(int* A_d, int* B_d, int*C_d)
{
// each thread knows its own index
int i = threadIdx.x;
C_d[i] = A_d[i] + B_d[i];
}
void vecAdd(int* A, int* B, int* C, int n)
{
int size = n * sizeof(int);
int* A_d=0;
int* B_d=0;
int* C_d=0;
// Allocate device memory
hipMalloc((void **) &A_d, size);
hipMalloc((void **) &B_d, size);
hipMalloc((void **) &C_d, size);
// Transfer A and B to device memory
hipMemcpy(A_d, A, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B, size, hipMemcpyHostToDevice);
//configure grid ==> <<<number of thread blocks within grid, number of threads in each thread block>>>
addKernel<<<1, size>>>(A_d, B_d, C_d);
// Transfer C from device to host
hipMemcpy(C, C_d, size, hipMemcpyDeviceToHost);
// Free device memory for A, B, C
hipFree(A_d); hipFree(B_d); hipFree (C_d);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
__global__
void addKernel(int* A_d, int* B_d, int*C_d); //vector addition(device code)
void vecAdd(int* A, int* B, int* C, int n); //loading, transfer, execution(host code)
int main(void){
const int SIZE=5;
int a[SIZE]={1,2,3,4,5};
int b[SIZE]={10,20,30,40,50};
int c[SIZE]={0};
vecAdd(a,b,c,5);
}
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
__global__
void addKernel(int* A_d, int* B_d, int*C_d)
{
// each thread knows its own index
int i = threadIdx.x;
C_d[i] = A_d[i] + B_d[i];
}
void vecAdd(int* A, int* B, int* C, int n)
{
int size = n * sizeof(int);
int* A_d=0;
int* B_d=0;
int* C_d=0;
// Allocate device memory
hipMalloc((void **) &A_d, size);
hipMalloc((void **) &B_d, size);
hipMalloc((void **) &C_d, size);
// Transfer A and B to device memory
hipMemcpy(A_d, A, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B, size, hipMemcpyHostToDevice);
//configure grid ==> <<<number of thread blocks within grid, number of threads in each thread block>>>
addKernel<<<1, size>>>(A_d, B_d, C_d);
// Transfer C from device to host
hipMemcpy(C, C_d, size, hipMemcpyDeviceToHost);
// Free device memory for A, B, C
hipFree(A_d); hipFree(B_d); hipFree (C_d);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9addKernelPiS_S_
.globl _Z9addKernelPiS_S_
.p2align 8
.type _Z9addKernelPiS_S_,@function
_Z9addKernelPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9addKernelPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9addKernelPiS_S_, .Lfunc_end0-_Z9addKernelPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9addKernelPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z9addKernelPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
__global__
void addKernel(int* A_d, int* B_d, int*C_d); //vector addition(device code)
void vecAdd(int* A, int* B, int* C, int n); //loading, transfer, execution(host code)
int main(void){
const int SIZE=5;
int a[SIZE]={1,2,3,4,5};
int b[SIZE]={10,20,30,40,50};
int c[SIZE]={0};
vecAdd(a,b,c,5);
}
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
__global__
void addKernel(int* A_d, int* B_d, int*C_d)
{
// each thread knows its own index
int i = threadIdx.x;
C_d[i] = A_d[i] + B_d[i];
}
void vecAdd(int* A, int* B, int* C, int n)
{
int size = n * sizeof(int);
int* A_d=0;
int* B_d=0;
int* C_d=0;
// Allocate device memory
hipMalloc((void **) &A_d, size);
hipMalloc((void **) &B_d, size);
hipMalloc((void **) &C_d, size);
// Transfer A and B to device memory
hipMemcpy(A_d, A, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B, size, hipMemcpyHostToDevice);
//configure grid ==> <<<number of thread blocks within grid, number of threads in each thread block>>>
addKernel<<<1, size>>>(A_d, B_d, C_d);
// Transfer C from device to host
hipMemcpy(C, C_d, size, hipMemcpyDeviceToHost);
// Free device memory for A, B, C
hipFree(A_d); hipFree(B_d); hipFree (C_d);
} | .text
.file "vecAdd.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI0_0:
.long 1 # 0x1
.long 2 # 0x2
.long 3 # 0x3
.long 4 # 0x4
.LCPI0_1:
.long 10 # 0xa
.long 20 # 0x14
.long 30 # 0x1e
.long 40 # 0x28
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movaps .LCPI0_0(%rip), %xmm0 # xmm0 = [1,2,3,4]
movaps %xmm0, 64(%rsp)
movl $5, 80(%rsp)
movaps .LCPI0_1(%rip), %xmm0 # xmm0 = [10,20,30,40]
movaps %xmm0, 32(%rsp)
movl $50, 48(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movl $0, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
movl $5, %ecx
callq _Z6vecAddPiS_S_i
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z6vecAddPiS_S_i # -- Begin function _Z6vecAddPiS_S_i
.p2align 4, 0x90
.type _Z6vecAddPiS_S_i,@function
_Z6vecAddPiS_S_i: # @_Z6vecAddPiS_S_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $128, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
# kill: def $ecx killed $ecx def $rcx
movq %rdx, %rbx
movq %rsi, %r12
movq %rdi, %r13
leal (,%rcx,4), %r15d
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
movslq %r15d, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r13, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %r15
orq $1, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9addKernelPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $128, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z6vecAddPiS_S_i, .Lfunc_end1-_Z6vecAddPiS_S_i
.cfi_endproc
# -- End function
.globl _Z24__device_stub__addKernelPiS_S_ # -- Begin function _Z24__device_stub__addKernelPiS_S_
.p2align 4, 0x90
.type _Z24__device_stub__addKernelPiS_S_,@function
_Z24__device_stub__addKernelPiS_S_: # @_Z24__device_stub__addKernelPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z24__device_stub__addKernelPiS_S_, .Lfunc_end2-_Z24__device_stub__addKernelPiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addKernelPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addKernelPiS_S_,@object # @_Z9addKernelPiS_S_
.section .rodata,"a",@progbits
.globl _Z9addKernelPiS_S_
.p2align 3, 0x0
_Z9addKernelPiS_S_:
.quad _Z24__device_stub__addKernelPiS_S_
.size _Z9addKernelPiS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9addKernelPiS_S_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addKernelPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addKernelPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9addKernelPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9addKernelPiS_S_
.globl _Z9addKernelPiS_S_
.p2align 8
.type _Z9addKernelPiS_S_,@function
_Z9addKernelPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9addKernelPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9addKernelPiS_S_, .Lfunc_end0-_Z9addKernelPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9addKernelPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z9addKernelPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0015dc9f_00000000-6_vecAdd.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9addKernelPiS_S_PiS_S_
.type _Z32__device_stub__Z9addKernelPiS_S_PiS_S_, @function
_Z32__device_stub__Z9addKernelPiS_S_PiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9addKernelPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z32__device_stub__Z9addKernelPiS_S_PiS_S_, .-_Z32__device_stub__Z9addKernelPiS_S_PiS_S_
.globl _Z9addKernelPiS_S_
.type _Z9addKernelPiS_S_, @function
_Z9addKernelPiS_S_:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9addKernelPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z9addKernelPiS_S_, .-_Z9addKernelPiS_S_
.globl _Z6vecAddPiS_S_i
.type _Z6vecAddPiS_S_i, @function
_Z6vecAddPiS_S_i:
.LFB3670:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r14
movq %rsi, %r13
movq %rdx, %r12
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leal 0(,%rcx,4), %ebp
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movslq %ebp, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z32__device_stub__Z9addKernelPiS_S_PiS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size _Z6vecAddPiS_S_i, .-_Z6vecAddPiS_S_i
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1, (%rsp)
movl $2, 4(%rsp)
movl $3, 8(%rsp)
movl $4, 12(%rsp)
movl $5, 16(%rsp)
movl $10, 32(%rsp)
movl $20, 36(%rsp)
movl $30, 40(%rsp)
movl $40, 44(%rsp)
movl $50, 48(%rsp)
pxor %xmm0, %xmm0
movaps %xmm0, 64(%rsp)
movl $0, 80(%rsp)
leaq 64(%rsp), %rdx
leaq 32(%rsp), %rsi
movq %rsp, %rdi
movl $5, %ecx
call _Z6vecAddPiS_S_i
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9addKernelPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addKernelPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "vecAdd.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI0_0:
.long 1 # 0x1
.long 2 # 0x2
.long 3 # 0x3
.long 4 # 0x4
.LCPI0_1:
.long 10 # 0xa
.long 20 # 0x14
.long 30 # 0x1e
.long 40 # 0x28
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movaps .LCPI0_0(%rip), %xmm0 # xmm0 = [1,2,3,4]
movaps %xmm0, 64(%rsp)
movl $5, 80(%rsp)
movaps .LCPI0_1(%rip), %xmm0 # xmm0 = [10,20,30,40]
movaps %xmm0, 32(%rsp)
movl $50, 48(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movl $0, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
movl $5, %ecx
callq _Z6vecAddPiS_S_i
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z6vecAddPiS_S_i # -- Begin function _Z6vecAddPiS_S_i
.p2align 4, 0x90
.type _Z6vecAddPiS_S_i,@function
_Z6vecAddPiS_S_i: # @_Z6vecAddPiS_S_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $128, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
# kill: def $ecx killed $ecx def $rcx
movq %rdx, %rbx
movq %rsi, %r12
movq %rdi, %r13
leal (,%rcx,4), %r15d
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
movslq %r15d, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r13, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %r15
orq $1, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9addKernelPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $128, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z6vecAddPiS_S_i, .Lfunc_end1-_Z6vecAddPiS_S_i
.cfi_endproc
# -- End function
.globl _Z24__device_stub__addKernelPiS_S_ # -- Begin function _Z24__device_stub__addKernelPiS_S_
.p2align 4, 0x90
.type _Z24__device_stub__addKernelPiS_S_,@function
_Z24__device_stub__addKernelPiS_S_: # @_Z24__device_stub__addKernelPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z24__device_stub__addKernelPiS_S_, .Lfunc_end2-_Z24__device_stub__addKernelPiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addKernelPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addKernelPiS_S_,@object # @_Z9addKernelPiS_S_
.section .rodata,"a",@progbits
.globl _Z9addKernelPiS_S_
.p2align 3, 0x0
_Z9addKernelPiS_S_:
.quad _Z24__device_stub__addKernelPiS_S_
.size _Z9addKernelPiS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9addKernelPiS_S_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addKernelPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addKernelPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -o matrix_gpu
#include <stdio.h>
const int blocksize = 16;
const int N = 256;
const int gridsize = N / blocksize;
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
// coalesced
/*
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
*/
// Non Coalesced
int index_y = blockIdx.x * blockDim.x + threadIdx.x;
int index_x = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
}
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
unsigned long size = N*N*sizeof(float);
float *gpu_a;
float *gpu_b;
float *gpu_c;
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
cudaEvent_t begin;
cudaEvent_t end;
float elapsed;
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaMalloc( (void**)&gpu_a, size);
cudaMalloc( (void**)&gpu_b, size);
cudaMalloc( (void**)&gpu_c, size);
// dim3 dimBlock( blockDim.x, blockDim.y, 1);
dim3 dimBlock( blocksize,blocksize,1);
// dim3 dimGrid( blockIdx.x, blockIdx.y );
dim3 dimGrid( gridsize, gridsize );
cudaMemcpy( gpu_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( gpu_b, b, size, cudaMemcpyHostToDevice );
cudaEventRecord(begin, 0);
add_matrix<<<dimGrid, dimBlock>>>(gpu_a,gpu_b,gpu_c,N);
cudaThreadSynchronize();
cudaEventRecord(end, 0);
cudaMemcpy( c, gpu_c, size, cudaMemcpyDeviceToHost );
cudaFree( gpu_a );
cudaFree( gpu_b );
cudaFree( gpu_c );
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed, begin, end);
for (int i = N-16; i < N; i++)
{
for (int j = N-16; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}
printf("\n");
//printf("Blocksize = %i\tN = %i Time : %f\n",blocksize,N,elapsed*1000);
printf("%f\n",elapsed*1000);
} | code for sm_80
Function : _Z10add_matrixPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */
/* 0x000fe20000000800 */
/*0030*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0040*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */
/* 0x000fe20000000800 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0070*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0080*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fe400078e0203 */
/*00a0*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fc800078e0205 */
/*00b0*/ IMAD R0, R0, UR4, R3 ; /* 0x0000000400007c24 */
/* 0x000fe2000f8e0203 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*0120*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -o matrix_gpu
#include <stdio.h>
const int blocksize = 16;
const int N = 256;
const int gridsize = N / blocksize;
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
// coalesced
/*
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
*/
// Non Coalesced
int index_y = blockIdx.x * blockDim.x + threadIdx.x;
int index_x = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
}
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
unsigned long size = N*N*sizeof(float);
float *gpu_a;
float *gpu_b;
float *gpu_c;
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
cudaEvent_t begin;
cudaEvent_t end;
float elapsed;
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaMalloc( (void**)&gpu_a, size);
cudaMalloc( (void**)&gpu_b, size);
cudaMalloc( (void**)&gpu_c, size);
// dim3 dimBlock( blockDim.x, blockDim.y, 1);
dim3 dimBlock( blocksize,blocksize,1);
// dim3 dimGrid( blockIdx.x, blockIdx.y );
dim3 dimGrid( gridsize, gridsize );
cudaMemcpy( gpu_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( gpu_b, b, size, cudaMemcpyHostToDevice );
cudaEventRecord(begin, 0);
add_matrix<<<dimGrid, dimBlock>>>(gpu_a,gpu_b,gpu_c,N);
cudaThreadSynchronize();
cudaEventRecord(end, 0);
cudaMemcpy( c, gpu_c, size, cudaMemcpyDeviceToHost );
cudaFree( gpu_a );
cudaFree( gpu_b );
cudaFree( gpu_c );
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed, begin, end);
for (int i = N-16; i < N; i++)
{
for (int j = N-16; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}
printf("\n");
//printf("Blocksize = %i\tN = %i Time : %f\n",blocksize,N,elapsed*1000);
printf("%f\n",elapsed*1000);
} | .file "tmpxft_0013ad9f_00000000-6_matrix_gpu.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10add_matrixPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
.globl _Z10add_matrixPfS_S_i
.type _Z10add_matrixPfS_S_i, @function
_Z10add_matrixPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10add_matrixPfS_S_i, .-_Z10add_matrixPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "%0.2f "
.LC2:
.string "\n"
.LC4:
.string "%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $262144, %edi
call _Znam@PLT
movq %rax, %rbp
movl $262144, %edi
call _Znam@PLT
movq %rax, %rbx
movl $262144, %edi
call _Znam@PLT
movq %rax, %r12
movl $10, %esi
movl $0, %ecx
movss .LC0(%rip), %xmm2
.L12:
leaq 0(,%rcx,4), %rdx
movl $0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %esi, %xmm1
.L13:
movss %xmm1, 0(%rbp,%rdx)
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss %xmm2, %xmm0
movss %xmm0, (%rbx,%rdx)
addl $1, %eax
addq $1024, %rdx
cmpl $256, %eax
jne .L13
addq $1, %rcx
addl $1, %esi
cmpq $256, %rcx
jne .L12
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
leaq 8(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
movl $16, 48(%rsp)
movl $16, 52(%rsp)
movl $1, 56(%rsp)
movl $16, 60(%rsp)
movl $16, 64(%rsp)
movl $1, 68(%rsp)
movl $1, %ecx
movl $262144, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $262144, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L15:
call cudaThreadSynchronize@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, %ecx
movl $262144, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 263104(%r12), %rbp
movl $240, %r13d
leaq .LC1(%rip), %r12
leaq .LC2(%rip), %r14
jmp .L16
.L23:
movl $256, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
jmp .L15
.L24:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq $4, %rbp
cmpl $256, %r13d
je .L18
.L16:
leaq -16384(%rbp), %rbx
.L17:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1024, %rbx
cmpq %rbp, %rbx
jne .L17
jmp .L24
.L18:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss .LC3(%rip), %xmm0
mulss 4(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z10add_matrixPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z10add_matrixPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 998244352
.align 4
.LC3:
.long 1148846080
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -o matrix_gpu
#include <stdio.h>
const int blocksize = 16;
const int N = 256;
const int gridsize = N / blocksize;
// Element-wise add of two N*N float matrices: c[index] = a[index] + b[index].
// One thread per element; launched with a 2-D grid of 2-D blocks.
// NOTE(review): the parameter `N` shadows the file-scope `const int N` and is
// never read inside the kernel body.
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
// coalesced
/*
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
*/
// Non Coalesced
// index_y follows threadIdx.x but is multiplied by grid_width below, so
// consecutive threads of a warp access addresses grid_width floats apart —
// deliberately uncoalesced, for comparison with the commented-out variant.
int index_y = blockIdx.x * blockDim.x + threadIdx.x;
int index_x = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
}
int main()
{
    // Host driver: build two N x N float matrices, add them on the GPU, time
    // the launch with CUDA events, then print the bottom-right 16x16 tile of
    // the result followed by the measured time.
    float *a = new float[N*N];
    float *b = new float[N*N];
    float *c = new float[N*N];
    unsigned long size = N*N*sizeof(float);
    float *gpu_a;
    float *gpu_b;
    float *gpu_c;
    // a[i + j*N] = 10 + i, b[i + j*N] = j/N  (j is the slow axis).
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
        {
            a[i+j*N] = 10 + i;
            b[i+j*N] = (float)j / N;
        }
    cudaEvent_t begin;
    cudaEvent_t end;
    float elapsed;
    cudaEventCreate(&begin);
    cudaEventCreate(&end);
    cudaMalloc( (void**)&gpu_a, size);
    cudaMalloc( (void**)&gpu_b, size);
    cudaMalloc( (void**)&gpu_c, size);
    dim3 dimBlock( blocksize,blocksize,1);
    dim3 dimGrid( gridsize, gridsize );
    cudaMemcpy( gpu_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( gpu_b, b, size, cudaMemcpyHostToDevice );
    cudaEventRecord(begin, 0);
    add_matrix<<<dimGrid, dimBlock>>>(gpu_a,gpu_b,gpu_c,N);
    // Fixed: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    cudaEventRecord(end, 0);
    cudaMemcpy( c, gpu_c, size, cudaMemcpyDeviceToHost );
    cudaFree( gpu_a );
    cudaFree( gpu_b );
    cudaFree( gpu_c );
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsed, begin, end);  // elapsed is in milliseconds
    cudaEventDestroy(begin);                     // events were leaked before
    cudaEventDestroy(end);
    // Print the bottom-right 16x16 tile of the result.
    for (int i = N-16; i < N; i++)
    {
        for (int j = N-16; j < N; j++)
        {
            printf("%0.2f ", c[i+j*N]);
        }
        printf("\n");
    }
    printf("\n");
    //printf("Blocksize = %i\tN = %i Time : %f\n",blocksize,N,elapsed*1000);
    // NOTE(review): elapsed is already milliseconds, so this prints
    // microseconds; kept as-is to preserve the program's output.
    printf("%f\n",elapsed*1000);
    delete[] a;   // host buffers were leaked before
    delete[] b;
    delete[] c;
    return 0;
} | // Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -o matrix_gpu
#include <hip/hip_runtime.h>
#include <stdio.h>
const int blocksize = 16;
const int N = 256;
const int gridsize = N / blocksize;
// HIP port of the CUDA kernel above: c[index] = a[index] + b[index] for an
// N*N float matrix, one thread per element.
// NOTE(review): the parameter `N` shadows the file-scope `const int N` and is
// never read inside the kernel body.
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
// coalesced
/*
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
*/
// Non Coalesced
// index_y follows threadIdx.x but is scaled by grid_width below, so adjacent
// threads access addresses grid_width floats apart (uncoalesced on purpose).
int index_y = blockIdx.x * blockDim.x + threadIdx.x;
int index_x = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
}
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
unsigned long size = N*N*sizeof(float);
float *gpu_a;
float *gpu_b;
float *gpu_c;
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
hipEvent_t begin;
hipEvent_t end;
float elapsed;
hipEventCreate(&begin);
hipEventCreate(&end);
hipMalloc( (void**)&gpu_a, size);
hipMalloc( (void**)&gpu_b, size);
hipMalloc( (void**)&gpu_c, size);
// dim3 dimBlock( blockDim.x, blockDim.y, 1);
dim3 dimBlock( blocksize,blocksize,1);
// dim3 dimGrid( blockIdx.x, blockIdx.y );
dim3 dimGrid( gridsize, gridsize );
hipMemcpy( gpu_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( gpu_b, b, size, hipMemcpyHostToDevice );
hipEventRecord(begin, 0);
add_matrix<<<dimGrid, dimBlock>>>(gpu_a,gpu_b,gpu_c,N);
hipDeviceSynchronize();
hipEventRecord(end, 0);
hipMemcpy( c, gpu_c, size, hipMemcpyDeviceToHost );
hipFree( gpu_a );
hipFree( gpu_b );
hipFree( gpu_c );
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, begin, end);
for (int i = N-16; i < N; i++)
{
for (int j = N-16; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}
printf("\n");
//printf("Blocksize = %i\tN = %i Time : %f\n",blocksize,N,elapsed*1000);
printf("%f\n",elapsed*1000);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -o matrix_gpu
#include <hip/hip_runtime.h>
#include <stdio.h>
const int blocksize = 16;
const int N = 256;
const int gridsize = N / blocksize;
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
// coalesced
/*
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
*/
// Non Coalesced
int index_y = blockIdx.x * blockDim.x + threadIdx.x;
int index_x = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
}
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
unsigned long size = N*N*sizeof(float);
float *gpu_a;
float *gpu_b;
float *gpu_c;
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
hipEvent_t begin;
hipEvent_t end;
float elapsed;
hipEventCreate(&begin);
hipEventCreate(&end);
hipMalloc( (void**)&gpu_a, size);
hipMalloc( (void**)&gpu_b, size);
hipMalloc( (void**)&gpu_c, size);
// dim3 dimBlock( blockDim.x, blockDim.y, 1);
dim3 dimBlock( blocksize,blocksize,1);
// dim3 dimGrid( blockIdx.x, blockIdx.y );
dim3 dimGrid( gridsize, gridsize );
hipMemcpy( gpu_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( gpu_b, b, size, hipMemcpyHostToDevice );
hipEventRecord(begin, 0);
add_matrix<<<dimGrid, dimBlock>>>(gpu_a,gpu_b,gpu_c,N);
hipDeviceSynchronize();
hipEventRecord(end, 0);
hipMemcpy( c, gpu_c, size, hipMemcpyDeviceToHost );
hipFree( gpu_a );
hipFree( gpu_b );
hipFree( gpu_c );
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, begin, end);
for (int i = N-16; i < N; i++)
{
for (int j = N-16; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}
printf("\n");
//printf("Blocksize = %i\tN = %i Time : %f\n",blocksize,N,elapsed*1000);
printf("%f\n",elapsed*1000);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_matrixPfS_S_i
.globl _Z10add_matrixPfS_S_i
.p2align 8
.type _Z10add_matrixPfS_S_i,@function
_Z10add_matrixPfS_S_i:          ; add_matrix(float *a, float *b, float *c, int N), gfx1100
                                ; s[0:1] = kernarg base, s14/s15 = workgroup id x/y,
                                ; v0 = packed workitem ids (offsets match the
                                ; amdgpu_metadata later in this file)
s_clause 0x1                    ; group the next two scalar loads
s_load_b32 s2, s[0:1], 0x2c     ; packed hidden_group_size_x|y (two u16 fields)
s_load_b32 s3, s[0:1], 0x20     ; hidden_block_count_x
v_and_b32_e32 v1, 0x3ff, v0     ; v1 = threadIdx.x (low 10 bits)
v_bfe_u32 v0, v0, 10, 10        ; v0 = threadIdx.y (next 10 bits)
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s2, 0xffff        ; s4 = blockDim.x
s_lshr_b32 s2, s2, 16           ; s2 = blockDim.y
v_mad_u64_u32 v[2:3], null, s14, s4, v[1:2] ; v2 = blockIdx.x*blockDim.x + tid.x  (index_y)
s_mul_i32 s3, s3, s4            ; s3 = gridDim.x*blockDim.x  (grid_width)
s_load_b128 s[4:7], s[0:1], 0x0 ; s[4:5] = a, s[6:7] = b (kernarg offsets 0 and 8)
s_mul_i32 s15, s15, s2          ; s15 = blockIdx.y*blockDim.y
s_load_b64 s[0:1], s[0:1], 0x10 ; s[0:1] = c (kernarg offset 16)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, s3, v2         ; v1 = index_y * grid_width
v_add3_u32 v0, s15, v0, v1      ; v0 = index = index_y*grid_width + index_x
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0    ; sign-extend index to 64 bits
v_lshlrev_b64 v[0:1], 2, v[0:1] ; byte offset = index * sizeof(float)
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0 ; v[2:3] = &a[index]
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0 ; v[4:5] = &b[index]
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0 ; v[0:1] = &c[index]
global_load_b32 v2, v[2:3], off ; v2 = a[index]
global_load_b32 v3, v[4:5], off ; v3 = b[index]
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)              ; wait for both loads
v_add_f32_e32 v2, v2, v3        ; sum
global_store_b32 v[0:1], v2, off ; c[index] = a[index] + b[index]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; release VGPRs before endpgm (gfx11 idiom)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_matrixPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_matrixPfS_S_i, .Lfunc_end0-_Z10add_matrixPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_matrixPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10add_matrixPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -o matrix_gpu
#include <hip/hip_runtime.h>
#include <stdio.h>
const int blocksize = 16;
const int N = 256;
const int gridsize = N / blocksize;
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
// coalesced
/*
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
*/
// Non Coalesced
int index_y = blockIdx.x * blockDim.x + threadIdx.x;
int index_x = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
c[index] = a[index] + b[index];
}
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
unsigned long size = N*N*sizeof(float);
float *gpu_a;
float *gpu_b;
float *gpu_c;
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
hipEvent_t begin;
hipEvent_t end;
float elapsed;
hipEventCreate(&begin);
hipEventCreate(&end);
hipMalloc( (void**)&gpu_a, size);
hipMalloc( (void**)&gpu_b, size);
hipMalloc( (void**)&gpu_c, size);
// dim3 dimBlock( blockDim.x, blockDim.y, 1);
dim3 dimBlock( blocksize,blocksize,1);
// dim3 dimGrid( blockIdx.x, blockIdx.y );
dim3 dimGrid( gridsize, gridsize );
hipMemcpy( gpu_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( gpu_b, b, size, hipMemcpyHostToDevice );
hipEventRecord(begin, 0);
add_matrix<<<dimGrid, dimBlock>>>(gpu_a,gpu_b,gpu_c,N);
hipDeviceSynchronize();
hipEventRecord(end, 0);
hipMemcpy( c, gpu_c, size, hipMemcpyDeviceToHost );
hipFree( gpu_a );
hipFree( gpu_b );
hipFree( gpu_c );
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, begin, end);
for (int i = N-16; i < N; i++)
{
for (int j = N-16; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}
printf("\n");
//printf("Blocksize = %i\tN = %i Time : %f\n",blocksize,N,elapsed*1000);
printf("%f\n",elapsed*1000);
} | .text
.file "matrix_gpu.hip"
.globl _Z25__device_stub__add_matrixPfS_S_i # -- Begin function _Z25__device_stub__add_matrixPfS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__add_matrixPfS_S_i,@function
_Z25__device_stub__add_matrixPfS_S_i: # @_Z25__device_stub__add_matrixPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_matrixPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__add_matrixPfS_S_i, .Lfunc_end0-_Z25__device_stub__add_matrixPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x3b800000 # float 0.00390625
.LCPI1_1:
.long 0x447a0000 # float 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $262144, %edi # imm = 0x40000
callq _Znam
movq %rax, %r15
movl $262144, %edi # imm = 0x40000
callq _Znam
movq %rax, %r14
movl $262144, %edi # imm = 0x40000
callq _Znam
movq %rax, %rbx
xorl %eax, %eax
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # %.preheader40
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
leal 10(%rcx), %edx
xorps %xmm1, %xmm1
cvtsi2ss %edx, %xmm1
movq %rax, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_2: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
movss %xmm1, (%r15,%rdx)
xorps %xmm2, %xmm2
cvtsi2ss %esi, %xmm2
mulss %xmm0, %xmm2
movss %xmm2, (%r14,%rdx)
incq %rsi
addq $1024, %rdx # imm = 0x400
cmpq $256, %rsi # imm = 0x100
jne .LBB1_2
# %bb.3: # in Loop: Header=BB1_1 Depth=1
incq %rcx
addq $4, %rax
cmpq $256, %rcx # imm = 0x100
jne .LBB1_1
# %bb.4:
leaq 48(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
leaq 32(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
movq 32(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $68719476752, %rdi # imm = 0x1000000010
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
movl $256, 44(%rsp) # imm = 0x100
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 44(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z10add_matrixPfS_S_i, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
callq hipDeviceSynchronize
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rsi
movl $262144, %edx # imm = 0x40000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 48(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 128(%rsp), %rdi
callq hipEventElapsedTime
addq $246720, %rbx # imm = 0x3C3C0
movl $240, %r14d
.p2align 4, 0x90
.LBB1_7: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_8 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_8: # Parent Loop BB1_7 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
addq $1024, %r15 # imm = 0x400
cmpq $16384, %r15 # imm = 0x4000
jne .LBB1_8
# %bb.9: # in Loop: Header=BB1_7 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $4, %rbx
cmpq $256, %r14 # imm = 0x100
jne .LBB1_7
# %bb.10:
movl $10, %edi
callq putchar@PLT
movss 128(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
mulss .LCPI1_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Registers the HIP fatbinary once (guarded by __hip_gpubin_handle) and the
# add_matrix kernel, then arranges for __hip_module_dtor to run at exit.
__hip_module_ctor:                      # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp                          # outgoing stack args + realignment
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)      # already registered?
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)    # cache the returned handle
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)                  # zero 32 bytes of trailing stack args
movups %xmm0, (%rsp)                    # (presumably NULL pointer args — confirm vs HIP runtime)
movl $_Z10add_matrixPfS_S_i, %esi       # host-side handle for the kernel
movl $.L__unnamed_1, %edx               # device function name string
movl $.L__unnamed_1, %ecx               # same string reused
movl $-1, %r8d                          # -1 (no thread limit)
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit                              # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# atexit handler: unregisters the HIP fatbinary if it was registered and
# clears the cached handle so a second invocation is a no-op.
__hip_module_dtor:                      # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi                        # nothing to do if never registered
je .LBB3_2
# %bb.1:
pushq %rax                              # realign stack to 16 for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)      # prevent double-unregister
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10add_matrixPfS_S_i,@object # @_Z10add_matrixPfS_S_i
.section .rodata,"a",@progbits
.globl _Z10add_matrixPfS_S_i
.p2align 3, 0x0
_Z10add_matrixPfS_S_i:
.quad _Z25__device_stub__add_matrixPfS_S_i
.size _Z10add_matrixPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%0.2f "
.size .L.str, 7
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%f\n"
.size .L.str.2, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10add_matrixPfS_S_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__add_matrixPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10add_matrixPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10add_matrixPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */
/* 0x000fe20000000800 */
/*0030*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0040*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */
/* 0x000fe20000000800 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0070*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0080*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fe400078e0203 */
/*00a0*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fc800078e0205 */
/*00b0*/ IMAD R0, R0, UR4, R3 ; /* 0x0000000400007c24 */
/* 0x000fe2000f8e0203 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*0120*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_matrixPfS_S_i
.globl _Z10add_matrixPfS_S_i
.p2align 8
.type _Z10add_matrixPfS_S_i,@function
_Z10add_matrixPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x20
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[2:3], null, s14, s4, v[1:2]
s_mul_i32 s3, s3, s4
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s15, s15, s2
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, s3, v2
v_add3_u32 v0, s15, v0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_matrixPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_matrixPfS_S_i, .Lfunc_end0-_Z10add_matrixPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_matrixPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10add_matrixPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0013ad9f_00000000-6_matrix_gpu.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10add_matrixPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
.globl _Z10add_matrixPfS_S_i
.type _Z10add_matrixPfS_S_i, @function
_Z10add_matrixPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10add_matrixPfS_S_i, .-_Z10add_matrixPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "%0.2f "
.LC2:
.string "\n"
.LC4:
.string "%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $262144, %edi
call _Znam@PLT
movq %rax, %rbp
movl $262144, %edi
call _Znam@PLT
movq %rax, %rbx
movl $262144, %edi
call _Znam@PLT
movq %rax, %r12
movl $10, %esi
movl $0, %ecx
movss .LC0(%rip), %xmm2
.L12:
leaq 0(,%rcx,4), %rdx
movl $0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %esi, %xmm1
.L13:
movss %xmm1, 0(%rbp,%rdx)
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss %xmm2, %xmm0
movss %xmm0, (%rbx,%rdx)
addl $1, %eax
addq $1024, %rdx
cmpl $256, %eax
jne .L13
addq $1, %rcx
addl $1, %esi
cmpq $256, %rcx
jne .L12
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
leaq 8(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
movl $16, 48(%rsp)
movl $16, 52(%rsp)
movl $1, 56(%rsp)
movl $16, 60(%rsp)
movl $16, 64(%rsp)
movl $1, 68(%rsp)
movl $1, %ecx
movl $262144, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $262144, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L15:
call cudaThreadSynchronize@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, %ecx
movl $262144, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 263104(%r12), %rbp
movl $240, %r13d
leaq .LC1(%rip), %r12
leaq .LC2(%rip), %r14
jmp .L16
.L23:
movl $256, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z10add_matrixPfS_S_iPfS_S_i
jmp .L15
.L24:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq $4, %rbp
cmpl $256, %r13d
je .L18
.L16:
leaq -16384(%rbp), %rbx
.L17:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1024, %rbx
cmpq %rbp, %rbx
jne .L17
jmp .L24
.L18:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss .LC3(%rip), %xmm0
mulss 4(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z10add_matrixPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z10add_matrixPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 998244352
.align 4
.LC3:
.long 1148846080
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrix_gpu.hip"
.globl _Z25__device_stub__add_matrixPfS_S_i # -- Begin function _Z25__device_stub__add_matrixPfS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__add_matrixPfS_S_i,@function
_Z25__device_stub__add_matrixPfS_S_i: # @_Z25__device_stub__add_matrixPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_matrixPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__add_matrixPfS_S_i, .Lfunc_end0-_Z25__device_stub__add_matrixPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x3b800000 # float 0.00390625
.LCPI1_1:
.long 0x447a0000 # float 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $262144, %edi # imm = 0x40000
callq _Znam
movq %rax, %r15
movl $262144, %edi # imm = 0x40000
callq _Znam
movq %rax, %r14
movl $262144, %edi # imm = 0x40000
callq _Znam
movq %rax, %rbx
xorl %eax, %eax
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # %.preheader40
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
leal 10(%rcx), %edx
xorps %xmm1, %xmm1
cvtsi2ss %edx, %xmm1
movq %rax, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_2: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
movss %xmm1, (%r15,%rdx)
xorps %xmm2, %xmm2
cvtsi2ss %esi, %xmm2
mulss %xmm0, %xmm2
movss %xmm2, (%r14,%rdx)
incq %rsi
addq $1024, %rdx # imm = 0x400
cmpq $256, %rsi # imm = 0x100
jne .LBB1_2
# %bb.3: # in Loop: Header=BB1_1 Depth=1
incq %rcx
addq $4, %rax
cmpq $256, %rcx # imm = 0x100
jne .LBB1_1
# %bb.4:
leaq 48(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
leaq 32(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
movq 32(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $68719476752, %rdi # imm = 0x1000000010
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
movl $256, 44(%rsp) # imm = 0x100
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 44(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z10add_matrixPfS_S_i, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
callq hipDeviceSynchronize
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rsi
movl $262144, %edx # imm = 0x40000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 48(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 128(%rsp), %rdi
callq hipEventElapsedTime
addq $246720, %rbx # imm = 0x3C3C0
movl $240, %r14d
.p2align 4, 0x90
.LBB1_7: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_8 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_8: # Parent Loop BB1_7 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
addq $1024, %r15 # imm = 0x400
cmpq $16384, %r15 # imm = 0x4000
jne .LBB1_8
# %bb.9: # in Loop: Header=BB1_7 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $4, %rbx
cmpq $256, %r14 # imm = 0x100
jne .LBB1_7
# %bb.10:
movl $10, %edi
callq putchar@PLT
movss 128(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
mulss .LCPI1_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10add_matrixPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10add_matrixPfS_S_i,@object # @_Z10add_matrixPfS_S_i
.section .rodata,"a",@progbits
.globl _Z10add_matrixPfS_S_i
.p2align 3, 0x0
_Z10add_matrixPfS_S_i:
.quad _Z25__device_stub__add_matrixPfS_S_i
.size _Z10add_matrixPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%0.2f "
.size .L.str, 7
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%f\n"
.size .L.str.2, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10add_matrixPfS_S_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__add_matrixPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10add_matrixPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
#include <bits/stdc++.h>
using namespace std;
using namespace std::chrono;
__global__ void maximum(int *input) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if(input[second] > input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void minimum(int *input, int n) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if((first < n && second < n) && input[second] < input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void gpu_sum(int *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
__global__ void mean_diff_sq(float *input, float mean) {
input[threadIdx.x] -= mean;
input[threadIdx.x] *= input[threadIdx.x];
}
void copy_int_to_float(float *dest, int *src, int size){
for(int i = 0; i < size; i++)
dest[i] = (float)src[i];
}
__global__ void gpu_sd(float *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
long cpu_sum(int *input, int n) {
long sum = 0;
for(int i = 0 ; i < n ; i++) {
sum += input[i];
}
return sum;
}
long cpu_min(int *arr, int n) {
int min = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] < min)
min = arr[i];
}
return min;
}
long cpu_max(int *arr, int n) {
int max = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] > max)
max = arr[i];
}
return max;
}
double cpu_sd(int *arr, int n, float mean) {
float *arr_std = new float[n];
for(int i = 0 ; i < n ; i++) {
arr_std[i] = pow(((float)arr[i] - mean),2);
}
double total = 0;
for(int i = 0 ; i < n ; i++) {
total += arr_std[i];
}
total = total / n;
return sqrt(total);
}
void random_init(int *arr, int n) {
for(int i = 0 ; i < n ; i++) {
arr[i] = rand()%1000;
}
}
int main() {
int *d;
int n = 80;
int *arr = new int[n];
int result;
int size = n * sizeof(int);
random_init(arr,n);
cout<<"Input Array: [";
for(int i = 0 ; i < n ; i++) {
cout<<arr[i]<<", ";
}
cout<<"]"<<endl;
cout<<"======================================="<<endl;
cudaMalloc((void **)&d,size);
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
float gpu_elapsed_time;
cudaEvent_t gpu_start,gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start,0);
gpu_sum<<<1,n/2>>>(d);
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Sum is: "<<result<<"\n";
float mean = (double)result/n;
cout<<"GPU Mean is: "<<mean<<endl;
float *arr_float = new float[n];
float *arr_std, std;
cudaMalloc((void **)&arr_std,n*sizeof(float));
copy_int_to_float(arr_float, arr, n);
cudaMemcpy(arr_std,arr_float,n*sizeof(float),cudaMemcpyHostToDevice);
mean_diff_sq <<<1,n>>>(arr_std, mean);
gpu_sd <<<1,n/2>>>(arr_std);
cudaMemcpy(&std,arr_std,sizeof(float),cudaMemcpyDeviceToHost);
cout<<"GPU Standard Deviation: "<<sqrt(std/n)<<endl;
cout<<"======================================="<<endl;
auto start = high_resolution_clock::now();
ios_base::sync_with_stdio(false);
result = cpu_sum(arr,n);
cout<<"CPU Sum is: "<<result<<"\n";
auto stop = high_resolution_clock::now();
double time_taken = chrono::duration_cast<chrono::milliseconds>(stop - start).count();
time_taken *= 1e-9;
mean = (float)result/n;
cout<<"CPU Mean is: "<<mean<<endl;
std = cpu_sd(arr, n, mean);
cout<<"CPU Standard Deviation: "<<std<<endl;
cout<<"======================================="<<endl;
result = 0;
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
minimum<<<1,n/2>>>(d,n);
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Min is: "<<result<<endl;
result = cpu_min(arr,n);
cout<<"CPU Min is: "<<result<<"\n";
cout<<"======================================="<<endl;
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
maximum<<<1,n/2>>>(d);
int gMax;
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Max is: "<<result<<endl;
result = cpu_max(arr,n);
cout<<"CPU Max is: "<<result<<"\n";
cout<<"======================================="<<endl;
return 0;
} | code for sm_80
Function : _Z6gpu_sdPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */
/* 0x000e220000002100 */
/*0020*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0040*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe20000000000 */
/*0050*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f06270 */
/*0060*/ UMOV UR5, 0x0 ; /* 0x0000000000057882 */
/* 0x000fd80000000000 */
/*0070*/ @!P0 BRA 0x1f0 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0080*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fe200078e00ff */
/*0090*/ SHF.L.U32 R12, R11, 0x1, RZ ; /* 0x000000010b0c7819 */
/* 0x001fe200000006ff */
/*00a0*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fc60000000000 */
/*00b0*/ ISETP.GE.AND P0, PT, R11, R0, PT ; /* 0x000000000b00720c */
/* 0x000fda0003f06270 */
/*00c0*/ @!P0 IMAD R4, R12, UR4, RZ ; /* 0x000000040c048c24 */
/* 0x000fe4000f8e02ff */
/*00d0*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff038424 */
/* 0x000fc600078e00ff */
/*00e0*/ @!P0 IADD3 R2, R4.reuse, UR4, RZ ; /* 0x0000000404028c10 */
/* 0x040fe2000fffe0ff */
/*00f0*/ @!P0 IMAD.WIDE R4, R4, R3, c[0x0][0x160] ; /* 0x0000580004048625 */
/* 0x000fc800078e0203 */
/*0100*/ @!P0 IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002028625 */
/* 0x000fe200078e0203 */
/*0110*/ @!P0 LDG.E R8, [R4.64] ; /* 0x0000000804088981 */
/* 0x000eaa000c1e1900 */
/*0120*/ @!P0 LDG.E R3, [R2.64] ; /* 0x0000000802038981 */
/* 0x000ea2000c1e1900 */
/*0130*/ ISETP.NE.AND P1, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f25270 */
/*0140*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x000fe2000800063f */
/*0150*/ MOV R10, RZ ; /* 0x000000ff000a7202 */
/* 0x000fd60000000f00 */
/*0160*/ @P1 I2F.F64 R6, R0 ; /* 0x0000000000061312 */
/* 0x000e240000201c00 */
/*0170*/ @P1 DMUL R6, R6, 0.5 ; /* 0x3fe0000006061828 */
/* 0x001e0c0000000000 */
/*0180*/ @P1 F2I.F64.CEIL R10, R6 ; /* 0x00000006000a1311 */
/* 0x001e240000309100 */
/*0190*/ IMAD.MOV.U32 R0, RZ, RZ, R10 ; /* 0x000000ffff007224 */
/* 0x001fe400078e000a */
/*01a0*/ @!P0 FADD R9, R8, R3 ; /* 0x0000000308098221 */
/* 0x004fca0000000000 */
/*01b0*/ @!P0 STG.E [R4.64], R9 ; /* 0x0000000904008986 */
/* 0x0001e2000c101908 */
/*01c0*/ ISETP.GT.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fda0003f04270 */
/*01d0*/ @P0 BRA 0xb0 ; /* 0xfffffed000000947 */
/* 0x001fea000383ffff */
/*01e0*/ USHF.R.S32.HI UR5, URZ, 0x1f, UR4 ; /* 0x0000001f3f057899 */
/* 0x000fe40008011404 */
/*01f0*/ ISETP.NE.AND P0, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x001fda0003f05270 */
/*0200*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0210*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0220*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0230*/ ULEA UR6, UP0, UR4, UR6, 0x2 ; /* 0x0000000604067291 */
/* 0x000fe2000f80103f */
/*0240*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fc600078e00ff */
/*0250*/ ULEA.HI.X UR7, UR4, UR7, UR5, 0x2, UP0 ; /* 0x0000000704077291 */
/* 0x000fe400080f1405 */
/*0260*/ MOV R4, UR6 ; /* 0x0000000600047c02 */
/* 0x000fe20008000f00 */
/*0270*/ LDG.E R0, [R2.64] ; /* 0x0000000802007981 */
/* 0x000ea6000c1e1900 */
/*0280*/ IMAD.U32 R5, RZ, RZ, UR7 ; /* 0x00000007ff057e24 */
/* 0x000fcc000f8e00ff */
/*0290*/ LDG.E R5, [R4.64] ; /* 0x0000000804057981 */
/* 0x000ea4000c1e1900 */
/*02a0*/ FADD R7, R0, R5 ; /* 0x0000000500077221 */
/* 0x004fca0000000000 */
/*02b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101908 */
/*02c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z12mean_diff_sqPff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x001fca00078e0003 */
/*0050*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0060*/ FADD R0, R0, -c[0x0][0x168] ; /* 0x80005a0000007621 */
/* 0x004fc80000000000 */
/*0070*/ FMUL R5, R0, R0 ; /* 0x0000000000057220 */
/* 0x000fca0000400000 */
/*0080*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0090*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00a0*/ BRA 0xa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7gpu_sumPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R11, SR_TID.X ; /* 0x00000000000b7919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0040*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0050*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f06270 */
/*0060*/ UMOV UR5, 0x0 ; /* 0x0000000000057882 */
/* 0x000fd80000000000 */
/*0070*/ @!P0 BRA 0x1f0 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0080*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0090*/ IMAD.SHL.U32 R12, R11, 0x2, RZ ; /* 0x000000020b0c7824 */
/* 0x001fe200078e00ff */
/*00a0*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*00b0*/ ISETP.GE.AND P0, PT, R11, R0, PT ; /* 0x000000000b00720c */
/* 0x000fda0003f06270 */
/*00c0*/ @!P0 IMAD R4, R12, UR4, RZ ; /* 0x000000040c048c24 */
/* 0x000fe2000f8e02ff */
/*00d0*/ @!P0 MOV R3, 0x4 ; /* 0x0000000400038802 */
/* 0x000fc80000000f00 */
/*00e0*/ @!P0 IADD3 R2, R4.reuse, UR4, RZ ; /* 0x0000000404028c10 */
/* 0x040fe2000fffe0ff */
/*00f0*/ @!P0 IMAD.WIDE R4, R4, R3, c[0x0][0x160] ; /* 0x0000580004048625 */
/* 0x000fc800078e0203 */
/*0100*/ @!P0 IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002028625 */
/* 0x000fe200078e0203 */
/*0110*/ @!P0 LDG.E R8, [R4.64] ; /* 0x0000000804088981 */
/* 0x000eaa000c1e1900 */
/*0120*/ @!P0 LDG.E R3, [R2.64] ; /* 0x0000000802038981 */
/* 0x000ea2000c1e1900 */
/*0130*/ ISETP.NE.AND P1, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f25270 */
/*0140*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e00ff */
/*0150*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x000fd6000800063f */
/*0160*/ @P1 I2F.F64 R6, R0 ; /* 0x0000000000061312 */
/* 0x000e240000201c00 */
/*0170*/ @P1 DMUL R6, R6, 0.5 ; /* 0x3fe0000006061828 */
/* 0x001e0c0000000000 */
/*0180*/ @P1 F2I.F64.CEIL R10, R6 ; /* 0x00000006000a1311 */
/* 0x001e240000309100 */
/*0190*/ IMAD.MOV.U32 R0, RZ, RZ, R10 ; /* 0x000000ffff007224 */
/* 0x001fe200078e000a */
/*01a0*/ @!P0 IADD3 R9, R8, R3, RZ ; /* 0x0000000308098210 */
/* 0x004fca0007ffe0ff */
/*01b0*/ @!P0 STG.E [R4.64], R9 ; /* 0x0000000904008986 */
/* 0x0001e2000c101908 */
/*01c0*/ ISETP.GT.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fda0003f04270 */
/*01d0*/ @P0 BRA 0xb0 ; /* 0xfffffed000000947 */
/* 0x001fea000383ffff */
/*01e0*/ USHF.R.S32.HI UR5, URZ, 0x1f, UR4 ; /* 0x0000001f3f057899 */
/* 0x000fe40008011404 */
/*01f0*/ ISETP.NE.AND P0, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x001fda0003f05270 */
/*0200*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0210*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0220*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0230*/ ULEA UR6, UP0, UR4, UR6, 0x2 ; /* 0x0000000604067291 */
/* 0x000fe2000f80103f */
/*0240*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fc600078e00ff */
/*0250*/ ULEA.HI.X UR7, UR4, UR7, UR5, 0x2, UP0 ; /* 0x0000000704077291 */
/* 0x000fe400080f1405 */
/*0260*/ MOV R4, UR6 ; /* 0x0000000600047c02 */
/* 0x000fe20008000f00 */
/*0270*/ LDG.E R0, [R2.64] ; /* 0x0000000802007981 */
/* 0x000ea6000c1e1900 */
/*0280*/ IMAD.U32 R5, RZ, RZ, UR7 ; /* 0x00000007ff057e24 */
/* 0x000fcc000f8e00ff */
/*0290*/ LDG.E R5, [R4.64] ; /* 0x0000000804057981 */
/* 0x000ea4000c1e1900 */
/*02a0*/ IADD3 R7, R0, R5, RZ ; /* 0x0000000500077210 */
/* 0x004fca0007ffe0ff */
/*02b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101908 */
/*02c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7minimumPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0050*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fe200078e00ff */
/*0060*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0070*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0080*/ SHF.L.U32 R8, R7, 0x1, RZ ; /* 0x0000000107087819 */
/* 0x001fe400000006ff */
/*0090*/ ISETP.NE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f05270 */
/*00a0*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe200078e00ff */
/*00b0*/ BSSY B0, 0x1e0 ; /* 0x0000012000007945 */
/* 0x000ff60003800000 */
/*00c0*/ @P0 I2F.F64 R2, R0 ; /* 0x0000000000020312 */
/* 0x000e240000201c00 */
/*00d0*/ @P0 DMUL R2, R2, 0.5 ; /* 0x3fe0000002020828 */
/* 0x001e0c0000000000 */
/*00e0*/ @P0 F2I.F64.CEIL R6, R2 ; /* 0x0000000200060311 */
/* 0x0010620000309100 */
/*00f0*/ ISETP.GE.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06270 */
/*0100*/ @P0 BRA 0x1d0 ; /* 0x000000c000000947 */
/* 0x000fea0003800000 */
/*0110*/ IMAD R5, R8, UR4, RZ ; /* 0x0000000408057c24 */
/* 0x001fca000f8e02ff */
/*0120*/ IADD3 R3, R5, UR4, RZ ; /* 0x0000000405037c10 */
/* 0x000fc8000fffe0ff */
/*0130*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x168], PT ; /* 0x00005a0003007a0c */
/* 0x000fc80003f06270 */
/*0140*/ ISETP.GE.OR P0, PT, R5, c[0x0][0x168], P0 ; /* 0x00005a0005007a0c */
/* 0x000fda0000706670 */
/*0150*/ @P0 BRA 0x1d0 ; /* 0x0000007000000947 */
/* 0x000fea0003800000 */
/*0160*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.WIDE R2, R3, R4, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fc800078e0204 */
/*0180*/ IMAD.WIDE R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x000fe400078e0204 */
/*0190*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea8000c1e1900 */
/*01a0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea4000c1e1900 */
/*01b0*/ ISETP.GE.AND P0, PT, R3, R0, PT ; /* 0x000000000300720c */
/* 0x004fda0003f06270 */
/*01c0*/ @!P0 STG.E [R4.64], R3 ; /* 0x0000000304008986 */
/* 0x0001e4000c101906 */
/*01d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*01e0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x002fe20003f04270 */
/*01f0*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x000fe2000800063f */
/*0200*/ MOV R0, R6 ; /* 0x0000000600007202 */
/* 0x000fd60000000f00 */
/*0210*/ @P0 BRA 0x90 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7maximumPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0050*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0060*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0070*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0080*/ IMAD.SHL.U32 R8, R7, 0x2, RZ ; /* 0x0000000207087824 */
/* 0x001fe400078e00ff */
/*0090*/ ISETP.NE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f05270 */
/*00a0*/ BSSY B0, 0x1b0 ; /* 0x0000010000007945 */
/* 0x000fe20003800000 */
/*00b0*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fd60000000f00 */
/*00c0*/ @P0 I2F.F64 R2, R0 ; /* 0x0000000000020312 */
/* 0x000e240000201c00 */
/*00d0*/ @P0 DMUL R2, R2, 0.5 ; /* 0x3fe0000002020828 */
/* 0x001e0c0000000000 */
/*00e0*/ @P0 F2I.F64.CEIL R6, R2 ; /* 0x0000000200060311 */
/* 0x0010620000309100 */
/*00f0*/ ISETP.GE.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06270 */
/*0100*/ @P0 BRA 0x1a0 ; /* 0x0000009000000947 */
/* 0x000fea0003800000 */
/*0110*/ IMAD R4, R8, UR4, RZ ; /* 0x0000000408047c24 */
/* 0x001fe4000f8e02ff */
/*0120*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc600078e00ff */
/*0130*/ IADD3 R2, R4.reuse, UR4, RZ ; /* 0x0000000404027c10 */
/* 0x040fe2000fffe0ff */
/*0140*/ IMAD.WIDE R4, R4, R3, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc800078e0203 */
/*0150*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe200078e0203 */
/*0160*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000eaa000c1e1900 */
/*0170*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea4000c1e1900 */
/*0180*/ ISETP.GT.AND P0, PT, R3, R0, PT ; /* 0x000000000300720c */
/* 0x004fda0003f04270 */
/*0190*/ @P0 STG.E [R4.64], R3 ; /* 0x0000000304000986 */
/* 0x0001e4000c101906 */
/*01a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*01b0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x002fe20003f04270 */
/*01c0*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x000fe2000800063f */
/*01d0*/ MOV R0, R6 ; /* 0x0000000600007202 */
/* 0x000fd60000000f00 */
/*01e0*/ @P0 BRA 0x90 ; /* 0xfffffea000000947 */
/* 0x000fea000383ffff */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
#include <bits/stdc++.h>
using namespace std;
using namespace std::chrono;
__global__ void maximum(int *input) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if(input[second] > input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void minimum(int *input, int n) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if((first < n && second < n) && input[second] < input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void gpu_sum(int *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
__global__ void mean_diff_sq(float *input, float mean) {
input[threadIdx.x] -= mean;
input[threadIdx.x] *= input[threadIdx.x];
}
void copy_int_to_float(float *dest, int *src, int size){
for(int i = 0; i < size; i++)
dest[i] = (float)src[i];
}
__global__ void gpu_sd(float *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
long cpu_sum(int *input, int n) {
long sum = 0;
for(int i = 0 ; i < n ; i++) {
sum += input[i];
}
return sum;
}
long cpu_min(int *arr, int n) {
int min = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] < min)
min = arr[i];
}
return min;
}
long cpu_max(int *arr, int n) {
int max = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] > max)
max = arr[i];
}
return max;
}
double cpu_sd(int *arr, int n, float mean) {
float *arr_std = new float[n];
for(int i = 0 ; i < n ; i++) {
arr_std[i] = pow(((float)arr[i] - mean),2);
}
double total = 0;
for(int i = 0 ; i < n ; i++) {
total += arr_std[i];
}
total = total / n;
return sqrt(total);
}
void random_init(int *arr, int n) {
for(int i = 0 ; i < n ; i++) {
arr[i] = rand()%1000;
}
}
int main() {
int *d;
int n = 80;
int *arr = new int[n];
int result;
int size = n * sizeof(int);
random_init(arr,n);
cout<<"Input Array: [";
for(int i = 0 ; i < n ; i++) {
cout<<arr[i]<<", ";
}
cout<<"]"<<endl;
cout<<"======================================="<<endl;
cudaMalloc((void **)&d,size);
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
float gpu_elapsed_time;
cudaEvent_t gpu_start,gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start,0);
gpu_sum<<<1,n/2>>>(d);
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Sum is: "<<result<<"\n";
float mean = (double)result/n;
cout<<"GPU Mean is: "<<mean<<endl;
float *arr_float = new float[n];
float *arr_std, std;
cudaMalloc((void **)&arr_std,n*sizeof(float));
copy_int_to_float(arr_float, arr, n);
cudaMemcpy(arr_std,arr_float,n*sizeof(float),cudaMemcpyHostToDevice);
mean_diff_sq <<<1,n>>>(arr_std, mean);
gpu_sd <<<1,n/2>>>(arr_std);
cudaMemcpy(&std,arr_std,sizeof(float),cudaMemcpyDeviceToHost);
cout<<"GPU Standard Deviation: "<<sqrt(std/n)<<endl;
cout<<"======================================="<<endl;
auto start = high_resolution_clock::now();
ios_base::sync_with_stdio(false);
result = cpu_sum(arr,n);
cout<<"CPU Sum is: "<<result<<"\n";
auto stop = high_resolution_clock::now();
double time_taken = chrono::duration_cast<chrono::milliseconds>(stop - start).count();
time_taken *= 1e-9;
mean = (float)result/n;
cout<<"CPU Mean is: "<<mean<<endl;
std = cpu_sd(arr, n, mean);
cout<<"CPU Standard Deviation: "<<std<<endl;
cout<<"======================================="<<endl;
result = 0;
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
minimum<<<1,n/2>>>(d,n);
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Min is: "<<result<<endl;
result = cpu_min(arr,n);
cout<<"CPU Min is: "<<result<<"\n";
cout<<"======================================="<<endl;
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
maximum<<<1,n/2>>>(d);
int gMax;
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Max is: "<<result<<endl;
result = cpu_max(arr,n);
cout<<"CPU Max is: "<<result<<"\n";
cout<<"======================================="<<endl;
return 0;
} | .file "tmpxft_001ae651_00000000-6_Assignment1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB10870:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10870:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z17copy_int_to_floatPfPii
.type _Z17copy_int_to_floatPfPii, @function
_Z17copy_int_to_floatPfPii:
.LFB10859:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L3
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
.L5:
pxor %xmm0, %xmm0
cvtsi2ssl (%rsi,%rax), %xmm0
movss %xmm0, (%rdi,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE10859:
.size _Z17copy_int_to_floatPfPii, .-_Z17copy_int_to_floatPfPii
.globl _Z7cpu_sumPii
.type _Z7cpu_sumPii, @function
_Z7cpu_sumPii:
.LFB10860:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L10
movq %rdi, %rax
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rsi
movl $0, %edx
.L9:
movslq (%rax), %rcx
addq %rcx, %rdx
addq $4, %rax
cmpq %rsi, %rax
jne .L9
.L7:
movq %rdx, %rax
ret
.L10:
movl $0, %edx
jmp .L7
.cfi_endproc
.LFE10860:
.size _Z7cpu_sumPii, .-_Z7cpu_sumPii
.globl _Z7cpu_minPii
.type _Z7cpu_minPii, @function
_Z7cpu_minPii:
.LFB10861:
.cfi_startproc
endbr64
movl (%rdi), %eax
cmpl $1, %esi
jle .L13
leaq 4(%rdi), %rdx
leal -2(%rsi), %ecx
leaq 8(%rdi,%rcx,4), %rsi
.L14:
movl (%rdx), %ecx
cmpl %ecx, %eax
cmovg %ecx, %eax
addq $4, %rdx
cmpq %rsi, %rdx
jne .L14
.L13:
cltq
ret
.cfi_endproc
.LFE10861:
.size _Z7cpu_minPii, .-_Z7cpu_minPii
.globl _Z7cpu_maxPii
.type _Z7cpu_maxPii, @function
_Z7cpu_maxPii:
.LFB10862:
.cfi_startproc
endbr64
movl (%rdi), %eax
cmpl $1, %esi
jle .L17
leaq 4(%rdi), %rdx
leal -2(%rsi), %ecx
leaq 8(%rdi,%rcx,4), %rsi
.L18:
movl (%rdx), %ecx
cmpl %ecx, %eax
cmovl %ecx, %eax
addq $4, %rdx
cmpq %rsi, %rdx
jne .L18
.L17:
cltq
ret
.cfi_endproc
.LFE10862:
.size _Z7cpu_maxPii, .-_Z7cpu_maxPii
.globl _Z6cpu_sdPiif
.type _Z6cpu_sdPiif, @function
_Z6cpu_sdPiif:
.LFB10863:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movss %xmm0, 12(%rsp)
movslq %esi, %rbx
movabsq $2305843009213693950, %rax
cmpq %rbx, %rax
jb .L21
movq %rdi, %r12
movl %esi, %ebp
salq $2, %rbx
movq %rbx, %rdi
call _Znam@PLT
movl $0, %edx
testl %ebp, %ebp
jle .L36
.L22:
pxor %xmm0, %xmm0
cvtsi2ssl (%r12,%rdx), %xmm0
subss 12(%rsp), %xmm0
mulss %xmm0, %xmm0
movss %xmm0, (%rax,%rdx)
addq $4, %rdx
cmpq %rdx, %rbx
jne .L22
movq %rax, %rdx
addq %rbx, %rax
pxor %xmm0, %xmm0
.L24:
pxor %xmm1, %xmm1
cvtss2sd (%rdx), %xmm1
addsd %xmm1, %xmm0
addq $4, %rdx
cmpq %rax, %rdx
jne .L24
pxor %xmm1, %xmm1
cvtsi2sdl %ebp, %xmm1
divsd %xmm1, %xmm0
pxor %xmm1, %xmm1
ucomisd %xmm0, %xmm1
ja .L34
.L28:
sqrtsd %xmm0, %xmm0
.L20:
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
call __cxa_throw_bad_array_new_length@PLT
.L34:
call sqrt@PLT
jmp .L20
.L36:
pxor %xmm1, %xmm1
cvtsi2sdl %ebp, %xmm1
pxor %xmm0, %xmm0
divsd %xmm1, %xmm0
jmp .L28
.cfi_endproc
.LFE10863:
.size _Z6cpu_sdPiif, .-_Z6cpu_sdPiif
.globl _Z11random_initPii
.type _Z11random_initPii, @function
_Z11random_initPii:
.LFB10864:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L42
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L39:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $38, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L39
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L42:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE10864:
.size _Z11random_initPii, .-_Z11random_initPii
.globl _Z26__device_stub__Z7maximumPiPi
.type _Z26__device_stub__Z7maximumPiPi, @function
_Z26__device_stub__Z7maximumPiPi:
.LFB10892:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L49
.L45:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L50
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7maximumPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L45
.L50:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10892:
.size _Z26__device_stub__Z7maximumPiPi, .-_Z26__device_stub__Z7maximumPiPi
.globl _Z7maximumPi
.type _Z7maximumPi, @function
_Z7maximumPi:
.LFB10893:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z7maximumPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10893:
.size _Z7maximumPi, .-_Z7maximumPi
.globl _Z27__device_stub__Z7minimumPiiPii
.type _Z27__device_stub__Z7minimumPiiPii, @function
_Z27__device_stub__Z7minimumPiiPii:
.LFB10894:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L57
.L53:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L58
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7minimumPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L53
.L58:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10894:
.size _Z27__device_stub__Z7minimumPiiPii, .-_Z27__device_stub__Z7minimumPiiPii
.globl _Z7minimumPii
.type _Z7minimumPii, @function
_Z7minimumPii:
.LFB10895:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z7minimumPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10895:
.size _Z7minimumPii, .-_Z7minimumPii
.globl _Z26__device_stub__Z7gpu_sumPiPi
.type _Z26__device_stub__Z7gpu_sumPiPi, @function
_Z26__device_stub__Z7gpu_sumPiPi:
.LFB10896:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L65
.L61:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L66
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L65:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7gpu_sumPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L61
.L66:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10896:
.size _Z26__device_stub__Z7gpu_sumPiPi, .-_Z26__device_stub__Z7gpu_sumPiPi
.globl _Z7gpu_sumPi
.type _Z7gpu_sumPi, @function
_Z7gpu_sumPi:
.LFB10897:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z7gpu_sumPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10897:
.size _Z7gpu_sumPi, .-_Z7gpu_sumPi
.globl _Z33__device_stub__Z12mean_diff_sqPffPff
.type _Z33__device_stub__Z12mean_diff_sqPffPff, @function
_Z33__device_stub__Z12mean_diff_sqPffPff:
.LFB10898:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movss %xmm0, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L73
.L69:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L74
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L73:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z12mean_diff_sqPff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L69
.L74:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10898:
.size _Z33__device_stub__Z12mean_diff_sqPffPff, .-_Z33__device_stub__Z12mean_diff_sqPffPff
.globl _Z12mean_diff_sqPff
.type _Z12mean_diff_sqPff, @function
_Z12mean_diff_sqPff:
.LFB10899:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z12mean_diff_sqPffPff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10899:
.size _Z12mean_diff_sqPff, .-_Z12mean_diff_sqPff
.globl _Z25__device_stub__Z6gpu_sdPfPf
.type _Z25__device_stub__Z6gpu_sdPfPf, @function
_Z25__device_stub__Z6gpu_sdPfPf:
.LFB10900:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L81
.L77:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L82
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L81:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6gpu_sdPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L77
.L82:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10900:
.size _Z25__device_stub__Z6gpu_sdPfPf, .-_Z25__device_stub__Z6gpu_sdPfPf
.globl _Z6gpu_sdPf
.type _Z6gpu_sdPf, @function
_Z6gpu_sdPf:
.LFB10901:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z6gpu_sdPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10901:
.size _Z6gpu_sdPf, .-_Z6gpu_sdPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Input Array: ["
.LC2:
.string ", "
.LC3:
.string "]"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "======================================="
.section .rodata.str1.1
.LC5:
.string "GPU Sum is: "
.LC6:
.string "\n"
.LC8:
.string "GPU Mean is: "
.LC9:
.string "GPU Standard Deviation: "
.LC12:
.string "CPU Sum is: "
.LC13:
.string "CPU Mean is: "
.LC14:
.string "CPU Standard Deviation: "
.LC15:
.string "GPU Min is: "
.LC16:
.string "CPU Min is: "
.LC17:
.string "GPU Max is: "
.LC18:
.string "CPU Max is: "
.text
.globl main
.type main, @function
main:
.LFB10865:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $96, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $320, %edi
call _Znam@PLT
movq %rax, %r14
movl $80, %esi
movq %rax, %rdi
call _Z11random_initPii
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %r14, %rbx
leaq 320(%r14), %r13
leaq _ZSt4cout(%rip), %r12
leaq .LC2(%rip), %rbp
.L86:
movl (%rbx), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $2, %edx
movq %rbp, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %r13
jne .L86
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC4(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq 32(%rsp), %rdi
movl $320, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $320, %edx
movq %r14, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl $40, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L100
.L87:
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movq 48(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 24(%rsp), %rdi
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 40(%rsp), %rdi
call cudaEventDestroy@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 20(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
pxor %xmm0, %xmm0
cvtsi2sdl 20(%rsp), %xmm0
divsd .LC7(%rip), %xmm0
pxor %xmm2, %xmm2
cvtsd2ss %xmm0, %xmm2
movss %xmm2, 12(%rsp)
leaq .LC8(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $320, %edi
call _Znam@PLT
movq %rax, %rbx
leaq 56(%rsp), %rdi
movl $320, %esi
call cudaMalloc@PLT
movl $80, %edx
movq %r14, %rsi
movq %rbx, %rdi
call _Z17copy_int_to_floatPfPii
movl $1, %ecx
movl $320, %edx
movq %rbx, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $80, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L101
.L88:
movl $40, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L102
.L89:
leaq 28(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 56(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC9(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbx
movss 28(%rsp), %xmm0
divss .LC10(%rip), %xmm0
pxor %xmm1, %xmm1
ucomiss %xmm0, %xmm1
ja .L98
sqrtss %xmm0, %xmm0
.L92:
cvtss2sd %xmm0, %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC4(%rip), %rbp
movq %rbp, %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movl $0, %edi
call _ZNSt8ios_base15sync_with_stdioEb@PLT
movl $80, %esi
movq %r14, %rdi
call _Z7cpu_sumPii
movl %eax, 20(%rsp)
leaq .LC12(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 20(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
pxor %xmm0, %xmm0
cvtsi2ssl 20(%rsp), %xmm0
divss .LC10(%rip), %xmm0
movss %xmm0, 12(%rsp)
leaq .LC13(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movss 12(%rsp), %xmm0
movl $80, %esi
movq %r14, %rdi
call _Z6cpu_sdPiif
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 28(%rsp)
leaq .LC14(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rbp, %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, 20(%rsp)
movl $1, %ecx
movl $320, %edx
movq %r14, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $40, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L103
.L93:
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC15(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 20(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $80, %esi
movq %r14, %rdi
call _Z7cpu_minPii
movl %eax, 20(%rsp)
leaq .LC16(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 20(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq .LC4(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %ecx
movl $320, %edx
movq %r14, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $40, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L104
.L94:
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC17(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 20(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $80, %esi
movq %r14, %rdi
call _Z7cpu_maxPii
movl %eax, 20(%rsp)
leaq .LC18(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 20(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq .LC4(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L105
movl $0, %eax
addq $96, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L100:
.cfi_restore_state
movq 32(%rsp), %rdi
call _Z26__device_stub__Z7gpu_sumPiPi
jmp .L87
.L101:
movss 12(%rsp), %xmm0
movq 56(%rsp), %rdi
call _Z33__device_stub__Z12mean_diff_sqPffPff
jmp .L88
.L102:
movq 56(%rsp), %rdi
call _Z25__device_stub__Z6gpu_sdPfPf
jmp .L89
.L98:
call sqrtf@PLT
jmp .L92
.L103:
movl $80, %esi
movq 32(%rsp), %rdi
call _Z27__device_stub__Z7minimumPiiPii
jmp .L93
.L104:
movq 32(%rsp), %rdi
call _Z26__device_stub__Z7maximumPiPi
jmp .L94
.L105:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10865:
.size main, .-main
.section .rodata.str1.1
.LC19:
.string "_Z6gpu_sdPf"
.LC20:
.string "_Z12mean_diff_sqPff"
.LC21:
.string "_Z7gpu_sumPi"
.LC22:
.string "_Z7minimumPii"
.LC23:
.string "_Z7maximumPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB10903:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z6gpu_sdPf(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC20(%rip), %rdx
movq %rdx, %rcx
leaq _Z12mean_diff_sqPff(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC21(%rip), %rdx
movq %rdx, %rcx
leaq _Z7gpu_sumPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC22(%rip), %rdx
movq %rdx, %rcx
leaq _Z7minimumPii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC23(%rip), %rdx
movq %rdx, %rcx
leaq _Z7maximumPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10903:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC7:
.long 0
.long 1079246848
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC10:
.long 1117782016
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
#include <bits/stdc++.h>
using namespace std;
using namespace std::chrono;
__global__ void maximum(int *input) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if(input[second] > input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void minimum(int *input, int n) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if((first < n && second < n) && input[second] < input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void gpu_sum(int *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
__global__ void mean_diff_sq(float *input, float mean) {
input[threadIdx.x] -= mean;
input[threadIdx.x] *= input[threadIdx.x];
}
void copy_int_to_float(float *dest, int *src, int size){
for(int i = 0; i < size; i++)
dest[i] = (float)src[i];
}
__global__ void gpu_sd(float *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
long cpu_sum(int *input, int n) {
long sum = 0;
for(int i = 0 ; i < n ; i++) {
sum += input[i];
}
return sum;
}
long cpu_min(int *arr, int n) {
int min = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] < min)
min = arr[i];
}
return min;
}
long cpu_max(int *arr, int n) {
int max = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] > max)
max = arr[i];
}
return max;
}
double cpu_sd(int *arr, int n, float mean) {
float *arr_std = new float[n];
for(int i = 0 ; i < n ; i++) {
arr_std[i] = pow(((float)arr[i] - mean),2);
}
double total = 0;
for(int i = 0 ; i < n ; i++) {
total += arr_std[i];
}
total = total / n;
return sqrt(total);
}
void random_init(int *arr, int n) {
for(int i = 0 ; i < n ; i++) {
arr[i] = rand()%1000;
}
}
int main() {
int *d;
int n = 80;
int *arr = new int[n];
int result;
int size = n * sizeof(int);
random_init(arr,n);
cout<<"Input Array: [";
for(int i = 0 ; i < n ; i++) {
cout<<arr[i]<<", ";
}
cout<<"]"<<endl;
cout<<"======================================="<<endl;
cudaMalloc((void **)&d,size);
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
float gpu_elapsed_time;
cudaEvent_t gpu_start,gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start,0);
gpu_sum<<<1,n/2>>>(d);
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Sum is: "<<result<<"\n";
float mean = (double)result/n;
cout<<"GPU Mean is: "<<mean<<endl;
float *arr_float = new float[n];
float *arr_std, std;
cudaMalloc((void **)&arr_std,n*sizeof(float));
copy_int_to_float(arr_float, arr, n);
cudaMemcpy(arr_std,arr_float,n*sizeof(float),cudaMemcpyHostToDevice);
mean_diff_sq <<<1,n>>>(arr_std, mean);
gpu_sd <<<1,n/2>>>(arr_std);
cudaMemcpy(&std,arr_std,sizeof(float),cudaMemcpyDeviceToHost);
cout<<"GPU Standard Deviation: "<<sqrt(std/n)<<endl;
cout<<"======================================="<<endl;
auto start = high_resolution_clock::now();
ios_base::sync_with_stdio(false);
result = cpu_sum(arr,n);
cout<<"CPU Sum is: "<<result<<"\n";
auto stop = high_resolution_clock::now();
double time_taken = chrono::duration_cast<chrono::milliseconds>(stop - start).count();
time_taken *= 1e-9;
mean = (float)result/n;
cout<<"CPU Mean is: "<<mean<<endl;
std = cpu_sd(arr, n, mean);
cout<<"CPU Standard Deviation: "<<std<<endl;
cout<<"======================================="<<endl;
result = 0;
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
minimum<<<1,n/2>>>(d,n);
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Min is: "<<result<<endl;
result = cpu_min(arr,n);
cout<<"CPU Min is: "<<result<<"\n";
cout<<"======================================="<<endl;
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
maximum<<<1,n/2>>>(d);
int gMax;
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Max is: "<<result<<endl;
result = cpu_max(arr,n);
cout<<"CPU Max is: "<<result<<"\n";
cout<<"======================================="<<endl;
return 0;
} | #include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
#include <bits/stdc++.h>
using namespace std;
using namespace std::chrono;
__global__ void maximum(int *input) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if(input[second] > input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void minimum(int *input, int n) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if((first < n && second < n) && input[second] < input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void gpu_sum(int *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
__global__ void mean_diff_sq(float *input, float mean) {
input[threadIdx.x] -= mean;
input[threadIdx.x] *= input[threadIdx.x];
}
void copy_int_to_float(float *dest, int *src, int size){
for(int i = 0; i < size; i++)
dest[i] = (float)src[i];
}
__global__ void gpu_sd(float *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
long cpu_sum(int *input, int n) {
long sum = 0;
for(int i = 0 ; i < n ; i++) {
sum += input[i];
}
return sum;
}
long cpu_min(int *arr, int n) {
int min = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] < min)
min = arr[i];
}
return min;
}
long cpu_max(int *arr, int n) {
int max = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] > max)
max = arr[i];
}
return max;
}
double cpu_sd(int *arr, int n, float mean) {
float *arr_std = new float[n];
for(int i = 0 ; i < n ; i++) {
arr_std[i] = pow(((float)arr[i] - mean),2);
}
double total = 0;
for(int i = 0 ; i < n ; i++) {
total += arr_std[i];
}
total = total / n;
return sqrt(total);
}
void random_init(int *arr, int n) {
for(int i = 0 ; i < n ; i++) {
arr[i] = rand()%1000;
}
}
int main() {
int *d;
int n = 80;
int *arr = new int[n];
int result;
int size = n * sizeof(int);
random_init(arr,n);
cout<<"Input Array: [";
for(int i = 0 ; i < n ; i++) {
cout<<arr[i]<<", ";
}
cout<<"]"<<endl;
cout<<"======================================="<<endl;
hipMalloc((void **)&d,size);
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
float gpu_elapsed_time;
hipEvent_t gpu_start,gpu_stop;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_stop);
hipEventRecord(gpu_start,0);
gpu_sum<<<1,n/2>>>(d);
hipEventRecord(gpu_stop, 0);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
hipEventDestroy(gpu_start);
hipEventDestroy(gpu_stop);
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Sum is: "<<result<<"\n";
float mean = (double)result/n;
cout<<"GPU Mean is: "<<mean<<endl;
float *arr_float = new float[n];
float *arr_std, std;
hipMalloc((void **)&arr_std,n*sizeof(float));
copy_int_to_float(arr_float, arr, n);
hipMemcpy(arr_std,arr_float,n*sizeof(float),hipMemcpyHostToDevice);
mean_diff_sq <<<1,n>>>(arr_std, mean);
gpu_sd <<<1,n/2>>>(arr_std);
hipMemcpy(&std,arr_std,sizeof(float),hipMemcpyDeviceToHost);
cout<<"GPU Standard Deviation: "<<sqrt(std/n)<<endl;
cout<<"======================================="<<endl;
auto start = high_resolution_clock::now();
ios_base::sync_with_stdio(false);
result = cpu_sum(arr,n);
cout<<"CPU Sum is: "<<result<<"\n";
auto stop = high_resolution_clock::now();
double time_taken = chrono::duration_cast<chrono::milliseconds>(stop - start).count();
time_taken *= 1e-9;
mean = (float)result/n;
cout<<"CPU Mean is: "<<mean<<endl;
std = cpu_sd(arr, n, mean);
cout<<"CPU Standard Deviation: "<<std<<endl;
cout<<"======================================="<<endl;
result = 0;
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
minimum<<<1,n/2>>>(d,n);
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Min is: "<<result<<endl;
result = cpu_min(arr,n);
cout<<"CPU Min is: "<<result<<"\n";
cout<<"======================================="<<endl;
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
maximum<<<1,n/2>>>(d);
int gMax;
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Max is: "<<result<<endl;
result = cpu_max(arr,n);
cout<<"CPU Max is: "<<result<<"\n";
cout<<"======================================="<<endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
#include <bits/stdc++.h>
using namespace std;
using namespace std::chrono;
__global__ void maximum(int *input) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if(input[second] > input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void minimum(int *input, int n) {
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if((first < n && second < n) && input[second] < input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
__global__ void gpu_sum(int *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
__global__ void mean_diff_sq(float *input, float mean) {
input[threadIdx.x] -= mean;
input[threadIdx.x] *= input[threadIdx.x];
}
void copy_int_to_float(float *dest, int *src, int size){
for(int i = 0; i < size; i++)
dest[i] = (float)src[i];
}
__global__ void gpu_sd(float *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
}
long cpu_sum(int *input, int n) {
long sum = 0;
for(int i = 0 ; i < n ; i++) {
sum += input[i];
}
return sum;
}
long cpu_min(int *arr, int n) {
int min = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] < min)
min = arr[i];
}
return min;
}
long cpu_max(int *arr, int n) {
int max = arr[0];
for(int i = 1 ; i < n ; i++) {
if(arr[i] > max)
max = arr[i];
}
return max;
}
double cpu_sd(int *arr, int n, float mean) {
float *arr_std = new float[n];
for(int i = 0 ; i < n ; i++) {
arr_std[i] = pow(((float)arr[i] - mean),2);
}
double total = 0;
for(int i = 0 ; i < n ; i++) {
total += arr_std[i];
}
total = total / n;
return sqrt(total);
}
void random_init(int *arr, int n) {
for(int i = 0 ; i < n ; i++) {
arr[i] = rand()%1000;
}
}
int main() {
int *d;
int n = 80;
int *arr = new int[n];
int result;
int size = n * sizeof(int);
random_init(arr,n);
cout<<"Input Array: [";
for(int i = 0 ; i < n ; i++) {
cout<<arr[i]<<", ";
}
cout<<"]"<<endl;
cout<<"======================================="<<endl;
hipMalloc((void **)&d,size);
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
float gpu_elapsed_time;
hipEvent_t gpu_start,gpu_stop;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_stop);
hipEventRecord(gpu_start,0);
gpu_sum<<<1,n/2>>>(d);
hipEventRecord(gpu_stop, 0);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
hipEventDestroy(gpu_start);
hipEventDestroy(gpu_stop);
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Sum is: "<<result<<"\n";
float mean = (double)result/n;
cout<<"GPU Mean is: "<<mean<<endl;
float *arr_float = new float[n];
float *arr_std, std;
hipMalloc((void **)&arr_std,n*sizeof(float));
copy_int_to_float(arr_float, arr, n);
hipMemcpy(arr_std,arr_float,n*sizeof(float),hipMemcpyHostToDevice);
mean_diff_sq <<<1,n>>>(arr_std, mean);
gpu_sd <<<1,n/2>>>(arr_std);
hipMemcpy(&std,arr_std,sizeof(float),hipMemcpyDeviceToHost);
cout<<"GPU Standard Deviation: "<<sqrt(std/n)<<endl;
cout<<"======================================="<<endl;
auto start = high_resolution_clock::now();
ios_base::sync_with_stdio(false);
result = cpu_sum(arr,n);
cout<<"CPU Sum is: "<<result<<"\n";
auto stop = high_resolution_clock::now();
double time_taken = chrono::duration_cast<chrono::milliseconds>(stop - start).count();
time_taken *= 1e-9;
mean = (float)result/n;
cout<<"CPU Mean is: "<<mean<<endl;
std = cpu_sd(arr, n, mean);
cout<<"CPU Standard Deviation: "<<std<<endl;
cout<<"======================================="<<endl;
result = 0;
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
minimum<<<1,n/2>>>(d,n);
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Min is: "<<result<<endl;
result = cpu_min(arr,n);
cout<<"CPU Min is: "<<result<<"\n";
cout<<"======================================="<<endl;
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
maximum<<<1,n/2>>>(d);
int gMax;
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Max is: "<<result<<endl;
result = cpu_max(arr,n);
cout<<"CPU Max is: "<<result<<"\n";
cout<<"======================================="<<endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7maximumPi
.globl _Z7maximumPi
.p2align 8
.type _Z7maximumPi,@function
_Z7maximumPi:
s_load_b32 s2, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s3, s2, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_8
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v3, 1, v0
s_and_b32 s3, 0xffff, s2
s_mov_b32 s2, 1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_lshl_b32 s2, s2, 1
s_delay_alu instid0(VALU_DEP_1)
s_cmp_gt_i32 s4, 0
s_mov_b32 s3, s4
s_cbranch_scc0 .LBB0_8
.LBB0_3:
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB0_6
v_mul_lo_u32 v1, v3, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, s2, v1
v_ashrrev_i32_e32 v2, 31, v1
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
s_clause 0x1
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[1:2], off
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v4, v5
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_6
global_store_b32 v[1:2], v4, off
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s4
s_cmp_eq_u32 s3, 1
s_mov_b32 s4, 0
s_cbranch_scc1 .LBB0_2
v_cvt_f64_i32_e32 v[1:2], s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[1:2], v[1:2], 0.5
v_ceil_f64_e32 v[1:2], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v1, v[1:2]
v_readfirstlane_b32 s4, v1
s_branch .LBB0_2
.LBB0_8:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7maximumPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 5
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7maximumPi, .Lfunc_end0-_Z7maximumPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z7minimumPii
.globl _Z7minimumPii
.p2align 8
.type _Z7minimumPii,@function
_Z7minimumPii:
s_load_b32 s4, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s2, s4, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s2
s_cbranch_vccnz .LBB1_9
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x8
v_lshlrev_b32_e32 v5, 1, v0
s_and_b32 s4, 0xffff, s4
s_mov_b32 s1, 1
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_lshl_b32 s1, s1, 1
s_delay_alu instid0(VALU_DEP_1)
s_cmp_gt_i32 s5, 0
s_mov_b32 s4, s5
s_cbranch_scc0 .LBB1_9
.LBB1_3:
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB1_7
v_mul_lo_u32 v1, v5, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, s1, v1
v_max_i32_e32 v2, v1, v3
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s0, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_7
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_clause 0x1
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[1:2], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v3, v4
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_7
global_store_b32 v[1:2], v3, off
.LBB1_7:
s_or_b32 exec_lo, exec_lo, s5
s_cmp_eq_u32 s4, 1
s_mov_b32 s5, 0
s_cbranch_scc1 .LBB1_2
v_cvt_f64_i32_e32 v[1:2], s4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[1:2], v[1:2], 0.5
v_ceil_f64_e32 v[1:2], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v1, v[1:2]
v_readfirstlane_b32 s5, v1
s_branch .LBB1_2
.LBB1_9:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7minimumPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 6
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z7minimumPii, .Lfunc_end1-_Z7minimumPii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z7gpu_sumPi
.globl _Z7gpu_sumPi
.p2align 8
.type _Z7gpu_sumPi,@function
_Z7gpu_sumPi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x14
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s3, s2, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB2_9
v_lshlrev_b32_e32 v1, 1, v0
s_and_b32 s3, 0xffff, s2
s_mov_b32 s2, 1
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB2_2:
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB2_4
v_mul_lo_u32 v2, v1, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, s2, v2
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_clause 0x1
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[2:3], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
global_store_b32 v[2:3], v4, off
.LBB2_4:
s_or_b32 exec_lo, exec_lo, s4
s_cmp_eq_u32 s3, 1
s_mov_b32 s4, 0
s_cbranch_scc1 .LBB2_6
v_cvt_f64_i32_e32 v[2:3], s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[2:3], v[2:3], 0.5
v_ceil_f64_e32 v[2:3], v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v2, v[2:3]
v_readfirstlane_b32 s4, v2
.LBB2_6:
s_lshl_b32 s2, s2, 1
s_delay_alu instid0(VALU_DEP_1)
s_cmp_gt_i32 s4, 0
s_cbranch_scc0 .LBB2_8
s_mov_b32 s3, s4
s_branch .LBB2_2
.LBB2_8:
s_set_inst_prefetch_distance 0x2
s_ashr_i32 s3, s2, 31
s_branch .LBB2_10
.LBB2_9:
s_mov_b64 s[2:3], 1
.LBB2_10:
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB2_12
s_lshl_b64 s[2:3], s[2:3], 2
v_mov_b32_e32 v0, 0
s_add_u32 s2, s0, s2
s_addc_u32 s3, s1, s3
s_clause 0x1
global_load_b32 v1, v0, s[2:3]
global_load_b32 v2, v0, s[0:1]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
.LBB2_12:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7gpu_sumPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 5
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z7gpu_sumPi, .Lfunc_end2-_Z7gpu_sumPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12mean_diff_sqPff
.globl _Z12mean_diff_sqPff
.p2align 8
.type _Z12mean_diff_sqPff,@function
_Z12mean_diff_sqPff: ; gfx1100 kernel body for mean_diff_sq(float *input, float mean)
; Kernarg segment in s[0:1]: +0x0 = input pointer, +0x8 = float mean.
; v0 = workitem id x; one element per lane: input[tid] = (input[tid]-mean)^2.
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0 ; s[2:3] = input base pointer
s_load_b32 s0, s[0:1], 0x8 ; s0 = mean (raw f32 bits)
v_lshlrev_b32_e32 v0, 2, v0 ; v0 = tid * 4 (byte offset)
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[2:3] ; v1 = input[tid]
s_waitcnt vmcnt(0)
v_subrev_f32_e32 v1, s0, v1 ; v1 = input[tid] - mean
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v1, v1, v1 ; v1 = v1 * v1 (squared deviation)
global_store_b32 v0, v1, s[2:3] ; input[tid] = v1
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12mean_diff_sqPff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 12
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z12mean_diff_sqPff, .Lfunc_end3-_Z12mean_diff_sqPff
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z6gpu_sdPf
.globl _Z6gpu_sdPf
.p2align 8
.type _Z6gpu_sdPf,@function
_Z6gpu_sdPf: ; gfx1100 kernel body for gpu_sd(float *input)
; Kernarg segment in s[0:1]: +0x0 = input pointer; +0x14 = hidden_group_size_x
; (blockDim.x, 16-bit) -> s2/s3. v0 = workitem id (tid).
; .LBB4_2 is one reduction round: lanes with tid < s3 do
;   input[first] += input[first + step]  (first = tid*step*2, step in s2).
; Thread count is ceil-halved per round using f64 math (cvt/mul 0.5/ceil),
; step doubles (s_lshl).  Tail .LBB4_10: lane 0 adds input[step] into input[0].
s_clause 0x1
s_load_b32 s2, s[0:1], 0x14 ; s2 = blockDim.x (hidden_group_size_x)
s_load_b64 s[0:1], s[0:1], 0x0 ; s[0:1] = input pointer
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s3, s2, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB4_9 ; blockDim.x == 0: skip the loop entirely
v_lshlrev_b32_e32 v1, 1, v0 ; v1 = tid * 2
s_and_b32 s3, 0xffff, s2 ; s3 = number_of_threads
s_mov_b32 s2, 1 ; s2 = step_size = 1
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB4_2: ; reduction-round loop head
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0 ; mask off lanes with tid >= number_of_threads
s_cbranch_execz .LBB4_4
v_mul_lo_u32 v2, v1, s2 ; v2 = first = tid*2*step_size
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, s2, v2 ; v4 = second = first + step_size
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[2:3] ; byte offsets (index * 4)
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s0, v4 ; &input[second]
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s0, v2 ; &input[first]
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_clause 0x1
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[2:3], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v4, v4, v5 ; float add (vs. integer add in gpu_sum)
global_store_b32 v[2:3], v4, off ; input[first] = sum
.LBB4_4:
s_or_b32 exec_lo, exec_lo, s4 ; restore full exec mask
s_cmp_eq_u32 s3, 1
s_mov_b32 s4, 0
s_cbranch_scc1 .LBB4_6 ; number_of_threads == 1 -> next count is 0
v_cvt_f64_i32_e32 v[2:3], s3 ; ceil(number_of_threads / 2.0) in f64,
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[2:3], v[2:3], 0.5 ; mirroring the source's ceil((double)n/2)
v_ceil_f64_e32 v[2:3], v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v2, v[2:3]
v_readfirstlane_b32 s4, v2
.LBB4_6:
s_lshl_b32 s2, s2, 1 ; step_size <<= 1
s_delay_alu instid0(VALU_DEP_1)
s_cmp_gt_i32 s4, 0
s_cbranch_scc0 .LBB4_8 ; count reached 0 -> leave loop
s_mov_b32 s3, s4
s_branch .LBB4_2
.LBB4_8:
s_set_inst_prefetch_distance 0x2
s_ashr_i32 s3, s2, 31 ; sign-extend final step_size to 64-bit index
s_branch .LBB4_10
.LBB4_9:
s_mov_b64 s[2:3], 1 ; loop skipped: step_size stays 1
.LBB4_10: ; tail: only lane 0 (tid == 0) active
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB4_12
s_lshl_b64 s[2:3], s[2:3], 2 ; byte offset of input[step_size]
v_mov_b32_e32 v0, 0
s_add_u32 s2, s0, s2
s_addc_u32 s3, s1, s3
s_clause 0x1
global_load_b32 v1, v0, s[2:3] ; input[step_size]
global_load_b32 v2, v0, s[0:1] ; input[0]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1] ; input[0] += input[step_size]
.LBB4_12:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6gpu_sdPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 5
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z6gpu_sdPf, .Lfunc_end4-_Z6gpu_sdPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7maximumPi
.private_segment_fixed_size: 0
.sgpr_count: 7
.sgpr_spill_count: 0
.symbol: _Z7maximumPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7minimumPii
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z7minimumPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7gpu_sumPi
.private_segment_fixed_size: 0
.sgpr_count: 7
.sgpr_spill_count: 0
.symbol: _Z7gpu_sumPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 12
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12mean_diff_sqPff
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z12mean_diff_sqPff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6gpu_sdPf
.private_segment_fixed_size: 0
.sgpr_count: 7
.sgpr_spill_count: 0
.symbol: _Z6gpu_sdPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
#include <bits/stdc++.h>
using namespace std;
using namespace std::chrono;
// Device kernel: in-place parallel max-reduction of `input`.
// Each round, thread `tid` compares the pair input[first] / input[second]
// (first = tid*step_size*2, second = first + step_size) and keeps the larger
// value in input[first]; the overall maximum ends up in input[0].
// NOTE(review): rounds are not separated by __syncthreads(); this is only
// safe while all active threads execute in lock-step — confirm launch config.
__global__ void maximum(int *input) {
int tid = threadIdx.x;
int step_size = 1;                 // distance between the two elements a thread combines
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if(input[second] > input[first])
input[first] = input[second];
}
step_size <<= 1;                   // pairs spread twice as far apart each round
if(number_of_threads == 1)
number_of_threads = 0;             // final round done -> exit loop
else
number_of_threads = ceil((double)number_of_threads / 2);   // halve, rounding up for odd counts
}
}
// Device kernel: in-place parallel min-reduction of the first n ints of `input`.
// Same pairing scheme as maximum(), but both indices are bounds-checked
// against n before touching memory; the minimum ends up in input[0].
// NOTE(review): no __syncthreads() between rounds — see maximum(); confirm
// the launch keeps all active threads in one lock-step group.
__global__ void minimum(int *input, int n) {
int tid = threadIdx.x;
int step_size = 1;                 // distance between the two elements a thread combines
int number_of_threads = blockDim.x;
while(number_of_threads>0) {
if(tid < number_of_threads) {
int first = tid*step_size*2;
int second = first + step_size;
if((first < n && second < n) && input[second] < input[first])
input[first] = input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
}
// Device kernel: in-place parallel sum-reduction of `input`; total in input[0].
// Each round, thread tid does input[first] += input[first + step_size]
// (first = tid*step_size*2); thread count is ceil-halved, step doubles.
// NOTE(review): after the loop, thread 0 adds input[step_size] into input[0]
// once more — presumably a fix-up for the element the halving misses, but it
// may double-count for some blockDim values; verify against a CPU sum.
// NOTE(review): no __syncthreads() between rounds — confirm launch config.
__global__ void gpu_sum(int *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;    // first == 0 since tid == 0
int second = first + step_size;     // second == final step_size
input[first] += input[second];
}
}
// Device kernel: one element per thread, replaces each value with its
// squared deviation from `mean`: input[i] = (input[i] - mean)^2.
__global__ void mean_diff_sq(float *input, float mean) {
input[threadIdx.x] -= mean;
input[threadIdx.x] *= input[threadIdx.x];
}
// Converts the first `size` ints of `src` to float and writes them to `dest`.
// Does nothing when size <= 0. Buffers must not overlap.
void copy_int_to_float(float *dest, int *src, int size){
    int idx = 0;
    while (idx < size) {
        dest[idx] = static_cast<float>(src[idx]);
        ++idx;
    }
}
// Device kernel: in-place parallel sum-reduction over floats (used on the
// squared-deviation array to accumulate the variance numerator into input[0]).
// Identical structure to gpu_sum() but with float addition.
// NOTE(review): same caveats as gpu_sum — the trailing tid==0 add and the
// missing __syncthreads() between rounds should be verified.
__global__ void gpu_sd(float *input) {
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0) {
if(tid < number_of_threads) {
int first = tid * step_size * 2;
int second = first + step_size;
input[first] += input[second];
}
step_size <<= 1;
if(number_of_threads == 1)
number_of_threads = 0;
else
number_of_threads = ceil((double)number_of_threads / 2);
}
if(tid == 0) {
int first = tid * step_size * 2;    // first == 0 since tid == 0
int second = first + step_size;     // second == final step_size
input[first] += input[second];
}
}
// Sequential reference sum of the first n ints; accumulates in a long to
// avoid 32-bit overflow. Returns 0 for n <= 0.
long cpu_sum(int *input, int n) {
    long total = 0;
    int idx = 0;
    while (idx < n) {
        total += input[idx];
        ++idx;
    }
    return total;
}
// Sequential reference minimum of arr[0..n-1].
// Precondition: n >= 1 (arr[0] is read unconditionally, as in the original).
long cpu_min(int *arr, int n) {
    int best = arr[0];
    for (int idx = 1; idx < n; ++idx) {
        best = (arr[idx] < best) ? arr[idx] : best;
    }
    return best;
}
// Sequential reference maximum of arr[0..n-1].
// Precondition: n >= 1 (arr[0] is read unconditionally, as in the original).
long cpu_max(int *arr, int n) {
    int best = arr[0];
    for (int idx = 1; idx < n; ++idx) {
        best = (arr[idx] > best) ? arr[idx] : best;
    }
    return best;
}
// Computes the population standard deviation of arr[0..n-1] about `mean`:
// sqrt( sum((arr[i] - mean)^2) / n ).
// Fixes vs. original: the `new float[n]` scratch buffer was never freed
// (memory leak) — the squared deviations are now accumulated directly;
// n <= 0 returned NaN (0/0) — now returns 0.0.
// The per-element square is still rounded through float, matching the
// original single-precision intermediate before the double accumulation.
double cpu_sd(int *arr, int n, float mean) {
    if (n <= 0)
        return 0.0;                   // guard: avoid division by zero / NaN
    double total = 0;
    for (int i = 0; i < n; i++) {
        // Same expression as the original's arr_std[i], kept in float.
        float diff_sq = (float)pow(((float)arr[i] - mean), 2);
        total += diff_sq;
    }
    total = total / n;                // population variance
    return sqrt(total);
}
// Fills arr[0..n-1] with pseudo-random values in [0, 999], consuming one
// rand() call per element (seed via srand() beforehand for reproducibility).
void random_init(int *arr, int n) {
    for (int *p = arr, *end = arr + n; p < end; ++p) {
        *p = rand() % 1000;
    }
}
// Driver: fills an 80-element random int array, then computes sum, mean,
// standard deviation, min and max both on the GPU (reduction kernels above)
// and on the CPU, printing each pair for comparison.
// NOTE(review): d, arr, arr_float and arr_std are never freed (no hipFree /
// delete[]); `gMax` and `time_taken` are computed/declared but never used;
// HIP API return codes are not checked.
int main() {
int *d;
int n = 80;
int *arr = new int[n];
int result;
int size = n * sizeof(int);
random_init(arr,n);
cout<<"Input Array: [";
for(int i = 0 ; i < n ; i++) {
cout<<arr[i]<<", ";
}
cout<<"]"<<endl;
cout<<"======================================="<<endl;
hipMalloc((void **)&d,size);
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
// Time the GPU sum with HIP events (elapsed time is measured but not printed).
float gpu_elapsed_time;
hipEvent_t gpu_start,gpu_stop;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_stop);
hipEventRecord(gpu_start,0);
gpu_sum<<<1,n/2>>>(d);
hipEventRecord(gpu_stop, 0);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
hipEventDestroy(gpu_start);
hipEventDestroy(gpu_stop);
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);   // reduction result lives in d[0]
cout<<"GPU Sum is: "<<result<<"\n";
float mean = (double)result/n;
cout<<"GPU Mean is: "<<mean<<endl;
// Standard deviation on the GPU: square deviations per element, then reduce.
float *arr_float = new float[n];
float *arr_std, std;
hipMalloc((void **)&arr_std,n*sizeof(float));
copy_int_to_float(arr_float, arr, n);
hipMemcpy(arr_std,arr_float,n*sizeof(float),hipMemcpyHostToDevice);
mean_diff_sq <<<1,n>>>(arr_std, mean);
gpu_sd <<<1,n/2>>>(arr_std);
hipMemcpy(&std,arr_std,sizeof(float),hipMemcpyDeviceToHost);
cout<<"GPU Standard Deviation: "<<sqrt(std/n)<<endl;
cout<<"======================================="<<endl;
auto start = high_resolution_clock::now();
ios_base::sync_with_stdio(false);
result = cpu_sum(arr,n);
cout<<"CPU Sum is: "<<result<<"\n";
auto stop = high_resolution_clock::now();
double time_taken = chrono::duration_cast<chrono::milliseconds>(stop - start).count();   // NOTE(review): never printed
time_taken *= 1e-9;
mean = (float)result/n;
cout<<"CPU Mean is: "<<mean<<endl;
std = cpu_sd(arr, n, mean);
cout<<"CPU Standard Deviation: "<<std<<endl;
cout<<"======================================="<<endl;
// Min: re-upload the original data (gpu_sum destroyed it in place).
result = 0;
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
minimum<<<1,n/2>>>(d,n);
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Min is: "<<result<<endl;
result = cpu_min(arr,n);
cout<<"CPU Min is: "<<result<<"\n";
cout<<"======================================="<<endl;
// Max: same re-upload pattern.
hipMemcpy(d,arr,size,hipMemcpyHostToDevice);
maximum<<<1,n/2>>>(d);
int gMax;   // NOTE(review): unused
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"GPU Max is: "<<result<<endl;
result = cpu_max(arr,n);
cout<<"CPU Max is: "<<result<<"\n";
cout<<"======================================="<<endl;
return 0;
}
.file "Assignment1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
#-----------------------------------------------------------------------
# HIP host-side launch stub for kernel _Z7maximumPi (SysV AMD64).
# In: rdi = int* kernel argument. Pops the launch configuration pushed by
# the <<<...>>> site (__hipPopCallConfiguration) and forwards grid dim,
# block dim, arg-pointer array, shared-mem size and stream to hipLaunchKernel.
#-----------------------------------------------------------------------
.globl _Z22__device_stub__maximumPi # -- Begin function _Z22__device_stub__maximumPi
.p2align 4, 0x90
.type _Z22__device_stub__maximumPi,@function
_Z22__device_stub__maximumPi: # @_Z22__device_stub__maximumPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp) # spill the kernel arg; its address goes in the arg array
leaq 64(%rsp), %rax
movq %rax, (%rsp) # args[0] = &arg0
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi # grid dim (x,y packed)
movl 56(%rsp), %edx
movq 32(%rsp), %rcx # block dim
movl 40(%rsp), %r8d
movq %rsp, %r9 # kernel arg array
movl $_Z7maximumPi, %edi # kernel handle
pushq 16(%rsp) # stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # shared-mem bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z22__device_stub__maximumPi, .Lfunc_end0-_Z22__device_stub__maximumPi
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# HIP host-side launch stub for kernel _Z7minimumPii (SysV AMD64).
# In: rdi = int* input, esi = int n. Same pattern as the other stubs but
# with a two-entry kernel-argument array.
#-----------------------------------------------------------------------
.globl _Z22__device_stub__minimumPii # -- Begin function _Z22__device_stub__minimumPii
.p2align 4, 0x90
.type _Z22__device_stub__minimumPii,@function
_Z22__device_stub__minimumPii: # @_Z22__device_stub__minimumPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp) # spill arg0 (pointer)
movl %esi, 4(%rsp) # spill arg1 (int n)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp) # args[0] = &arg0
leaq 4(%rsp), %rax
movq %rax, 72(%rsp) # args[1] = &arg1
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9 # kernel arg array
movl $_Z7minimumPii, %edi
pushq 8(%rsp) # stream
.cfi_adjust_cfa_offset 8
pushq 24(%rsp) # shared-mem bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z22__device_stub__minimumPii, .Lfunc_end1-_Z22__device_stub__minimumPii
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# HIP host-side launch stub for kernel _Z7gpu_sumPi (SysV AMD64).
# In: rdi = int* kernel argument. Identical layout to the maximum() stub.
#-----------------------------------------------------------------------
.globl _Z22__device_stub__gpu_sumPi # -- Begin function _Z22__device_stub__gpu_sumPi
.p2align 4, 0x90
.type _Z22__device_stub__gpu_sumPi,@function
_Z22__device_stub__gpu_sumPi: # @_Z22__device_stub__gpu_sumPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp) # spill the kernel arg
leaq 64(%rsp), %rax
movq %rax, (%rsp) # args[0] = &arg0
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9 # kernel arg array
movl $_Z7gpu_sumPi, %edi
pushq 16(%rsp) # stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # shared-mem bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z22__device_stub__gpu_sumPi, .Lfunc_end2-_Z22__device_stub__gpu_sumPi
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# HIP host-side launch stub for kernel _Z12mean_diff_sqPff (SysV AMD64).
# In: rdi = float* input, xmm0 = float mean (first FP arg per SysV).
#-----------------------------------------------------------------------
.globl _Z27__device_stub__mean_diff_sqPff # -- Begin function _Z27__device_stub__mean_diff_sqPff
.p2align 4, 0x90
.type _Z27__device_stub__mean_diff_sqPff,@function
_Z27__device_stub__mean_diff_sqPff: # @_Z27__device_stub__mean_diff_sqPff
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp) # spill arg0 (pointer)
movss %xmm0, 4(%rsp) # spill arg1 (float mean)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp) # args[0] = &arg0
leaq 4(%rsp), %rax
movq %rax, 72(%rsp) # args[1] = &arg1
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9 # kernel arg array
movl $_Z12mean_diff_sqPff, %edi
pushq 8(%rsp) # stream
.cfi_adjust_cfa_offset 8
pushq 24(%rsp) # shared-mem bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end3:
.size _Z27__device_stub__mean_diff_sqPff, .Lfunc_end3-_Z27__device_stub__mean_diff_sqPff
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# void copy_int_to_float(float* rdi, int* rsi, int edx) — SysV AMD64.
# Converts edx ints to float (cvtsi2ssl) and stores them; skips the loop
# entirely when edx <= 0. The xorps breaks the false dependency on xmm0
# before each scalar int->float conversion.
#-----------------------------------------------------------------------
.globl _Z17copy_int_to_floatPfPii # -- Begin function _Z17copy_int_to_floatPfPii
.p2align 4, 0x90
.type _Z17copy_int_to_floatPfPii,@function
_Z17copy_int_to_floatPfPii: # @_Z17copy_int_to_floatPfPii
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB4_3 # size <= 0: nothing to do
# %bb.1: # %.lr.ph.preheader
movl %edx, %eax # rax = trip count (zero-extended)
xorl %ecx, %ecx # rcx = index
.p2align 4, 0x90
.LBB4_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ssl (%rsi,%rcx,4), %xmm0 # (float)src[i]
movss %xmm0, (%rdi,%rcx,4) # dest[i] = ...
incq %rcx
cmpq %rcx, %rax
jne .LBB4_2
.LBB4_3: # %._crit_edge
retq
.Lfunc_end4:
.size _Z17copy_int_to_floatPfPii, .Lfunc_end4-_Z17copy_int_to_floatPfPii
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# HIP host-side launch stub for kernel _Z6gpu_sdPf (SysV AMD64).
# In: rdi = float* kernel argument. Identical layout to the gpu_sum stub.
#-----------------------------------------------------------------------
.globl _Z21__device_stub__gpu_sdPf # -- Begin function _Z21__device_stub__gpu_sdPf
.p2align 4, 0x90
.type _Z21__device_stub__gpu_sdPf,@function
_Z21__device_stub__gpu_sdPf: # @_Z21__device_stub__gpu_sdPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp) # spill the kernel arg
leaq 64(%rsp), %rax
movq %rax, (%rsp) # args[0] = &arg0
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9 # kernel arg array
movl $_Z6gpu_sdPf, %edi
pushq 16(%rsp) # stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # shared-mem bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end5:
.size _Z21__device_stub__gpu_sdPf, .Lfunc_end5-_Z21__device_stub__gpu_sdPf
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# long cpu_sum(int* rdi, int esi) — SysV AMD64.
# Sums esi sign-extended 32-bit ints into rax; returns 0 when esi <= 0.
#-----------------------------------------------------------------------
.globl _Z7cpu_sumPii # -- Begin function _Z7cpu_sumPii
.p2align 4, 0x90
.type _Z7cpu_sumPii,@function
_Z7cpu_sumPii: # @_Z7cpu_sumPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB6_1 # n <= 0: return 0
# %bb.3: # %.lr.ph.preheader
movl %esi, %ecx # rcx = trip count
xorl %edx, %edx # rdx = index
xorl %eax, %eax # rax = accumulator
.p2align 4, 0x90
.LBB6_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movslq (%rdi,%rdx,4), %rsi # sign-extend input[i] to 64-bit
addq %rsi, %rax
incq %rdx
cmpq %rdx, %rcx
jne .LBB6_4
# %bb.2: # %._crit_edge
retq
.LBB6_1:
xorl %eax, %eax
retq
.Lfunc_end6:
.size _Z7cpu_sumPii, .Lfunc_end6-_Z7cpu_sumPii
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# long cpu_min(int* rdi, int esi) — SysV AMD64.
# Branch-free minimum scan via cmovll; arr[0] is read unconditionally,
# mirroring the C++ source (precondition n >= 1). cltq sign-extends the
# 32-bit result for the `long` return.
#-----------------------------------------------------------------------
.globl _Z7cpu_minPii # -- Begin function _Z7cpu_minPii
.p2align 4, 0x90
.type _Z7cpu_minPii,@function
_Z7cpu_minPii: # @_Z7cpu_minPii
.cfi_startproc
# %bb.0:
movl (%rdi), %eax # eax = arr[0], current minimum
cmpl $2, %esi
jl .LBB7_3 # n < 2: done
# %bb.1: # %.lr.ph.preheader
movl %esi, %ecx
movl $1, %edx
.p2align 4, 0x90
.LBB7_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rdi,%rdx,4), %esi
cmpl %eax, %esi
cmovll %esi, %eax # keep the smaller (signed)
incq %rdx
cmpq %rdx, %rcx
jne .LBB7_2
.LBB7_3: # %._crit_edge
cltq # sign-extend eax -> rax
retq
.Lfunc_end7:
.size _Z7cpu_minPii, .Lfunc_end7-_Z7cpu_minPii
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# long cpu_max(int* rdi, int esi) — SysV AMD64.
# Mirror image of cpu_min: branch-free maximum scan via cmovgl.
#-----------------------------------------------------------------------
.globl _Z7cpu_maxPii # -- Begin function _Z7cpu_maxPii
.p2align 4, 0x90
.type _Z7cpu_maxPii,@function
_Z7cpu_maxPii: # @_Z7cpu_maxPii
.cfi_startproc
# %bb.0:
movl (%rdi), %eax # eax = arr[0], current maximum
cmpl $2, %esi
jl .LBB8_3 # n < 2: done
# %bb.1: # %.lr.ph.preheader
movl %esi, %ecx
movl $1, %edx
.p2align 4, 0x90
.LBB8_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rdi,%rdx,4), %esi
cmpl %eax, %esi
cmovgl %esi, %eax # keep the larger (signed)
incq %rdx
cmpq %rdx, %rcx
jne .LBB8_2
.LBB8_3: # %._crit_edge
cltq # sign-extend eax -> rax
retq
.Lfunc_end8:
.size _Z7cpu_maxPii, .Lfunc_end8-_Z7cpu_maxPii
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# double cpu_sd(int* rdi, int esi, float xmm0) — SysV AMD64.
# Allocates float[n] via _Znam (operator new[]), fills it with squared
# deviations from the mean, sums in double, divides by n, then returns
# sqrt: inline sqrtsd on the fast path, tail-call to libm sqrt when the
# ucomisd check sees a negative/NaN operand (preserves errno semantics).
# NOTE(review): no matching _ZdaPv (operator delete[]) anywhere in this
# function — the buffer leaks, mirroring the C++ source.
#-----------------------------------------------------------------------
.globl _Z6cpu_sdPiif # -- Begin function _Z6cpu_sdPiif
.p2align 4, 0x90
.type _Z6cpu_sdPiif,@function
_Z6cpu_sdPiif: # @_Z6cpu_sdPiif
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $16, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movss %xmm0, 12(%rsp) # 4-byte Spill
movl %esi, %ebx # ebx = n
movq %rdi, %r14 # r14 = arr
movslq %esi, %r15
leaq (,%r15,4), %rax # alloc size = n * 4
testl %r15d, %r15d
movq $-1, %rdi # negative n -> request SIZE_MAX (new[] throws)
cmovnsq %rax, %rdi
callq _Znam # operator new[](size)
movss 12(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
movl %ebx, %ecx
testl %r15d, %r15d
jle .LBB9_3 # n <= 0: skip fill loop
# %bb.1: # %.lr.ph.preheader
xorl %edx, %edx
.p2align 4, 0x90
.LBB9_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ssl (%r14,%rdx,4), %xmm0 # (float)arr[i]
subss %xmm1, %xmm0 # - mean
mulss %xmm0, %xmm0 # squared (pow(x,2) strength-reduced to mul)
movss %xmm0, (%rax,%rdx,4) # arr_std[i] = ...
incq %rdx
cmpq %rdx, %rcx
jne .LBB9_2
.LBB9_3: # %.preheader
xorps %xmm1, %xmm1
xorps %xmm0, %xmm0 # xmm0 = total = 0.0
testl %ebx, %ebx
jle .LBB9_6
# %bb.4: # %.lr.ph23.preheader
xorl %edx, %edx
.p2align 4, 0x90
.LBB9_5: # %.lr.ph23
# =>This Inner Loop Header: Depth=1
movss (%rax,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2 # widen to double before accumulating
addsd %xmm2, %xmm0
incq %rdx
cmpq %rdx, %rcx
jne .LBB9_5
.LBB9_6: # %._crit_edge
xorps %xmm2, %xmm2
cvtsi2sd %ebx, %xmm2
divsd %xmm2, %xmm0 # total /= n (NaN when n == 0)
ucomisd %xmm1, %xmm0
jb .LBB9_8 # negative/NaN: let libm sqrt set errno
# %bb.7: # %._crit_edge.split
sqrtsd %xmm0, %xmm0
addq $16, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB9_8: # %call.sqrt
.cfi_def_cfa_offset 48
addq $16, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp sqrt # TAILCALL
.Lfunc_end9:
.size _Z6cpu_sdPiif, .Lfunc_end9-_Z6cpu_sdPiif
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# void random_init(int* rdi, int esi) — SysV AMD64.
# arr[i] = rand() % 1000 for i in [0, n). The imul $274877907 / shr / sar
# sequence is the compiler's strength-reduced signed division by 1000,
# used to form the remainder without an idiv.
#-----------------------------------------------------------------------
.globl _Z11random_initPii # -- Begin function _Z11random_initPii
.p2align 4, 0x90
.type _Z11random_initPii,@function
_Z11random_initPii: # @_Z11random_initPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB10_4 # n <= 0: nothing to do
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx # rbx = arr (survives the rand() calls)
movl %esi, %r14d # r14 = trip count
xorl %r15d, %r15d # r15 = index
.p2align 4, 0x90
.LBB10_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx # ecx = rand() / 1000 (rounded toward zero)
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax # eax = rand() % 1000
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB10_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB10_4: # %._crit_edge
retq
.Lfunc_end10:
.size _Z11random_initPii, .Lfunc_end10-_Z11random_initPii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI11_0:
.quad 0x4054000000000000 # double 80
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI11_1:
.long 0x42a00000 # float 80
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $320, %edi # imm = 0x140
callq _Znam
movq %rax, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB11_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq $80, %r14
jne .LBB11_1
# %bb.2: # %_Z11random_initPii.exit
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB11_3: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $80, %r14
jne .LBB11_3
# %bb.4:
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB11_81
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB11_7
# %bb.6:
movzbl 67(%r14), %eax
jmp .LBB11_8
.LBB11_7:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB11_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB11_81
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i113
cmpb $0, 56(%r14)
je .LBB11_11
# %bb.10:
movzbl 67(%r14), %eax
jmp .LBB11_12
.LBB11_11:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB11_12: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit116
movabsq $4294967297, %r14 # imm = 0x100000001
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
leaq 32(%rsp), %rdi
movl $320, %esi # imm = 0x140
callq hipMalloc
movq 32(%rsp), %rdi
movl $320, %edx # imm = 0x140
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 128(%rsp), %rdi
callq hipEventCreate
leaq 120(%rsp), %rdi
callq hipEventCreate
movq 128(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
leaq 39(%r14), %r15
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB11_14
# %bb.13:
movq 32(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 96(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z7gpu_sumPi, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB11_14:
movq 120(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 120(%rsp), %rdi
callq hipEventSynchronize
movq 128(%rsp), %rsi
movq 120(%rsp), %rdx
leaq 140(%rsp), %rdi
callq hipEventElapsedTime
movq 128(%rsp), %rdi
callq hipEventDestroy
movq 120(%rsp), %rdi
callq hipEventDestroy
movq 32(%rsp), %rsi
leaq 8(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.4, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 8(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.5, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
cvtsi2sdl 8(%rsp), %xmm0
divsd .LCPI11_0(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB11_81
# %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i118
cmpb $0, 56(%r12)
je .LBB11_17
# %bb.16:
movzbl 67(%r12), %ecx
jmp .LBB11_18
.LBB11_17:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB11_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit121
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $320, %edi # imm = 0x140
callq _Znam
movq %rax, %r12
leaq 112(%rsp), %rdi
movl $320, %esi # imm = 0x140
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB11_19: # %.lr.ph.i66
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ssl (%rbx,%rax,4), %xmm0
movss %xmm0, (%r12,%rax,4)
incq %rax
cmpq $80, %rax
jne .LBB11_19
# %bb.20: # %_Z17copy_int_to_floatPfPii.exit
movq 112(%rsp), %rdi
movl $320, %edx # imm = 0x140
movq %r12, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 79(%r14), %rdx
movq %r14, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB11_22
# %bb.21:
movq 112(%rsp), %rax
movq %rax, 48(%rsp)
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 92(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 92(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12mean_diff_sqPff, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB11_22:
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB11_24
# %bb.23:
movq 112(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 96(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6gpu_sdPf, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB11_24:
movq 112(%rsp), %rsi
leaq 88(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $24, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 88(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI11_1(%rip), %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jb .LBB11_26
# %bb.25:
sqrtss %xmm0, %xmm0
jmp .LBB11_27
.LBB11_26: # %call.sqrt
callq sqrtf
.LBB11_27: # %.split
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB11_81
# %bb.28: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i123
cmpb $0, 56(%r12)
je .LBB11_30
# %bb.29:
movzbl 67(%r12), %ecx
jmp .LBB11_31
.LBB11_30:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB11_31: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit126
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB11_81
# %bb.32: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i128
cmpb $0, 56(%r12)
je .LBB11_34
# %bb.33:
movzbl 67(%r12), %eax
jmp .LBB11_35
.LBB11_34:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB11_35: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit131
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
callq _ZNSt6chrono3_V212system_clock3nowEv
xorl %r12d, %r12d
xorl %edi, %edi
callq _ZNSt8ios_base15sync_with_stdioEb
xorl %eax, %eax
.p2align 4, 0x90
.LBB11_36: # %.lr.ph.i82
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %ecx
addq %rcx, %rax
incq %r12
cmpq $80, %r12
jne .LBB11_36
# %bb.37: # %_Z7cpu_sumPii.exit
movl %eax, 8(%rsp)
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 8(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.5, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq _ZNSt6chrono3_V212system_clock3nowEv
xorps %xmm0, %xmm0
cvtsi2ssl 8(%rsp), %xmm0
divss .LCPI11_1(%rip), %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.9, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB11_81
# %bb.38: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i133
cmpb $0, 56(%r12)
je .LBB11_40
# %bb.39:
movzbl 67(%r12), %ecx
jmp .LBB11_41
.LBB11_40:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB11_41: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit136
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $320, %edi # imm = 0x140
callq _Znam
xorl %ecx, %ecx
movss 12(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB11_42: # %.lr.ph.i86
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ssl (%rbx,%rcx,4), %xmm0
subss %xmm1, %xmm0
mulss %xmm0, %xmm0
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq $80, %rcx
jne .LBB11_42
# %bb.43: # %.lr.ph23.i.preheader
xorps %xmm0, %xmm0
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB11_44: # %.lr.ph23.i
# =>This Inner Loop Header: Depth=1
movss (%rax,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rcx
cmpq $80, %rcx
jne .LBB11_44
# %bb.45: # %_Z6cpu_sdPiif.exit
divsd .LCPI11_0(%rip), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB11_47
# %bb.46:
sqrtsd %xmm0, %xmm0
jmp .LBB11_48
.LBB11_47: # %call.sqrt218
callq sqrt
.LBB11_48: # %_Z6cpu_sdPiif.exit.split
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 88(%rsp)
movl $_ZSt4cout, %edi
movl $.L.str.10, %esi
movl $24, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 88(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB11_81
# %bb.49: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i138
cmpb $0, 56(%r12)
je .LBB11_51
# %bb.50:
movzbl 67(%r12), %ecx
jmp .LBB11_52
.LBB11_51:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB11_52: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit141
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB11_81
# %bb.53: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i143
cmpb $0, 56(%r12)
je .LBB11_55
# %bb.54:
movzbl 67(%r12), %eax
jmp .LBB11_56
.LBB11_55:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB11_56: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit146
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $0, 8(%rsp)
movq 32(%rsp), %rdi
movl $320, %edx # imm = 0x140
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB11_58
# %bb.57:
movq 32(%rsp), %rax
movq %rax, 48(%rsp)
movl $80, 92(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 92(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7minimumPii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB11_58:
movq 32(%rsp), %rsi
leaq 8(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.11, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 8(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB11_81
# %bb.59: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i148
cmpb $0, 56(%r12)
je .LBB11_61
# %bb.60:
movzbl 67(%r12), %ecx
jmp .LBB11_62
.LBB11_61:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB11_62: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit151
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl (%rbx), %eax
movl $1, %ecx
.p2align 4, 0x90
.LBB11_63: # %.lr.ph.i96
# =>This Inner Loop Header: Depth=1
movl (%rbx,%rcx,4), %edx
cmpl %eax, %edx
cmovll %edx, %eax
incq %rcx
cmpq $80, %rcx
jne .LBB11_63
# %bb.64: # %_Z7cpu_minPii.exit
movl %eax, 8(%rsp)
movl $_ZSt4cout, %edi
movl $.L.str.12, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 8(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.5, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB11_81
# %bb.65: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i153
cmpb $0, 56(%r12)
je .LBB11_67
# %bb.66:
movzbl 67(%r12), %eax
jmp .LBB11_68
.LBB11_67:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB11_68: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit156
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 32(%rsp), %rdi
movl $320, %edx # imm = 0x140
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB11_70
# %bb.69:
movq 32(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 16(%rsp)
leaq 96(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z7maximumPi, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB11_70:
movq 32(%rsp), %rsi
leaq 8(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.13, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 8(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB11_81
# %bb.71: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i158
cmpb $0, 56(%r14)
je .LBB11_73
# %bb.72:
movzbl 67(%r14), %ecx
jmp .LBB11_74
.LBB11_73:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB11_74: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit161
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl (%rbx), %eax
movl $1, %ecx
.p2align 4, 0x90
.LBB11_75: # %.lr.ph.i106
# =>This Inner Loop Header: Depth=1
movl (%rbx,%rcx,4), %edx
cmpl %eax, %edx
cmovgl %edx, %eax
incq %rcx
cmpq $80, %rcx
jne .LBB11_75
# %bb.76: # %_Z7cpu_maxPii.exit
movl %eax, 8(%rsp)
movl $_ZSt4cout, %edi
movl $.L.str.14, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 8(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.5, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB11_81
# %bb.77: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i163
cmpb $0, 56(%rbx)
je .LBB11_79
# %bb.78:
movzbl 67(%rbx), %eax
jmp .LBB11_80
.LBB11_79:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB11_80: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit166
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB11_81:
.cfi_def_cfa_offset 192
callq _ZSt16__throw_bad_castv
.Lfunc_end11:
.size main, .Lfunc_end11-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB12_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB12_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7maximumPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7minimumPii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7gpu_sumPi, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12mean_diff_sqPff, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6gpu_sdPf, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end12:
.size __hip_module_ctor, .Lfunc_end12-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB13_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB13_2:
retq
.Lfunc_end13:
.size __hip_module_dtor, .Lfunc_end13-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7maximumPi,@object # @_Z7maximumPi
.section .rodata,"a",@progbits
.globl _Z7maximumPi
.p2align 3, 0x0
_Z7maximumPi:
.quad _Z22__device_stub__maximumPi
.size _Z7maximumPi, 8
.type _Z7minimumPii,@object # @_Z7minimumPii
.globl _Z7minimumPii
.p2align 3, 0x0
_Z7minimumPii:
.quad _Z22__device_stub__minimumPii
.size _Z7minimumPii, 8
.type _Z7gpu_sumPi,@object # @_Z7gpu_sumPi
.globl _Z7gpu_sumPi
.p2align 3, 0x0
_Z7gpu_sumPi:
.quad _Z22__device_stub__gpu_sumPi
.size _Z7gpu_sumPi, 8
.type _Z12mean_diff_sqPff,@object # @_Z12mean_diff_sqPff
.globl _Z12mean_diff_sqPff
.p2align 3, 0x0
_Z12mean_diff_sqPff:
.quad _Z27__device_stub__mean_diff_sqPff
.size _Z12mean_diff_sqPff, 8
.type _Z6gpu_sdPf,@object # @_Z6gpu_sdPf
.globl _Z6gpu_sdPf
.p2align 3, 0x0
_Z6gpu_sdPf:
.quad _Z21__device_stub__gpu_sdPf
.size _Z6gpu_sdPf, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Input Array: ["
.size .L.str, 15
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", "
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "]"
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "======================================="
.size .L.str.3, 40
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "GPU Sum is: "
.size .L.str.4, 13
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\n"
.size .L.str.5, 2
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "GPU Mean is: "
.size .L.str.6, 14
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "GPU Standard Deviation: "
.size .L.str.7, 25
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "CPU Sum is: "
.size .L.str.8, 13
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "CPU Mean is: "
.size .L.str.9, 14
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "CPU Standard Deviation: "
.size .L.str.10, 25
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "GPU Min is: "
.size .L.str.11, 13
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "CPU Min is: "
.size .L.str.12, 13
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "GPU Max is: "
.size .L.str.13, 13
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "CPU Max is: "
.size .L.str.14, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7maximumPi"
.size .L__unnamed_1, 13
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z7minimumPii"
.size .L__unnamed_2, 14
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z7gpu_sumPi"
.size .L__unnamed_3, 13
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z12mean_diff_sqPff"
.size .L__unnamed_4, 20
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z6gpu_sdPf"
.size .L__unnamed_5, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__maximumPi
.addrsig_sym _Z22__device_stub__minimumPii
.addrsig_sym _Z22__device_stub__gpu_sumPi
.addrsig_sym _Z27__device_stub__mean_diff_sqPff
.addrsig_sym _Z21__device_stub__gpu_sdPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7maximumPi
.addrsig_sym _Z7minimumPii
.addrsig_sym _Z7gpu_sumPi
.addrsig_sym _Z12mean_diff_sqPff
.addrsig_sym _Z6gpu_sdPf
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // Hilos y Bloques
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define COLUMNAS 10 // Nro de columnas -> eje x
#define FILAS 6 // Nro de filas -> eje y
// Kernel Bidimensional (x, y)
__global__
void MathFinal(int *entrada, int *salida)
{
// indice de la columna: eje x
int columna = threadIdx.x;
// indice de la fila: eje y
int fila = threadIdx.y;
// Kernel de un solo bloque:
// indice lineal
int globalID = columna + fila * COLUMNAS;
// Indice Lineal Traspuesto
int idTrasp = fila + columna * FILAS;
// Escritura en la matriz final
salida[idTrasp] = entrada[globalID];
}
int main(int argc, char** argv) {
// declaraciones
int *hst_Entrada, *hst_Salida;
int *dev_Entrada, *dev_Salida;
// reserva en el host
hst_Entrada = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
hst_Salida = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
// reserva en el device
cudaMalloc((void**)&dev_Entrada, FILAS*COLUMNAS * sizeof(int));
cudaMalloc((void**)&dev_Salida, FILAS*COLUMNAS * sizeof(int));
// inicializacion
for (int i=0; i<(FILAS*COLUMNAS); i++) {
hst_Entrada[i] = i + 1; // numeros secuenciales desde el 1
hst_Salida[i] = 0;
}
// dimensiones del kernel
// 1 Bloque
dim3 Nbloques(1);
// bloque bidimensional (x,y)
// Eje x-> COLUMNAS
// Eje y-> FILAS
dim3 hilosB(COLUMNAS, FILAS);
// copia de datos hacia el device
cudaMemcpy(dev_Entrada, hst_Entrada, FILAS*COLUMNAS * sizeof(int), cudaMemcpyHostToDevice);
// Numero de hilos
printf("> KERNEL de 1 BLOQUE con %d HILOS:\n", COLUMNAS*FILAS);
printf(" eje x -> %2d hilos\n eje y -> %2d hilos\n", COLUMNAS, FILAS);
// Lanzamiento del Kernel
MathFinal <<< Nbloques, hilosB >>>(dev_Entrada, dev_Salida);
// recogida de datos desde el device
cudaMemcpy(hst_Salida, dev_Salida, FILAS*COLUMNAS * sizeof(int), cudaMemcpyDeviceToHost);
// impresion de resultados
printf("> MATRIZ ORIGINAL:\n");
for (int i = 0; i<FILAS; i++) {
for (int j = 0; j<COLUMNAS; j++) {
printf("%3d ", hst_Entrada[j + i*COLUMNAS]);
}
printf("\n");
}
printf("\n");
printf("> MATRIZ FINAL:\n");
for (int i = 0; i<COLUMNAS; i++) {
for (int j = 0; j<FILAS; j++) {
printf("%3d ", hst_Salida[j + i*FILAS]);
}
printf("\n");
}
// salida del programa
printf("\n<pulsa [INTRO] para finalizar>\n");
getchar();
return 0;
} | code for sm_80
Function : _Z9MathFinalPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e240000002200 */
/*0050*/ IMAD R2, R5, 0xa, R4 ; /* 0x0000000a05027824 */
/* 0x001fca00078e0204 */
/*0060*/ IMAD.WIDE R2, R2, R7, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0207 */
/*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD R4, R4, 0x6, R5 ; /* 0x0000000604047824 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R7, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0207 */
/*00a0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Hilos y Bloques
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define COLUMNAS 10 // Nro de columnas -> eje x
#define FILAS 6 // Nro de filas -> eje y
// Kernel Bidimensional (x, y)
__global__
void MathFinal(int *entrada, int *salida)
{
// indice de la columna: eje x
int columna = threadIdx.x;
// indice de la fila: eje y
int fila = threadIdx.y;
// Kernel de un solo bloque:
// indice lineal
int globalID = columna + fila * COLUMNAS;
// Indice Lineal Traspuesto
int idTrasp = fila + columna * FILAS;
// Escritura en la matriz final
salida[idTrasp] = entrada[globalID];
}
int main(int argc, char** argv) {
// declaraciones
int *hst_Entrada, *hst_Salida;
int *dev_Entrada, *dev_Salida;
// reserva en el host
hst_Entrada = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
hst_Salida = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
// reserva en el device
cudaMalloc((void**)&dev_Entrada, FILAS*COLUMNAS * sizeof(int));
cudaMalloc((void**)&dev_Salida, FILAS*COLUMNAS * sizeof(int));
// inicializacion
for (int i=0; i<(FILAS*COLUMNAS); i++) {
hst_Entrada[i] = i + 1; // numeros secuenciales desde el 1
hst_Salida[i] = 0;
}
// dimensiones del kernel
// 1 Bloque
dim3 Nbloques(1);
// bloque bidimensional (x,y)
// Eje x-> COLUMNAS
// Eje y-> FILAS
dim3 hilosB(COLUMNAS, FILAS);
// copia de datos hacia el device
cudaMemcpy(dev_Entrada, hst_Entrada, FILAS*COLUMNAS * sizeof(int), cudaMemcpyHostToDevice);
// Numero de hilos
printf("> KERNEL de 1 BLOQUE con %d HILOS:\n", COLUMNAS*FILAS);
printf(" eje x -> %2d hilos\n eje y -> %2d hilos\n", COLUMNAS, FILAS);
// Lanzamiento del Kernel
MathFinal <<< Nbloques, hilosB >>>(dev_Entrada, dev_Salida);
// recogida de datos desde el device
cudaMemcpy(hst_Salida, dev_Salida, FILAS*COLUMNAS * sizeof(int), cudaMemcpyDeviceToHost);
// impresion de resultados
printf("> MATRIZ ORIGINAL:\n");
for (int i = 0; i<FILAS; i++) {
for (int j = 0; j<COLUMNAS; j++) {
printf("%3d ", hst_Entrada[j + i*COLUMNAS]);
}
printf("\n");
}
printf("\n");
printf("> MATRIZ FINAL:\n");
for (int i = 0; i<COLUMNAS; i++) {
for (int j = 0; j<FILAS; j++) {
printf("%3d ", hst_Salida[j + i*FILAS]);
}
printf("\n");
}
// salida del programa
printf("\n<pulsa [INTRO] para finalizar>\n");
getchar();
return 0;
} | .file "tmpxft_00191548_00000000-6_ejemplo4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z9MathFinalPiS_PiS_
.type _Z30__device_stub__Z9MathFinalPiS_PiS_, @function
_Z30__device_stub__Z9MathFinalPiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9MathFinalPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z9MathFinalPiS_PiS_, .-_Z30__device_stub__Z9MathFinalPiS_PiS_
.globl _Z9MathFinalPiS_
.type _Z9MathFinalPiS_, @function
_Z9MathFinalPiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z9MathFinalPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9MathFinalPiS_, .-_Z9MathFinalPiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "> KERNEL de 1 BLOQUE con %d HILOS:\n"
.align 8
.LC1:
.string " eje x -> %2d hilos\n eje y -> %2d hilos\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "> MATRIZ ORIGINAL:\n"
.LC3:
.string "%3d "
.LC4:
.string "\n"
.LC5:
.string "> MATRIZ FINAL:\n"
.section .rodata.str1.8
.align 8
.LC6:
.string "\n<pulsa [INTRO] para finalizar>\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $240, %edi
call malloc@PLT
movq %rax, %r13
movl $240, %edi
call malloc@PLT
movq %rax, %r12
movq %rsp, %rdi
movl $240, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $240, %esi
call cudaMalloc@PLT
movl $1, %eax
.L12:
movl %eax, -4(%r13,%rax,4)
movl $0, -4(%r12,%rax,4)
addq $1, %rax
cmpq $61, %rax
jne .L12
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $10, 28(%rsp)
movl $6, 32(%rsp)
movl $1, 36(%rsp)
movl $1, %ecx
movl $240, %edx
movq %r13, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $60, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $6, %ecx
movl $10, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 36(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movq 16(%rsp), %rdi
movl 24(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L13:
movl $2, %ecx
movl $240, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 40(%r13), %rbp
addq $280, %r13
leaq .LC3(%rip), %r14
leaq .LC4(%rip), %r15
.L14:
leaq -40(%rbp), %rbx
.L15:
movl (%rbx), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L15
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $40, %rbp
cmpq %r13, %rbp
jne .L14
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rbp
addq $240, %r12
leaq .LC3(%rip), %r13
leaq .LC4(%rip), %r14
jmp .L17
.L25:
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z30__device_stub__Z9MathFinalPiS_PiS_
jmp .L13
.L26:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $24, %rbp
cmpq %r12, %rbp
je .L19
.L17:
movl $0, %ebx
.L18:
movl 0(%rbp,%rbx,4), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $6, %rbx
jne .L18
jmp .L26
.L19:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq stdin(%rip), %rdi
call getc@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L27
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z9MathFinalPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z9MathFinalPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Hilos y Bloques
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define COLUMNAS 10 // Nro de columnas -> eje x
#define FILAS 6 // Nro de filas -> eje y
// Kernel Bidimensional (x, y)
__global__
void MathFinal(int *entrada, int *salida)
{
// indice de la columna: eje x
int columna = threadIdx.x;
// indice de la fila: eje y
int fila = threadIdx.y;
// Kernel de un solo bloque:
// indice lineal
int globalID = columna + fila * COLUMNAS;
// Indice Lineal Traspuesto
int idTrasp = fila + columna * FILAS;
// Escritura en la matriz final
salida[idTrasp] = entrada[globalID];
}
int main(int argc, char** argv) {
// declaraciones
int *hst_Entrada, *hst_Salida;
int *dev_Entrada, *dev_Salida;
// reserva en el host
hst_Entrada = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
hst_Salida = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
// reserva en el device
cudaMalloc((void**)&dev_Entrada, FILAS*COLUMNAS * sizeof(int));
cudaMalloc((void**)&dev_Salida, FILAS*COLUMNAS * sizeof(int));
// inicializacion
for (int i=0; i<(FILAS*COLUMNAS); i++) {
hst_Entrada[i] = i + 1; // numeros secuenciales desde el 1
hst_Salida[i] = 0;
}
// dimensiones del kernel
// 1 Bloque
dim3 Nbloques(1);
// bloque bidimensional (x,y)
// Eje x-> COLUMNAS
// Eje y-> FILAS
dim3 hilosB(COLUMNAS, FILAS);
// copia de datos hacia el device
cudaMemcpy(dev_Entrada, hst_Entrada, FILAS*COLUMNAS * sizeof(int), cudaMemcpyHostToDevice);
// Numero de hilos
printf("> KERNEL de 1 BLOQUE con %d HILOS:\n", COLUMNAS*FILAS);
printf(" eje x -> %2d hilos\n eje y -> %2d hilos\n", COLUMNAS, FILAS);
// Lanzamiento del Kernel
MathFinal <<< Nbloques, hilosB >>>(dev_Entrada, dev_Salida);
// recogida de datos desde el device
cudaMemcpy(hst_Salida, dev_Salida, FILAS*COLUMNAS * sizeof(int), cudaMemcpyDeviceToHost);
// impresion de resultados
printf("> MATRIZ ORIGINAL:\n");
for (int i = 0; i<FILAS; i++) {
for (int j = 0; j<COLUMNAS; j++) {
printf("%3d ", hst_Entrada[j + i*COLUMNAS]);
}
printf("\n");
}
printf("\n");
printf("> MATRIZ FINAL:\n");
for (int i = 0; i<COLUMNAS; i++) {
for (int j = 0; j<FILAS; j++) {
printf("%3d ", hst_Salida[j + i*FILAS]);
}
printf("\n");
}
// salida del programa
printf("\n<pulsa [INTRO] para finalizar>\n");
getchar();
return 0;
} | // Hilos y Bloques
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define COLUMNAS 10 // Nro de columnas -> eje x
#define FILAS 6 // Nro de filas -> eje y
// Kernel Bidimensional (x, y)
__global__
void MathFinal(int *entrada, int *salida)
{
// indice de la columna: eje x
int columna = threadIdx.x;
// indice de la fila: eje y
int fila = threadIdx.y;
// Kernel de un solo bloque:
// indice lineal
int globalID = columna + fila * COLUMNAS;
// Indice Lineal Traspuesto
int idTrasp = fila + columna * FILAS;
// Escritura en la matriz final
salida[idTrasp] = entrada[globalID];
}
int main(int argc, char** argv) {
// declaraciones
int *hst_Entrada, *hst_Salida;
int *dev_Entrada, *dev_Salida;
// reserva en el host
hst_Entrada = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
hst_Salida = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
// reserva en el device
hipMalloc((void**)&dev_Entrada, FILAS*COLUMNAS * sizeof(int));
hipMalloc((void**)&dev_Salida, FILAS*COLUMNAS * sizeof(int));
// inicializacion
for (int i=0; i<(FILAS*COLUMNAS); i++) {
hst_Entrada[i] = i + 1; // numeros secuenciales desde el 1
hst_Salida[i] = 0;
}
// dimensiones del kernel
// 1 Bloque
dim3 Nbloques(1);
// bloque bidimensional (x,y)
// Eje x-> COLUMNAS
// Eje y-> FILAS
dim3 hilosB(COLUMNAS, FILAS);
// copia de datos hacia el device
hipMemcpy(dev_Entrada, hst_Entrada, FILAS*COLUMNAS * sizeof(int), hipMemcpyHostToDevice);
// Numero de hilos
printf("> KERNEL de 1 BLOQUE con %d HILOS:\n", COLUMNAS*FILAS);
printf(" eje x -> %2d hilos\n eje y -> %2d hilos\n", COLUMNAS, FILAS);
// Lanzamiento del Kernel
MathFinal <<< Nbloques, hilosB >>>(dev_Entrada, dev_Salida);
// recogida de datos desde el device
hipMemcpy(hst_Salida, dev_Salida, FILAS*COLUMNAS * sizeof(int), hipMemcpyDeviceToHost);
// impresion de resultados
printf("> MATRIZ ORIGINAL:\n");
for (int i = 0; i<FILAS; i++) {
for (int j = 0; j<COLUMNAS; j++) {
printf("%3d ", hst_Entrada[j + i*COLUMNAS]);
}
printf("\n");
}
printf("\n");
printf("> MATRIZ FINAL:\n");
for (int i = 0; i<COLUMNAS; i++) {
for (int j = 0; j<FILAS; j++) {
printf("%3d ", hst_Salida[j + i*FILAS]);
}
printf("\n");
}
// salida del programa
printf("\n<pulsa [INTRO] para finalizar>\n");
getchar();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // Hilos y Bloques
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define COLUMNAS 10 // Nro de columnas -> eje x
#define FILAS 6 // Nro de filas -> eje y
// Kernel Bidimensional (x, y)
__global__
void MathFinal(int *entrada, int *salida)
{
// indice de la columna: eje x
int columna = threadIdx.x;
// indice de la fila: eje y
int fila = threadIdx.y;
// Kernel de un solo bloque:
// indice lineal
int globalID = columna + fila * COLUMNAS;
// Indice Lineal Traspuesto
int idTrasp = fila + columna * FILAS;
// Escritura en la matriz final
salida[idTrasp] = entrada[globalID];
}
int main(int argc, char** argv) {
// declaraciones
int *hst_Entrada, *hst_Salida;
int *dev_Entrada, *dev_Salida;
// reserva en el host
hst_Entrada = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
hst_Salida = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
// reserva en el device
hipMalloc((void**)&dev_Entrada, FILAS*COLUMNAS * sizeof(int));
hipMalloc((void**)&dev_Salida, FILAS*COLUMNAS * sizeof(int));
// inicializacion
for (int i=0; i<(FILAS*COLUMNAS); i++) {
hst_Entrada[i] = i + 1; // numeros secuenciales desde el 1
hst_Salida[i] = 0;
}
// dimensiones del kernel
// 1 Bloque
dim3 Nbloques(1);
// bloque bidimensional (x,y)
// Eje x-> COLUMNAS
// Eje y-> FILAS
dim3 hilosB(COLUMNAS, FILAS);
// copia de datos hacia el device
hipMemcpy(dev_Entrada, hst_Entrada, FILAS*COLUMNAS * sizeof(int), hipMemcpyHostToDevice);
// Numero de hilos
printf("> KERNEL de 1 BLOQUE con %d HILOS:\n", COLUMNAS*FILAS);
printf(" eje x -> %2d hilos\n eje y -> %2d hilos\n", COLUMNAS, FILAS);
// Lanzamiento del Kernel
MathFinal <<< Nbloques, hilosB >>>(dev_Entrada, dev_Salida);
// recogida de datos desde el device
hipMemcpy(hst_Salida, dev_Salida, FILAS*COLUMNAS * sizeof(int), hipMemcpyDeviceToHost);
// impresion de resultados
printf("> MATRIZ ORIGINAL:\n");
for (int i = 0; i<FILAS; i++) {
for (int j = 0; j<COLUMNAS; j++) {
printf("%3d ", hst_Entrada[j + i*COLUMNAS]);
}
printf("\n");
}
printf("\n");
printf("> MATRIZ FINAL:\n");
for (int i = 0; i<COLUMNAS; i++) {
for (int j = 0; j<FILAS; j++) {
printf("%3d ", hst_Salida[j + i*FILAS]);
}
printf("\n");
}
// salida del programa
printf("\n<pulsa [INTRO] para finalizar>\n");
getchar();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9MathFinalPiS_
.globl _Z9MathFinalPiS_
.p2align 8
.type _Z9MathFinalPiS_,@function
_Z9MathFinalPiS_:
s_load_b128 s[0:3], s[0:1], 0x0
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v2, 10, v1
v_add_lshl_u32 v2, v2, v0, 2
v_mul_u32_u24_e32 v0, 6, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_lshl_u32 v0, v0, v1, 2
s_waitcnt lgkmcnt(0)
global_load_b32 v2, v2, s[0:1]
s_waitcnt vmcnt(0)
global_store_b32 v0, v2, s[2:3]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9MathFinalPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9MathFinalPiS_, .Lfunc_end0-_Z9MathFinalPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9MathFinalPiS_
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z9MathFinalPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // Hilos y Bloques
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define COLUMNAS 10 // Nro de columnas -> eje x
#define FILAS 6 // Nro de filas -> eje y
// Kernel Bidimensional (x, y)
__global__
void MathFinal(int *entrada, int *salida)
{
// indice de la columna: eje x
int columna = threadIdx.x;
// indice de la fila: eje y
int fila = threadIdx.y;
// Kernel de un solo bloque:
// indice lineal
int globalID = columna + fila * COLUMNAS;
// Indice Lineal Traspuesto
int idTrasp = fila + columna * FILAS;
// Escritura en la matriz final
salida[idTrasp] = entrada[globalID];
}
int main(int argc, char** argv) {
// declaraciones
int *hst_Entrada, *hst_Salida;
int *dev_Entrada, *dev_Salida;
// reserva en el host
hst_Entrada = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
hst_Salida = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
// reserva en el device
hipMalloc((void**)&dev_Entrada, FILAS*COLUMNAS * sizeof(int));
hipMalloc((void**)&dev_Salida, FILAS*COLUMNAS * sizeof(int));
// inicializacion
for (int i=0; i<(FILAS*COLUMNAS); i++) {
hst_Entrada[i] = i + 1; // numeros secuenciales desde el 1
hst_Salida[i] = 0;
}
// dimensiones del kernel
// 1 Bloque
dim3 Nbloques(1);
// bloque bidimensional (x,y)
// Eje x-> COLUMNAS
// Eje y-> FILAS
dim3 hilosB(COLUMNAS, FILAS);
// copia de datos hacia el device
hipMemcpy(dev_Entrada, hst_Entrada, FILAS*COLUMNAS * sizeof(int), hipMemcpyHostToDevice);
// Numero de hilos
printf("> KERNEL de 1 BLOQUE con %d HILOS:\n", COLUMNAS*FILAS);
printf(" eje x -> %2d hilos\n eje y -> %2d hilos\n", COLUMNAS, FILAS);
// Lanzamiento del Kernel
MathFinal <<< Nbloques, hilosB >>>(dev_Entrada, dev_Salida);
// recogida de datos desde el device
hipMemcpy(hst_Salida, dev_Salida, FILAS*COLUMNAS * sizeof(int), hipMemcpyDeviceToHost);
// impresion de resultados
printf("> MATRIZ ORIGINAL:\n");
for (int i = 0; i<FILAS; i++) {
for (int j = 0; j<COLUMNAS; j++) {
printf("%3d ", hst_Entrada[j + i*COLUMNAS]);
}
printf("\n");
}
printf("\n");
printf("> MATRIZ FINAL:\n");
for (int i = 0; i<COLUMNAS; i++) {
for (int j = 0; j<FILAS; j++) {
printf("%3d ", hst_Salida[j + i*FILAS]);
}
printf("\n");
}
// salida del programa
printf("\n<pulsa [INTRO] para finalizar>\n");
getchar();
return 0;
} | .text
.file "ejemplo4.hip"
.globl _Z24__device_stub__MathFinalPiS_ # -- Begin function _Z24__device_stub__MathFinalPiS_
.p2align 4, 0x90
.type _Z24__device_stub__MathFinalPiS_,@function
_Z24__device_stub__MathFinalPiS_: # @_Z24__device_stub__MathFinalPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9MathFinalPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__MathFinalPiS_, .Lfunc_end0-_Z24__device_stub__MathFinalPiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $104, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $240, %edi
callq malloc
movq %rax, %r14
movl $240, %edi
callq malloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $240, %esi
callq hipMalloc
movq %rsp, %rdi
movl $240, %esi
callq hipMalloc
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 16(%rbx)
movups %xmm0, 32(%rbx)
movups %xmm0, 48(%rbx)
movups %xmm0, 64(%rbx)
movups %xmm0, 80(%rbx)
movups %xmm0, 96(%rbx)
movups %xmm0, 112(%rbx)
movups %xmm0, 128(%rbx)
movups %xmm0, 144(%rbx)
movups %xmm0, 160(%rbx)
movups %xmm0, 176(%rbx)
movups %xmm0, 192(%rbx)
movups %xmm0, 208(%rbx)
movups %xmm0, 224(%rbx)
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq 1(%rax), %rcx
movl %ecx, (%r14,%rax,4)
movq %rcx, %rax
cmpq $60, %rcx
jne .LBB1_1
# %bb.2:
movq 8(%rsp), %rdi
movl $240, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl $.L.str, %edi
movl $60, %esi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $10, %esi
movl $6, %edx
xorl %eax, %eax
callq printf
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $25769803786, %rdx # imm = 0x60000000A
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9MathFinalPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $240, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_5: # %.preheader39
# =>This Loop Header: Depth=1
# Child Loop BB1_6 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_6: # Parent Loop BB1_5 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r14,%r12,4), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $10, %r12
jne .LBB1_6
# %bb.7: # in Loop: Header=BB1_5 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r15
addq $40, %r14
cmpq $6, %r15
jne .LBB1_5
# %bb.8:
movl $10, %edi
callq putchar@PLT
movl $.Lstr.1, %edi
callq puts@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_9: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_10 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_10: # Parent Loop BB1_9 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rbx,%r15,4), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
incq %r15
cmpq $6, %r15
jne .LBB1_10
# %bb.11: # in Loop: Header=BB1_9 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $24, %rbx
cmpq $10, %r14
jne .LBB1_9
# %bb.12:
movl $.Lstr.2, %edi
callq puts@PLT
movq stdin(%rip), %rdi
callq getc
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9MathFinalPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9MathFinalPiS_,@object # @_Z9MathFinalPiS_
.section .rodata,"a",@progbits
.globl _Z9MathFinalPiS_
.p2align 3, 0x0
_Z9MathFinalPiS_:
.quad _Z24__device_stub__MathFinalPiS_
.size _Z9MathFinalPiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "> KERNEL de 1 BLOQUE con %d HILOS:\n"
.size .L.str, 36
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " eje x -> %2d hilos\n eje y -> %2d hilos\n"
.size .L.str.1, 43
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%3d "
.size .L.str.3, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9MathFinalPiS_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "> MATRIZ ORIGINAL:"
.size .Lstr, 19
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "> MATRIZ FINAL:"
.size .Lstr.1, 16
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n<pulsa [INTRO] para finalizar>"
.size .Lstr.2, 32
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__MathFinalPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9MathFinalPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9MathFinalPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e240000002200 */
/*0050*/ IMAD R2, R5, 0xa, R4 ; /* 0x0000000a05027824 */
/* 0x001fca00078e0204 */
/*0060*/ IMAD.WIDE R2, R2, R7, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0207 */
/*0070*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD R4, R4, 0x6, R5 ; /* 0x0000000604047824 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R7, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0207 */
/*00a0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9MathFinalPiS_
.globl _Z9MathFinalPiS_
.p2align 8
.type _Z9MathFinalPiS_,@function
_Z9MathFinalPiS_:
s_load_b128 s[0:3], s[0:1], 0x0
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_u32_u24_e32 v2, 10, v1
v_add_lshl_u32 v2, v2, v0, 2
v_mul_u32_u24_e32 v0, 6, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_lshl_u32 v0, v0, v1, 2
s_waitcnt lgkmcnt(0)
global_load_b32 v2, v2, s[0:1]
s_waitcnt vmcnt(0)
global_store_b32 v0, v2, s[2:3]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9MathFinalPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9MathFinalPiS_, .Lfunc_end0-_Z9MathFinalPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9MathFinalPiS_
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z9MathFinalPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00191548_00000000-6_ejemplo4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z9MathFinalPiS_PiS_
.type _Z30__device_stub__Z9MathFinalPiS_PiS_, @function
_Z30__device_stub__Z9MathFinalPiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9MathFinalPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z9MathFinalPiS_PiS_, .-_Z30__device_stub__Z9MathFinalPiS_PiS_
.globl _Z9MathFinalPiS_
.type _Z9MathFinalPiS_, @function
_Z9MathFinalPiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z9MathFinalPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9MathFinalPiS_, .-_Z9MathFinalPiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "> KERNEL de 1 BLOQUE con %d HILOS:\n"
.align 8
.LC1:
.string " eje x -> %2d hilos\n eje y -> %2d hilos\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "> MATRIZ ORIGINAL:\n"
.LC3:
.string "%3d "
.LC4:
.string "\n"
.LC5:
.string "> MATRIZ FINAL:\n"
.section .rodata.str1.8
.align 8
.LC6:
.string "\n<pulsa [INTRO] para finalizar>\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $240, %edi
call malloc@PLT
movq %rax, %r13
movl $240, %edi
call malloc@PLT
movq %rax, %r12
movq %rsp, %rdi
movl $240, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $240, %esi
call cudaMalloc@PLT
movl $1, %eax
.L12:
movl %eax, -4(%r13,%rax,4)
movl $0, -4(%r12,%rax,4)
addq $1, %rax
cmpq $61, %rax
jne .L12
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $10, 28(%rsp)
movl $6, 32(%rsp)
movl $1, 36(%rsp)
movl $1, %ecx
movl $240, %edx
movq %r13, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $60, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $6, %ecx
movl $10, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 36(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movq 16(%rsp), %rdi
movl 24(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L13:
movl $2, %ecx
movl $240, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 40(%r13), %rbp
addq $280, %r13
leaq .LC3(%rip), %r14
leaq .LC4(%rip), %r15
.L14:
leaq -40(%rbp), %rbx
.L15:
movl (%rbx), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L15
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $40, %rbp
cmpq %r13, %rbp
jne .L14
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rbp
addq $240, %r12
leaq .LC3(%rip), %r13
leaq .LC4(%rip), %r14
jmp .L17
.L25:
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z30__device_stub__Z9MathFinalPiS_PiS_
jmp .L13
.L26:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $24, %rbp
cmpq %r12, %rbp
je .L19
.L17:
movl $0, %ebx
.L18:
movl 0(%rbp,%rbx,4), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $6, %rbx
jne .L18
jmp .L26
.L19:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq stdin(%rip), %rdi
call getc@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L27
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z9MathFinalPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z9MathFinalPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ejemplo4.hip"
.globl _Z24__device_stub__MathFinalPiS_ # -- Begin function _Z24__device_stub__MathFinalPiS_
.p2align 4, 0x90
.type _Z24__device_stub__MathFinalPiS_,@function
_Z24__device_stub__MathFinalPiS_: # @_Z24__device_stub__MathFinalPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9MathFinalPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__MathFinalPiS_, .Lfunc_end0-_Z24__device_stub__MathFinalPiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $104, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $240, %edi
callq malloc
movq %rax, %r14
movl $240, %edi
callq malloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $240, %esi
callq hipMalloc
movq %rsp, %rdi
movl $240, %esi
callq hipMalloc
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 16(%rbx)
movups %xmm0, 32(%rbx)
movups %xmm0, 48(%rbx)
movups %xmm0, 64(%rbx)
movups %xmm0, 80(%rbx)
movups %xmm0, 96(%rbx)
movups %xmm0, 112(%rbx)
movups %xmm0, 128(%rbx)
movups %xmm0, 144(%rbx)
movups %xmm0, 160(%rbx)
movups %xmm0, 176(%rbx)
movups %xmm0, 192(%rbx)
movups %xmm0, 208(%rbx)
movups %xmm0, 224(%rbx)
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq 1(%rax), %rcx
movl %ecx, (%r14,%rax,4)
movq %rcx, %rax
cmpq $60, %rcx
jne .LBB1_1
# %bb.2:
movq 8(%rsp), %rdi
movl $240, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl $.L.str, %edi
movl $60, %esi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $10, %esi
movl $6, %edx
xorl %eax, %eax
callq printf
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $25769803786, %rdx # imm = 0x60000000A
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9MathFinalPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $240, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_5: # %.preheader39
# =>This Loop Header: Depth=1
# Child Loop BB1_6 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_6: # Parent Loop BB1_5 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r14,%r12,4), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $10, %r12
jne .LBB1_6
# %bb.7: # in Loop: Header=BB1_5 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r15
addq $40, %r14
cmpq $6, %r15
jne .LBB1_5
# %bb.8:
movl $10, %edi
callq putchar@PLT
movl $.Lstr.1, %edi
callq puts@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_9: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_10 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_10: # Parent Loop BB1_9 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rbx,%r15,4), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
incq %r15
cmpq $6, %r15
jne .LBB1_10
# %bb.11: # in Loop: Header=BB1_9 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $24, %rbx
cmpq $10, %r14
jne .LBB1_9
# %bb.12:
movl $.Lstr.2, %edi
callq puts@PLT
movq stdin(%rip), %rdi
callq getc
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9MathFinalPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9MathFinalPiS_,@object # @_Z9MathFinalPiS_
.section .rodata,"a",@progbits
.globl _Z9MathFinalPiS_
.p2align 3, 0x0
_Z9MathFinalPiS_:
.quad _Z24__device_stub__MathFinalPiS_
.size _Z9MathFinalPiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "> KERNEL de 1 BLOQUE con %d HILOS:\n"
.size .L.str, 36
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " eje x -> %2d hilos\n eje y -> %2d hilos\n"
.size .L.str.1, 43
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%3d "
.size .L.str.3, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9MathFinalPiS_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "> MATRIZ ORIGINAL:"
.size .Lstr, 19
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "> MATRIZ FINAL:"
.size .Lstr.1, 16
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n<pulsa [INTRO] para finalizar>"
.size .Lstr.2, 32
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__MathFinalPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9MathFinalPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
for (int d = N/2; d > 0; d = d / 2) {
if (idx < d) {
A[idx] += A[idx + d];
}
}
} | code for sm_80
Function : _Z9test_ProgPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e640000002100 */
/*0040*/ ISETP.GE.AND P0, PT, R2, 0x2, PT ; /* 0x000000020200780c */
/* 0x000fda0003f06270 */
/*0050*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x003fe200078e0203 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fce00078e00ff */
/*00a0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0205 */
/*00b0*/ LEA.HI R5, R4.reuse, R4, RZ, 0x1 ; /* 0x0000000404057211 */
/* 0x040fe200078f08ff */
/*00c0*/ BSSY B0, 0x170 ; /* 0x000000a000007945 */
/* 0x000fe20003800000 */
/*00d0*/ ISETP.GT.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe40003f24270 */
/*00e0*/ SHF.R.S32.HI R9, RZ, 0x1, R5 ; /* 0x00000001ff097819 */
/* 0x000fc80000011405 */
/*00f0*/ ISETP.GE.AND P0, PT, R0, R9, PT ; /* 0x000000090000720c */
/* 0x000fda0003f06270 */
/*0100*/ @P0 BRA 0x160 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*0110*/ IMAD.WIDE R4, R9, 0x4, R2 ; /* 0x0000000409047825 */
/* 0x000fe200078e0202 */
/*0120*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000eaa000c1e1900 */
/*0130*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea4000c1e1900 */
/*0140*/ IADD3 R7, R6, R5, RZ ; /* 0x0000000506077210 */
/* 0x004fca0007ffe0ff */
/*0150*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101904 */
/*0160*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0009 */
/*0180*/ @P1 BRA 0xb0 ; /* 0xffffff2000001947 */
/* 0x000fea000383ffff */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
for (int d = N/2; d > 0; d = d / 2) {
if (idx < d) {
A[idx] += A[idx + d];
}
}
} | .file "tmpxft_000ce322_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z9test_ProgPiiPii
.type _Z29__device_stub__Z9test_ProgPiiPii, @function
_Z29__device_stub__Z9test_ProgPiiPii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9test_ProgPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z29__device_stub__Z9test_ProgPiiPii, .-_Z29__device_stub__Z9test_ProgPiiPii
.globl _Z9test_ProgPii
.type _Z9test_ProgPii, @function
_Z9test_ProgPii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z9test_ProgPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9test_ProgPii, .-_Z9test_ProgPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9test_ProgPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9test_ProgPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
for (int d = N/2; d > 0; d = d / 2) {
if (idx < d) {
A[idx] += A[idx + d];
}
}
} | //pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
for (int d = N/2; d > 0; d = d / 2) {
if (idx < d) {
A[idx] += A[idx + d];
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
for (int d = N/2; d > 0; d = d / 2) {
if (idx < d) {
A[idx] += A[idx + d];
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9test_ProgPii
.globl _Z9test_ProgPii
.p2align 8
.type _Z9test_ProgPii,@function
_Z9test_ProgPii:
s_load_b32 s2, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 2
s_cbranch_scc1 .LBB0_5
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
s_cmp_lt_u32 s2, 4
s_mov_b32 s2, s3
s_cbranch_scc1 .LBB0_5
.LBB0_3:
s_lshr_b32 s3, s2, 1
s_mov_b32 s4, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v4, s3, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_clause 0x1
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v4, v0
global_store_b32 v[2:3], v0, off
s_branch .LBB0_2
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9test_ProgPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9test_ProgPii, .Lfunc_end0-_Z9test_ProgPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9test_ProgPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9test_ProgPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
for (int d = N/2; d > 0; d = d / 2) {
if (idx < d) {
A[idx] += A[idx + d];
}
}
} | .text
.file "kernel.hip"
.globl _Z24__device_stub__test_ProgPii # -- Begin function _Z24__device_stub__test_ProgPii
.p2align 4, 0x90
.type _Z24__device_stub__test_ProgPii,@function
_Z24__device_stub__test_ProgPii: # @_Z24__device_stub__test_ProgPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9test_ProgPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__test_ProgPii, .Lfunc_end0-_Z24__device_stub__test_ProgPii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9test_ProgPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9test_ProgPii,@object # @_Z9test_ProgPii
.section .rodata,"a",@progbits
.globl _Z9test_ProgPii
.p2align 3, 0x0
_Z9test_ProgPii:
.quad _Z24__device_stub__test_ProgPii
.size _Z9test_ProgPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9test_ProgPii"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__test_ProgPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9test_ProgPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9test_ProgPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e640000002100 */
/*0040*/ ISETP.GE.AND P0, PT, R2, 0x2, PT ; /* 0x000000020200780c */
/* 0x000fda0003f06270 */
/*0050*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x003fe200078e0203 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fce00078e00ff */
/*00a0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0205 */
/*00b0*/ LEA.HI R5, R4.reuse, R4, RZ, 0x1 ; /* 0x0000000404057211 */
/* 0x040fe200078f08ff */
/*00c0*/ BSSY B0, 0x170 ; /* 0x000000a000007945 */
/* 0x000fe20003800000 */
/*00d0*/ ISETP.GT.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe40003f24270 */
/*00e0*/ SHF.R.S32.HI R9, RZ, 0x1, R5 ; /* 0x00000001ff097819 */
/* 0x000fc80000011405 */
/*00f0*/ ISETP.GE.AND P0, PT, R0, R9, PT ; /* 0x000000090000720c */
/* 0x000fda0003f06270 */
/*0100*/ @P0 BRA 0x160 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*0110*/ IMAD.WIDE R4, R9, 0x4, R2 ; /* 0x0000000409047825 */
/* 0x000fe200078e0202 */
/*0120*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000eaa000c1e1900 */
/*0130*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea4000c1e1900 */
/*0140*/ IADD3 R7, R6, R5, RZ ; /* 0x0000000506077210 */
/* 0x004fca0007ffe0ff */
/*0150*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101904 */
/*0160*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, R9 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0009 */
/*0180*/ @P1 BRA 0xb0 ; /* 0xffffff2000001947 */
/* 0x000fea000383ffff */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9test_ProgPii
.globl _Z9test_ProgPii
.p2align 8
.type _Z9test_ProgPii,@function
_Z9test_ProgPii:
s_load_b32 s2, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 2
s_cbranch_scc1 .LBB0_5
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
s_cmp_lt_u32 s2, 4
s_mov_b32 s2, s3
s_cbranch_scc1 .LBB0_5
.LBB0_3:
s_lshr_b32 s3, s2, 1
s_mov_b32 s4, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v4, s3, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_clause 0x1
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v4, v0
global_store_b32 v[2:3], v0, off
s_branch .LBB0_2
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9test_ProgPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9test_ProgPii, .Lfunc_end0-_Z9test_ProgPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9test_ProgPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9test_ProgPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000ce322_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z9test_ProgPiiPii
.type _Z29__device_stub__Z9test_ProgPiiPii, @function
_Z29__device_stub__Z9test_ProgPiiPii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9test_ProgPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z29__device_stub__Z9test_ProgPiiPii, .-_Z29__device_stub__Z9test_ProgPiiPii
.globl _Z9test_ProgPii
.type _Z9test_ProgPii, @function
_Z9test_ProgPii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z9test_ProgPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9test_ProgPii, .-_Z9test_ProgPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9test_ProgPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9test_ProgPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z24__device_stub__test_ProgPii # -- Begin function _Z24__device_stub__test_ProgPii
.p2align 4, 0x90
.type _Z24__device_stub__test_ProgPii,@function
_Z24__device_stub__test_ProgPii: # @_Z24__device_stub__test_ProgPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9test_ProgPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__test_ProgPii, .Lfunc_end0-_Z24__device_stub__test_ProgPii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9test_ProgPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9test_ProgPii,@object # @_Z9test_ProgPii
.section .rodata,"a",@progbits
.globl _Z9test_ProgPii
.p2align 3, 0x0
_Z9test_ProgPii:
.quad _Z24__device_stub__test_ProgPii
.size _Z9test_ProgPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9test_ProgPii"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__test_ProgPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9test_ProgPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
} | .file "tmpxft_001824f7_00000000-6_checkDeviceInfo.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s Starting...\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq (%rsi), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
call cudaGetDeviceCount@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
} | .text
.file "checkDeviceInfo.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq (%rsi), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s Starting...\n"
.size .L.str, 16
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001824f7_00000000-6_checkDeviceInfo.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s Starting...\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq (%rsi), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
call cudaGetDeviceCount@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "checkDeviceInfo.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq (%rsi), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s Starting...\n"
.size .L.str, 16
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Include packages and also CUDA packages
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Result from last compute of world.
extern unsigned char *g_resultData;
// Current state of world.
extern unsigned char *g_data;
// ----- SAVE RECEIVING ROWS FROM OTHER GPUS ----- //
// "Above" row
extern unsigned char *g_aboveRow;
// "Below" row
extern unsigned char *g_belowRow;
// "Above" row
extern unsigned char *g_resultAboveRow;
// "Below" row
extern unsigned char *g_resultBelowRow;
// ----- DECLARE KERNEL ----- //
__global__ void HL_kernel(unsigned int worldWidth, unsigned int worldHeight);
// Define number of Processors
int cudaDeviceCount;
cudaError_t cE;
static inline void HL_initAllZeros(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
}
static inline void HL_initAllOnes(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
int i;
// set all rows of world to true
for( i = 0; i < total_world_size; i++)
{
g_data[i] = 1;
// Set above and below rows
if (i < worldWidth){
g_aboveRow[i] = 1;
g_belowRow[i] = 1;
}
}
}
static inline void HL_initOnesInMiddle(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
int i;
for(i = worldWidth * (worldHeight - 1) + 128; i < worldWidth * (worldHeight - 1) + 139; i++){
g_data[i] = 1;
}
}
static inline void HL_initOnesAtCorners(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
if(myrank == 0){
g_data[0] = 1; // upper left
g_data[worldWidth-1]=1; // upper right
g_aboveRow[0] = 1; // upper left
g_aboveRow[worldWidth-1]=1; // upper right
}
if(myrank == cudaDeviceCount - 1){
g_data[(worldHeight * (worldWidth-1))]=1; // lower left
g_data[(worldHeight * (worldWidth-1)) + worldWidth-1]=1; // lower right
g_belowRow[0] = 1;
g_belowRow[worldWidth - 1] = 1;
}
}
static inline void HL_initSpinnerAtCorner(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
if( myrank == 0 ){
g_data[0] = 1; // upper left
g_data[1] = 1; // upper left +1
g_data[worldWidth-1]=1; // upper right
g_aboveRow[0] = 1; // upper left
g_aboveRow[1] = 1; // upper left +1
g_aboveRow[worldWidth-1]=1; // upper right
}
}
static inline void HL_initReplicator(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
size_t x, y;
x = worldWidth/2;
y = worldHeight/2;
g_data[x + y*worldWidth + 1] = 1;
g_data[x + y*worldWidth + 2] = 1;
g_data[x + y*worldWidth + 3] = 1;
g_data[x + (y+1)*worldWidth] = 1;
g_data[x + (y+2)*worldWidth] = 1;
g_data[x + (y+3)*worldWidth] = 1;
}
// ---------- EXPORT TO APPROPRIATE COMPILER ---------- //
extern "C" void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
// INITIALIZE THE CUDA WORLD
if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess )
{
printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount );
exit(-1);
}
if( (cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess )
{
printf(" Unable to have myrank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
// INITIALIZE THE PATTERN
switch(pattern)
{
case 0:
HL_initAllZeros( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 1:
HL_initAllOnes( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 2:
HL_initOnesInMiddle( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 3:
HL_initOnesAtCorners( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 4:
HL_initSpinnerAtCorner( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 5:
HL_initReplicator( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
default:
printf("Pattern %u has not been implemented \n", pattern);
exit(-1);
}
}
// MAIN KERNEL FUNCTION THAT DOES ALL OF THE WORK
__global__ void HL_kernel( unsigned char* d_data, unsigned char* d_resultData, unsigned char* d_aboveRow, unsigned char* d_belowRow, unsigned int worldWidth, unsigned int worldHeight){
// Store index value
size_t index;
// Loop over the threads
for(index = blockIdx.x * blockDim.x + threadIdx.x; index < worldWidth*worldHeight; index += blockDim.x * gridDim.x){
// Allocate space
int y0 = ((index + worldHeight - 1) % worldHeight) * worldWidth;
int y1 = index * worldWidth;
int y2 = ((index + 1) % worldHeight) * worldWidth;
// Get the current block and thread
int x;
// Loop over corresponding COLUMNS
for (x = 0; x < worldWidth; ++x){
// Set current column, left column, and right column
int x1 = x;
int x0 = (x1 + worldWidth - 1) % worldWidth;
int x2 = (x1 + 1) % worldWidth;
// Get the status of the current cell to determine logic of life span
int is_alive = d_data[x1+y1];
// Count the number of alive neighbors
int num_alive = 0;
// Check above and below row cases
if (x1+y1 < worldWidth) {
num_alive = d_aboveRow[x0] + d_aboveRow[x1] + d_aboveRow[x2] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
else if (x1+y1 > worldWidth*worldHeight - worldWidth - 1) {
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_belowRow[x0] + d_belowRow[x1] + d_belowRow[x2];
}
else {
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
// Logic for updating values
if (is_alive == 1){
// Cell is alive!
if (num_alive < 2){
// Underpopulated
d_resultData[x1+y1] = 0;
}
else if (num_alive == 2 || num_alive == 3){
// Just the right amount of neighbors
d_resultData[x1+y1] = 1;
}
else {
// Overpopulated
d_resultData[x1+y1] = 0;
}
}
else {
// Cell is dead :(
if (num_alive == 3 || num_alive == 6) {
// #Resurrected
d_resultData[x1+y1] = 1;
}
else {
// We stay dead
d_resultData[x1+y1] = 0;
}
}// End logic for staying dead
} // End x loop
} // End loop over each thread
// ----- SWAP DATA IN ABOVE ROWS AND BELOW ROWS ----- //
int j;
for(j = 0; j < worldWidth; j++){
d_aboveRow[j] = d_resultData[j];
d_belowRow[j] = d_resultData[j + worldWidth*(worldHeight - 1)];
}
// Synchronize the threads?
__syncthreads();
}
// LAUNCH KERNEL FUNCTION
extern "C" void HL_kernelLaunch( unsigned char** d_data, unsigned char** d_resultData, unsigned char** d_aboveRow, unsigned char** d_belowRow, int block_count, int thread_count, unsigned int worldWidth, unsigned int worldHeight, int myrank){
// Call the kernel
HL_kernel<<<block_count,thread_count>>>(*d_data, *d_resultData, *d_aboveRow, *d_belowRow, worldWidth, worldHeight);
// Synchronize the CUDA devices
cudaDeviceSynchronize();
}
// Free memory
extern "C" void freeCudaArrays(int myrank){
cudaFree(g_data);
cudaFree(g_resultData);
cudaFree(g_aboveRow);
cudaFree(g_belowRow);
} | .file "tmpxft_00190d5d_00000000-6_highlifeCuda.cudafe1.cpp"
.text
#APP
#NO_APP
# void __cudaUnregisterBinaryUtil(void)
# Unregisters this module's fat binary with the CUDA runtime. Installed via
# atexit() by __sti____cudaRegisterAll below. SysV AMD64; compiler-generated.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2081:
.cfi_startproc
endbr64
subq $8, %rsp                          # realign %rsp to 16 for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2081:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string " Unable to determine cuda device count, error is %d, count is %d\n"
.align 8
.LC1:
.string " Unable to have myrank %d set to cuda device %d, error is %d \n"
.align 8
.LC2:
.string "Pattern %u has not been implemented \n"
.text
# void HL_initMaster(unsigned pattern, size_t worldWidth, size_t worldHeight,
#                    int myrank, int cudaDeviceCount)
# Compiler-generated (GCC) host code. Binds the rank to a device
# (myrank % deviceCount), then dispatches through the .L8 jump table to one of
# the six inlined HL_init* pattern initialisers.
# Register roles after the prologue:
#   r12d = pattern, rbp = worldWidth, rbx = worldHeight,
#   r13d = myrank, 12(%rsp) = cudaDeviceCount (out-param for the query).
.globl HL_initMaster
.type HL_initMaster, @function
HL_initMaster:
.LFB2076:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $16, %rsp
.cfi_def_cfa_offset 64
movl %edi, %r12d
movq %rsi, %rbp
movq %rdx, %rbx
movl %ecx, %r13d
movl %r8d, 12(%rsp)
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl %eax, cE(%rip)                    # cE = cudaGetDeviceCount status
testl %eax, %eax
jne .L22                               # nonzero status -> error path
movl %r13d, %eax
cltd
idivl 12(%rsp)                         # edx = myrank % cudaDeviceCount
movl %edx, %edi
call cudaSetDevice@PLT
movl %eax, %r8d
movl %eax, cE(%rip)
testl %eax, %eax
jne .L23
cmpl $5, %r12d
ja .L6                                 # pattern > 5 -> "not implemented"
movl %r12d, %r12d
leaq .L8(%rip), %rdx
movslq (%rdx,%r12,4), %rax
addq %rdx, %rax
notrack jmp *%rax                      # switch(pattern) via jump table
# Jump table: one entry per pattern 0..5.
.section .rodata
.align 4
.align 4
.L8:
.long .L13-.L8
.long .L12-.L8
.long .L11-.L8
.long .L10-.L8
.long .L9-.L8
.long .L7-.L8
.text
# Error: cudaGetDeviceCount failed — print and exit(-1).
.L22:
movl 12(%rsp), %ecx
movl %eax, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
# Error: cudaSetDevice failed — print and exit(-1).
.L23:
movl %r13d, %eax
cltd
idivl 12(%rsp)
movl %edx, %ecx
movl %r13d, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
# pattern 0: HL_initAllZeros inlined — allocate + zero all four buffers.
.L13:
imulq %rbp, %rbx
movl $1, %edx
movq %rbx, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbx, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
# Shared epilogue: restore callee-saved registers and return.
.L3:
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
# pattern 1: HL_initAllOnes inlined — allocate, then set every cell to 1
# (ghost rows too for the first worldWidth indices).
.L12:
.cfi_restore_state
imulq %rbp, %rbx
movl $1, %edx
movq %rbx, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbx, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
testq %rbx, %rbx
je .L3
movl $0, %eax
jmp .L16
.L15:
addq $1, %rax
cmpq %rax, %rbx
je .L3
.L16:
movq g_data(%rip), %rdx
movb $1, (%rdx,%rax)                   # g_data[i] = 1
cmpq %rbp, %rax
jnb .L15                               # i >= worldWidth: skip ghost rows
movq g_aboveRow(%rip), %rdx
movb $1, (%rdx,%rax)
movq g_belowRow(%rip), %rdx
movb $1, (%rdx,%rax)
jmp .L15
# pattern 2: HL_initOnesInMiddle inlined — ones at offsets
# worldWidth*(worldHeight-1)+128 .. +138.
.L11:
movq %rbp, %r12
imulq %rbx, %r12
movl $1, %edx
movq %r12, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %r12, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
subq $1, %rbx
movl %ebx, %eax
imull %ebp, %eax
subl $-128, %eax                       # start = width*(height-1) + 128 (32-bit, then sign-extended)
cltq
imulq %rbp, %rbx
addq $139, %rbx                        # end = width*(height-1) + 139
cmpq %rbx, %rax
jnb .L3
.L17:
movq g_data(%rip), %rdx
movb $1, (%rdx,%rax)
addq $1, %rax
cmpq %rbx, %rax
jne .L17
jmp .L3
# pattern 3: HL_initOnesAtCorners inlined — corners on first/last rank.
.L10:
movl 12(%rsp), %r14d
movq %rbp, %r12
imulq %rbx, %r12
movl $1, %edx
movq %r12, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %r12, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
testl %r13d, %r13d
jne .L18                               # only rank 0 seeds the top corners
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbp)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbp)
.L18:
subl $1, %r14d
cmpl %r14d, %r13d
jne .L3                                # only the last rank seeds bottom corners
leaq -1(%rbp), %rax
imulq %rax, %rbx                       # worldHeight*(worldWidth-1), as in the C source
movq g_data(%rip), %rdx
movb $1, (%rdx,%rbx)
addq g_data(%rip), %rax
movb $1, (%rax,%rbx)
movq g_belowRow(%rip), %rax
movb $1, (%rax)
movq g_belowRow(%rip), %rax
movb $1, -1(%rax,%rbp)
jmp .L3
# pattern 4: HL_initSpinnerAtCorner inlined — spinner in rank 0's top row.
.L9:
imulq %rbp, %rbx
movl $1, %edx
movq %rbx, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbx, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
testl %r13d, %r13d
jne .L3
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, 1(%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbp)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, 1(%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbp)
jmp .L3
# pattern 5: HL_initReplicator inlined — six seeds around the world centre.
.L7:
movq %rbp, %r12
imulq %rbx, %r12
movl $1, %edx
movq %r12, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %r12, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
movq %rbp, %rdx
shrq %rdx                              # x = worldWidth / 2
shrq %rbx                              # y = worldHeight / 2
imulq %rbp, %rbx                       # y * worldWidth
leaq (%rdx,%rbx), %rax                 # centre = x + y*worldWidth
movq g_data(%rip), %rcx
movb $1, 1(%rcx,%rax)
movq g_data(%rip), %rcx
movb $1, 2(%rcx,%rax)
movq g_data(%rip), %rcx
movb $1, 3(%rcx,%rax)
leaq 0(%rbp,%rbx), %rax
movq %rdx, %rcx
addq g_data(%rip), %rcx
movb $1, (%rcx,%rax)
addq %rbp, %rax
movq %rdx, %rcx
addq g_data(%rip), %rcx
movb $1, (%rcx,%rax)
addq g_data(%rip), %rdx
addq %rax, %rdx
movb $1, (%rdx,%rbp)
jmp .L3
# default: unknown pattern — print and exit(-1).
.L6:
movl %r12d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2076:
.size HL_initMaster, .-HL_initMaster
# void freeCudaArrays(int myrank)
# cudaFree() on the four managed globals, in declaration order. The myrank
# argument (edi) is unused. Compiler-generated, SysV AMD64.
.globl freeCudaArrays
.type freeCudaArrays, @function
freeCudaArrays:
.LFB2078:
.cfi_startproc
endbr64
subq $8, %rsp                          # keep %rsp 16-byte aligned at calls
.cfi_def_cfa_offset 16
movq g_data(%rip), %rdi
call cudaFree@PLT
movq g_resultData(%rip), %rdi
call cudaFree@PLT
movq g_aboveRow(%rip), %rdi
call cudaFree@PLT
movq g_belowRow(%rip), %rdi
call cudaFree@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2078:
.size freeCudaArrays, .-freeCudaArrays
# Device stub for HL_kernel: spills the six kernel arguments to the stack,
# builds the argument-pointer array, pops the <<<>>> launch configuration,
# and calls cudaLaunchKernel. Stack-protector guarded. Compiler-generated.
.globl _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
.type _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj, @function
_Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj:
.LFB2103:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# Spill the six kernel arguments (4 pointers + 2 unsigned ints).
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax                      # stack canary in
movq %rax, 168(%rsp)
xorl %eax, %eax
# args[] = addresses of the spilled arguments, in order.
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
# Default grid/block dim3 values (overwritten by PopCallConfiguration).
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L30                                # 0 = configuration available -> launch
.L26:
movq 168(%rsp), %rax                   # stack canary check
subq %fs:40, %rax
jne .L31
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
# Pass sharedMem and stream on the stack, then launch the kernel.
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9                    # args array
movq 92(%rsp), %rcx                    # blockDim
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi                    # gridDim
movl 88(%rsp), %edx
leaq _Z9HL_kernelPhS_S_S_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L26
.L31:
call __stack_chk_fail@PLT              # canary mismatch — abort
.cfi_endproc
.LFE2103:
.size _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj, .-_Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
# Host-side trampoline with the kernel's mangled name; simply forwards all
# register arguments unchanged to the device stub. Its address is what gets
# registered with the runtime and passed to cudaLaunchKernel.
.globl _Z9HL_kernelPhS_S_S_jj
.type _Z9HL_kernelPhS_S_S_jj, @function
_Z9HL_kernelPhS_S_S_jj:
.LFB2104:
.cfi_startproc
endbr64
subq $8, %rsp                          # realign %rsp to 16 for the call
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2104:
.size _Z9HL_kernelPhS_S_S_jj, .-_Z9HL_kernelPhS_S_S_jj
# void HL_kernelLaunch(uchar**, uchar**, uchar**, uchar**,
#                      int block_count, int thread_count,
#                      unsigned worldWidth, unsigned worldHeight, int myrank)
# Pushes the <<<block_count, thread_count>>> configuration, launches the
# kernel (via the stub) when the push succeeds (returns 0), then always
# calls cudaDeviceSynchronize. myrank (stack arg) is unused.
.globl HL_kernelLaunch
.type HL_kernelLaunch, @function
HL_kernelLaunch:
.LFB2077:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
# Save the four double-pointer arguments across the configuration push.
movq %rdi, %rbx
movq %rsi, %rbp
movq %rdx, %r12
movq %rcx, %r13
# Build dim3{thread_count,1,1} and dim3{block_count,1,1} on the stack.
movl %r9d, 20(%rsp)
movl $1, 24(%rsp)
movl %r8d, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d                          # stream = 0
movl $0, %r8d                          # sharedMem = 0
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37                                # 0 = pushed OK -> do the launch
.L35:
call cudaDeviceSynchronize@PLT
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
# Dereference the double pointers and call the stub (kernel launch).
movq 0(%r13), %rcx
movq (%r12), %rdx
movq 0(%rbp), %rsi
movq (%rbx), %rdi
movl 88(%rsp), %r9d                    # worldHeight (stack arg)
movl 80(%rsp), %r8d                    # worldWidth (stack arg)
call _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
jmp .L35
.cfi_endproc
.LFE2077:
.size HL_kernelLaunch, .-HL_kernelLaunch
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "_Z9HL_kernelPhS_S_S_jj"
.text
# Static init-array constructor: registers the fat binary and the HL_kernel
# entry point with the CUDA runtime, then queues the unregister helper with
# atexit(). Compiler-generated.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2106:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)   # save handle for unregister
# Four NULL stack args for __cudaRegisterFunction (tid/bid/bDim/gDim).
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d                         # thread_limit = -1 (none)
leaq .LC3(%rip), %rdx                  # device-side mangled name string
movq %rdx, %rcx
leaq _Z9HL_kernelPhS_S_S_jj(%rip), %rsi   # host trampoline address
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2106:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl cE
.bss
.align 4
.type cE, @object
.size cE, 4
cE:
.zero 4
.globl cudaDeviceCount
.align 4
.type cudaDeviceCount, @object
.size cudaDeviceCount, 4
cudaDeviceCount:
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Include packages and also CUDA packages
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Result from last compute of world.
extern unsigned char *g_resultData;
// Current state of world.
extern unsigned char *g_data;
// ----- SAVE RECEIVING ROWS FROM OTHER GPUS ----- //
// "Above" row
extern unsigned char *g_aboveRow;
// "Below" row
extern unsigned char *g_belowRow;
// "Above" row
extern unsigned char *g_resultAboveRow;
// "Below" row
extern unsigned char *g_resultBelowRow;
// ----- DECLARE KERNEL ----- //
__global__ void HL_kernel(unsigned int worldWidth, unsigned int worldHeight);
// Define number of Processors
int cudaDeviceCount;
cudaError_t cE;
// Allocate the world, its double buffer, and both ghost rows as managed
// memory, with every cell dead (0). myrank/cudaDeviceCount are unused here
// but kept so all HL_init* helpers share one signature.
static inline void HL_initAllZeros(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t worldBytes = worldWidth * worldHeight * sizeof(unsigned char);
    const size_t rowBytes   = worldWidth * sizeof(unsigned char);

    // World state and its result buffer.
    cudaMallocManaged(&g_data, worldBytes);
    cudaMemset(g_data, 0, worldBytes);
    cudaMallocManaged(&g_resultData, worldBytes);
    cudaMemset(g_resultData, 0, worldBytes);

    // Ghost rows exchanged with the neighbouring ranks.
    cudaMallocManaged(&g_aboveRow, rowBytes);
    cudaMemset(g_aboveRow, 0, rowBytes);
    cudaMallocManaged(&g_belowRow, rowBytes);
    cudaMemset(g_belowRow, 0, rowBytes);
}
// Allocate the world, its double buffer, and both ghost rows, then set every
// cell (and the first worldWidth entries of both ghost rows) alive.
// myrank/cudaDeviceCount are unused but kept for a uniform HL_init* signature.
static inline void HL_initAllOnes(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // BUGFIX: the index was a signed int compared against a size_t bound;
    // for worlds with more than INT_MAX cells that overflows (UB / infinite
    // loop). Use size_t to match the bound's type.
    size_t i;
    // set all rows of world to true
    for( i = 0; i < total_world_size; i++)
    {
        g_data[i] = 1;
        // The first row's columns also seed both ghost rows.
        if (i < worldWidth){
            g_aboveRow[i] = 1;
            g_belowRow[i] = 1;
        }
    }
}
// Allocate all buffers zeroed, then set alive the 11 cells at offsets
// worldWidth*(worldHeight-1)+128 .. +138 (a short run in the last row).
// myrank/cudaDeviceCount are unused but kept for a uniform HL_init* signature.
static inline void HL_initOnesInMiddle(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // BUGFIX: the index was a signed int compared against size_t expressions;
    // for large worlds the int overflows (UB). Use size_t to match the bounds.
    size_t i;
    for(i = worldWidth * (worldHeight - 1) + 128; i < worldWidth * (worldHeight - 1) + 139; i++){
        g_data[i] = 1;
    }
}
// Allocate all buffers zeroed, then set alive the four world corners:
// rank 0 owns the top corners (mirrored into g_aboveRow), the last rank owns
// the bottom corners (mirrored into g_belowRow).
static inline void HL_initOnesAtCorners(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    if(myrank == 0){
        g_data[0] = 1; // upper left
        g_data[worldWidth-1]=1; // upper right
        g_aboveRow[0] = 1; // upper left
        g_aboveRow[worldWidth-1]=1; // upper right
    }
    if(myrank == cudaDeviceCount - 1){
        // BUGFIX: the last-row offset was worldHeight*(worldWidth-1), which
        // only matches the intended "lower left" for square worlds and can
        // index past the allocation (lower right) when worldWidth != worldHeight.
        // The first cell of the last row is worldWidth*(worldHeight-1).
        g_data[(worldWidth * (worldHeight-1))]=1; // lower left
        g_data[(worldWidth * (worldHeight-1)) + worldWidth-1]=1; // lower right
        g_belowRow[0] = 1;
        g_belowRow[worldWidth - 1] = 1;
    }
}
// Allocate all buffers zeroed, then — on rank 0 only — seed a three-cell
// spinner in the top row that wraps around the left/right edge
// (cells 0, 1, and worldWidth-1), mirrored into the above ghost row.
static inline void HL_initSpinnerAtCorner(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t worldBytes = worldWidth * worldHeight * sizeof(unsigned char);
    const size_t rowBytes   = worldWidth * sizeof(unsigned char);

    // Zero-initialised world, double buffer, and ghost rows.
    cudaMallocManaged(&g_data, worldBytes);
    cudaMemset(g_data, 0, worldBytes);
    cudaMallocManaged(&g_resultData, worldBytes);
    cudaMemset(g_resultData, 0, worldBytes);
    cudaMallocManaged(&g_aboveRow, rowBytes);
    cudaMemset(g_aboveRow, 0, rowBytes);
    cudaMallocManaged(&g_belowRow, rowBytes);
    cudaMemset(g_belowRow, 0, rowBytes);

    // Only the first rank hosts the spinner.
    if (myrank != 0)
        return;
    g_data[0] = 1;                 // upper left
    g_data[1] = 1;                 // upper left + 1
    g_data[worldWidth - 1] = 1;    // upper right (wraps to the left cell)
    g_aboveRow[0] = 1;
    g_aboveRow[1] = 1;
    g_aboveRow[worldWidth - 1] = 1;
}
// Allocate all buffers zeroed, then seed the HighLife replicator pattern:
// three cells to the right of the world centre and three cells below it.
// Runs on every rank (no myrank guard in the original contract).
static inline void HL_initReplicator(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t worldBytes = worldWidth * worldHeight * sizeof(unsigned char);
    const size_t rowBytes   = worldWidth * sizeof(unsigned char);

    cudaMallocManaged(&g_data, worldBytes);
    cudaMemset(g_data, 0, worldBytes);
    cudaMallocManaged(&g_resultData, worldBytes);
    cudaMemset(g_resultData, 0, worldBytes);
    cudaMallocManaged(&g_aboveRow, rowBytes);
    cudaMemset(g_aboveRow, 0, rowBytes);
    cudaMallocManaged(&g_belowRow, rowBytes);
    cudaMemset(g_belowRow, 0, rowBytes);

    // Linear index of the centre cell (worldWidth/2, worldHeight/2).
    const size_t center = (worldHeight / 2) * worldWidth + worldWidth / 2;
    g_data[center + 1] = 1;                  // three cells to the right
    g_data[center + 2] = 1;
    g_data[center + 3] = 1;
    g_data[center + 1 * worldWidth] = 1;     // three cells below
    g_data[center + 2 * worldWidth] = 1;
    g_data[center + 3 * worldWidth] = 1;
}
// ---------- EXPORT TO APPROPRIATE COMPILER ---------- //
// Entry point called from the MPI driver: binds this rank to a CUDA device
// (round-robin over the visible devices) and dispatches to the requested
// initial-pattern helper. Exits the process on any CUDA error or on an
// unknown pattern id.
extern "C" void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    // Query how many devices this node exposes (overwrites the parameter,
    // which shadows the file-level global of the same name).
    cE = cudaGetDeviceCount(&cudaDeviceCount);
    if (cE != cudaSuccess) {
        printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount );
        exit(-1);
    }

    // Round-robin rank -> device binding.
    cE = cudaSetDevice(myrank % cudaDeviceCount);
    if (cE != cudaSuccess) {
        printf(" Unable to have myrank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
        exit(-1);
    }

    // Build the requested initial world pattern.
    switch (pattern) {
    case 0: HL_initAllZeros( worldWidth, worldHeight, myrank, cudaDeviceCount );       break;
    case 1: HL_initAllOnes( worldWidth, worldHeight, myrank, cudaDeviceCount );        break;
    case 2: HL_initOnesInMiddle( worldWidth, worldHeight, myrank, cudaDeviceCount );   break;
    case 3: HL_initOnesAtCorners( worldWidth, worldHeight, myrank, cudaDeviceCount );  break;
    case 4: HL_initSpinnerAtCorner( worldWidth, worldHeight, myrank, cudaDeviceCount );break;
    case 5: HL_initReplicator( worldWidth, worldHeight, myrank, cudaDeviceCount );     break;
    default:
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}
// MAIN KERNEL FUNCTION THAT DOES ALL OF THE WORK
// One HighLife step over this rank's slab of the world. Reads d_data, writes
// d_resultData; d_aboveRow/d_belowRow hold the neighbouring ranks' boundary
// rows. Columns wrap toroidally within the slab.
// NOTE(review): `index` is bounded by worldWidth*worldHeight (a per-CELL
// count) but is used below as a ROW index (y1 = index*worldWidth). If one
// iteration is meant to process one row, the bound should presumably be
// worldHeight — confirm against the launch configuration before changing.
__global__ void HL_kernel( unsigned char* d_data, unsigned char* d_resultData, unsigned char* d_aboveRow, unsigned char* d_belowRow, unsigned int worldWidth, unsigned int worldHeight){
// Store index value
size_t index;
// Loop over the threads
for(index = blockIdx.x * blockDim.x + threadIdx.x; index < worldWidth*worldHeight; index += blockDim.x * gridDim.x){
// Allocate space
// Row byte-offsets of the previous/current/next rows (rows wrap mod worldHeight).
// NOTE(review): these are int; worldWidth*row can overflow int for large worlds.
int y0 = ((index + worldHeight - 1) % worldHeight) * worldWidth;
int y1 = index * worldWidth;
int y2 = ((index + 1) % worldHeight) * worldWidth;
// Get the current block and thread
int x;
// Loop over corresponding COLUMNS
for (x = 0; x < worldWidth; ++x){
// Set current column, left column, and right column
int x1 = x;
int x0 = (x1 + worldWidth - 1) % worldWidth;
int x2 = (x1 + 1) % worldWidth;
// Get the status of the current cell to determine logic of life span
int is_alive = d_data[x1+y1];
// Count the number of alive neighbors
int num_alive = 0;
// Check above and below row cases
// Top row of the slab: the row above comes from the ghost row d_aboveRow.
if (x1+y1 < worldWidth) {
num_alive = d_aboveRow[x0] + d_aboveRow[x1] + d_aboveRow[x2] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
// Bottom row of the slab: the row below comes from the ghost row d_belowRow.
else if (x1+y1 > worldWidth*worldHeight - worldWidth - 1) {
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_belowRow[x0] + d_belowRow[x1] + d_belowRow[x2];
}
// Interior rows: all eight neighbours live in d_data.
else {
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
// Logic for updating values
// HighLife rules as implemented here: survive on 2 or 3, birth on 3 or 6.
if (is_alive == 1){
// Cell is alive!
if (num_alive < 2){
// Underpopulated
d_resultData[x1+y1] = 0;
}
else if (num_alive == 2 || num_alive == 3){
// Just the right amount of neighbors
d_resultData[x1+y1] = 1;
}
else {
// Overpopulated
d_resultData[x1+y1] = 0;
}
}
else {
// Cell is dead :(
if (num_alive == 3 || num_alive == 6) {
// #Resurrected
d_resultData[x1+y1] = 1;
}
else {
// We stay dead
d_resultData[x1+y1] = 0;
}
}// End logic for staying dead
} // End x loop
} // End loop over each thread
// ----- SWAP DATA IN ABOVE ROWS AND BELOW ROWS ----- //
// NOTE(review): every thread of every block executes this copy, racing on
// d_aboveRow/d_belowRow, and it may read d_resultData before other blocks
// finish writing it — __syncthreads() below only synchronises one block.
// Confirm whether this swap belongs on the host after the launch instead.
int j;
for(j = 0; j < worldWidth; j++){
d_aboveRow[j] = d_resultData[j];
d_belowRow[j] = d_resultData[j + worldWidth*(worldHeight - 1)];
}
// Synchronize the threads?
__syncthreads();
}
// LAUNCH KERNEL FUNCTION
// Launches one HighLife generation and blocks the host until it completes.
// The buffers arrive as pointer-to-pointer and are dereferenced once here.
// myrank is unused; retained so the MPI caller has a uniform signature.
extern "C" void HL_kernelLaunch( unsigned char** d_data, unsigned char** d_resultData, unsigned char** d_aboveRow, unsigned char** d_belowRow, int block_count, int thread_count, unsigned int worldWidth, unsigned int worldHeight, int myrank){
    unsigned char *world    = *d_data;
    unsigned char *result   = *d_resultData;
    unsigned char *ghost_up = *d_aboveRow;
    unsigned char *ghost_dn = *d_belowRow;
    HL_kernel<<<block_count,thread_count>>>(world, result, ghost_up, ghost_dn, worldWidth, worldHeight);
    // Wait for the device so the caller can safely exchange ghost rows.
    cudaDeviceSynchronize();
}
// Free memory
// Releases every managed buffer owned by this rank, in declaration order.
// myrank is unused; retained for interface symmetry with the other entry points.
extern "C" void freeCudaArrays(int myrank){
    unsigned char *buffers[4] = { g_data, g_resultData, g_aboveRow, g_belowRow };
    for (int k = 0; k < 4; ++k) {
        cudaFree(buffers[k]);
    }
}
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <hip/hip_runtime.h>
// Result from last compute of world.
extern unsigned char *g_resultData;
// Current state of world.
extern unsigned char *g_data;
// ----- SAVE RECEIVING ROWS FROM OTHER GPUS ----- //
// "Above" row
extern unsigned char *g_aboveRow;
// "Below" row
extern unsigned char *g_belowRow;
// "Above" row
extern unsigned char *g_resultAboveRow;
// "Below" row
extern unsigned char *g_resultBelowRow;
// ----- DECLARE KERNEL ----- //
__global__ void HL_kernel(unsigned int worldWidth, unsigned int worldHeight);
// Define number of Processors
int cudaDeviceCount;
hipError_t cE;
// Allocate the world, its double buffer, and both ghost rows as managed
// memory, with every cell dead (0). myrank/cudaDeviceCount are unused here
// but kept so all HL_init* helpers share one signature.
static inline void HL_initAllZeros(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t worldBytes = worldWidth * worldHeight * sizeof(unsigned char);
    const size_t rowBytes   = worldWidth * sizeof(unsigned char);

    // World state and its result buffer.
    hipMallocManaged(&g_data, worldBytes);
    hipMemset(g_data, 0, worldBytes);
    hipMallocManaged(&g_resultData, worldBytes);
    hipMemset(g_resultData, 0, worldBytes);

    // Ghost rows exchanged with the neighbouring ranks.
    hipMallocManaged(&g_aboveRow, rowBytes);
    hipMemset(g_aboveRow, 0, rowBytes);
    hipMallocManaged(&g_belowRow, rowBytes);
    hipMemset(g_belowRow, 0, rowBytes);
}
// Allocate the world, its double buffer, and both ghost rows, then set every
// cell (and the first worldWidth entries of both ghost rows) alive.
// myrank/cudaDeviceCount are unused but kept for a uniform HL_init* signature.
static inline void HL_initAllOnes(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // BUGFIX: the index was a signed int compared against a size_t bound;
    // for worlds with more than INT_MAX cells that overflows (UB / infinite
    // loop). Use size_t to match the bound's type.
    size_t i;
    // set all rows of world to true
    for( i = 0; i < total_world_size; i++)
    {
        g_data[i] = 1;
        // The first row's columns also seed both ghost rows.
        if (i < worldWidth){
            g_aboveRow[i] = 1;
            g_belowRow[i] = 1;
        }
    }
}
// Allocate all buffers zeroed, then set alive the 11 cells at offsets
// worldWidth*(worldHeight-1)+128 .. +138 (a short run in the last row).
// myrank/cudaDeviceCount are unused but kept for a uniform HL_init* signature.
static inline void HL_initOnesInMiddle(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // BUGFIX: the index was a signed int compared against size_t expressions;
    // for large worlds the int overflows (UB). Use size_t to match the bounds.
    size_t i;
    for(i = worldWidth * (worldHeight - 1) + 128; i < worldWidth * (worldHeight - 1) + 139; i++){
        g_data[i] = 1;
    }
}
// Allocate all buffers zeroed, then set alive the four world corners:
// rank 0 owns the top corners (mirrored into g_aboveRow), the last rank owns
// the bottom corners (mirrored into g_belowRow).
static inline void HL_initOnesAtCorners(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    if(myrank == 0){
        g_data[0] = 1; // upper left
        g_data[worldWidth-1]=1; // upper right
        g_aboveRow[0] = 1; // upper left
        g_aboveRow[worldWidth-1]=1; // upper right
    }
    if(myrank == cudaDeviceCount - 1){
        // BUGFIX: the last-row offset was worldHeight*(worldWidth-1), which
        // only matches the intended "lower left" for square worlds and can
        // index past the allocation (lower right) when worldWidth != worldHeight.
        // The first cell of the last row is worldWidth*(worldHeight-1).
        g_data[(worldWidth * (worldHeight-1))]=1; // lower left
        g_data[(worldWidth * (worldHeight-1)) + worldWidth-1]=1; // lower right
        g_belowRow[0] = 1;
        g_belowRow[worldWidth - 1] = 1;
    }
}
// Allocate all buffers zeroed, then — on rank 0 only — seed a three-cell
// spinner in the top row that wraps around the left/right edge
// (cells 0, 1, and worldWidth-1), mirrored into the above ghost row.
static inline void HL_initSpinnerAtCorner(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t worldBytes = worldWidth * worldHeight * sizeof(unsigned char);
    const size_t rowBytes   = worldWidth * sizeof(unsigned char);

    // Zero-initialised world, double buffer, and ghost rows.
    hipMallocManaged(&g_data, worldBytes);
    hipMemset(g_data, 0, worldBytes);
    hipMallocManaged(&g_resultData, worldBytes);
    hipMemset(g_resultData, 0, worldBytes);
    hipMallocManaged(&g_aboveRow, rowBytes);
    hipMemset(g_aboveRow, 0, rowBytes);
    hipMallocManaged(&g_belowRow, rowBytes);
    hipMemset(g_belowRow, 0, rowBytes);

    // Only the first rank hosts the spinner.
    if (myrank != 0)
        return;
    g_data[0] = 1;                 // upper left
    g_data[1] = 1;                 // upper left + 1
    g_data[worldWidth - 1] = 1;    // upper right (wraps to the left cell)
    g_aboveRow[0] = 1;
    g_aboveRow[1] = 1;
    g_aboveRow[worldWidth - 1] = 1;
}
// Allocate all buffers zeroed, then seed the HighLife replicator pattern:
// three cells to the right of the world centre and three cells below it.
// Runs on every rank (no myrank guard in the original contract).
static inline void HL_initReplicator(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t worldBytes = worldWidth * worldHeight * sizeof(unsigned char);
    const size_t rowBytes   = worldWidth * sizeof(unsigned char);

    hipMallocManaged(&g_data, worldBytes);
    hipMemset(g_data, 0, worldBytes);
    hipMallocManaged(&g_resultData, worldBytes);
    hipMemset(g_resultData, 0, worldBytes);
    hipMallocManaged(&g_aboveRow, rowBytes);
    hipMemset(g_aboveRow, 0, rowBytes);
    hipMallocManaged(&g_belowRow, rowBytes);
    hipMemset(g_belowRow, 0, rowBytes);

    // Linear index of the centre cell (worldWidth/2, worldHeight/2).
    const size_t center = (worldHeight / 2) * worldWidth + worldWidth / 2;
    g_data[center + 1] = 1;                  // three cells to the right
    g_data[center + 2] = 1;
    g_data[center + 3] = 1;
    g_data[center + 1 * worldWidth] = 1;     // three cells below
    g_data[center + 2 * worldWidth] = 1;
    g_data[center + 3 * worldWidth] = 1;
}
// ---------- EXPORT TO APPROPRIATE COMPILER ---------- //
// Entry point called from the MPI driver: binds this rank to a HIP device
// (round-robin over the visible devices) and dispatches to the requested
// initial-pattern helper. Exits the process on any HIP error or on an
// unknown pattern id.
extern "C" void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    // Query how many devices this node exposes (overwrites the parameter,
    // which shadows the file-level global of the same name).
    cE = hipGetDeviceCount(&cudaDeviceCount);
    if (cE != hipSuccess) {
        printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount );
        exit(-1);
    }

    // Round-robin rank -> device binding.
    cE = hipSetDevice(myrank % cudaDeviceCount);
    if (cE != hipSuccess) {
        printf(" Unable to have myrank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
        exit(-1);
    }

    // Build the requested initial world pattern.
    switch (pattern) {
    case 0: HL_initAllZeros( worldWidth, worldHeight, myrank, cudaDeviceCount );       break;
    case 1: HL_initAllOnes( worldWidth, worldHeight, myrank, cudaDeviceCount );        break;
    case 2: HL_initOnesInMiddle( worldWidth, worldHeight, myrank, cudaDeviceCount );   break;
    case 3: HL_initOnesAtCorners( worldWidth, worldHeight, myrank, cudaDeviceCount );  break;
    case 4: HL_initSpinnerAtCorner( worldWidth, worldHeight, myrank, cudaDeviceCount );break;
    case 5: HL_initReplicator( worldWidth, worldHeight, myrank, cudaDeviceCount );     break;
    default:
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}
// MAIN KERNEL FUNCTION THAT DOES ALL OF THE WORK
// ---------------------------------------------------------------------------
// HL_kernel: computes one generation of the cellular automaton on the device.
// The survival/birth tests below (an alive cell survives with 2 or 3 live
// neighbors; a dead cell becomes alive with 3 or 6) match the HighLife rule
// B36/S23.
// In:  d_data       current world state, 1 byte per cell
//      d_resultData next-generation buffer (written here)
//      d_aboveRow / d_belowRow  halo rows shared with neighboring ranks
//      worldWidth / worldHeight grid dimensions in cells
// ---------------------------------------------------------------------------
__global__ void HL_kernel( unsigned char* d_data, unsigned char* d_resultData, unsigned char* d_aboveRow, unsigned char* d_belowRow, unsigned int worldWidth, unsigned int worldHeight){
// Store index value
size_t index;
// Loop over the threads
// NOTE(review): index is used as a ROW number below (y1 = index*worldWidth),
// but the loop bound is worldWidth*worldHeight (the CELL count), so row
// numbers past worldHeight-1 are also visited -- confirm the intended bound.
for(index = blockIdx.x * blockDim.x + threadIdx.x; index < worldWidth*worldHeight; index += blockDim.x * gridDim.x){
// Allocate space
// Row base offsets: row above (wrapped mod worldHeight), this row, and the
// row below (wrapped).  NOTE(review): these are int while index*worldWidth
// is computed in size_t; can exceed INT_MAX for large worlds -- confirm
// world sizes stay small enough.
int y0 = ((index + worldHeight - 1) % worldHeight) * worldWidth;
int y1 = index * worldWidth;
int y2 = ((index + 1) % worldHeight) * worldWidth;
// Get the current block and thread
int x;
// Loop over corresponding COLUMNS
for (x = 0; x < worldWidth; ++x){
// Set current column, left column, and right column
// x0/x2 wrap around the vertical edges (toroidal in x).
int x1 = x;
int x0 = (x1 + worldWidth - 1) % worldWidth;
int x2 = (x1 + 1) % worldWidth;
// Get the status of the current cell to determine logic of life span
int is_alive = d_data[x1+y1];
// Count the number of alive neighbors
int num_alive = 0;
// Check above and below row cases
if (x1+y1 < worldWidth) {
// First row of this rank's slab: the row above comes from the halo buffer.
num_alive = d_aboveRow[x0] + d_aboveRow[x1] + d_aboveRow[x2] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
else if (x1+y1 > worldWidth*worldHeight - worldWidth - 1) {
// Last row of this rank's slab: the row below comes from the halo buffer.
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_belowRow[x0] + d_belowRow[x1] + d_belowRow[x2];
}
else {
// Interior row: all eight neighbors live in d_data.
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
// Logic for updating values
if (is_alive == 1){
// Cell is alive!
if (num_alive < 2){
// Underpopulated
d_resultData[x1+y1] = 0;
}
else if (num_alive == 2 || num_alive == 3){
// Just the right amount of neighbors
d_resultData[x1+y1] = 1;
}
else {
// Overpopulated
d_resultData[x1+y1] = 0;
}
}
else {
// Cell is dead :(
if (num_alive == 3 || num_alive == 6) {
// #Resurrected
d_resultData[x1+y1] = 1;
}
else {
// We stay dead
d_resultData[x1+y1] = 0;
}
}// End logic for staying dead
} // End x loop
} // End loop over each thread
// ----- SWAP DATA IN ABOVE ROWS AND BELOW ROWS ----- //
// NOTE(review): every thread performs this full-width copy redundantly, and
// threads in OTHER blocks may still be writing d_resultData when it is read
// here (there is no grid-wide barrier) -- confirm callers tolerate this.
int j;
for(j = 0; j < worldWidth; j++){
d_aboveRow[j] = d_resultData[j];
d_belowRow[j] = d_resultData[j + worldWidth*(worldHeight - 1)];
}
// Synchronize the threads?
// __syncthreads() is a block-level barrier only; it does not synchronize
// across blocks.
__syncthreads();
}
// LAUNCH KERNEL FUNCTION
// Host-side wrapper: launches one generation of HL_kernel with the given
// launch geometry, then blocks until the device finishes so managed memory
// is safe to read on return.  (myrank is accepted for interface parity but
// unused here.)
extern "C" void HL_kernelLaunch( unsigned char** d_data, unsigned char** d_resultData, unsigned char** d_aboveRow, unsigned char** d_belowRow, int block_count, int thread_count, unsigned int worldWidth, unsigned int worldHeight, int myrank){
    // Unwrap the double pointers once, then launch.
    unsigned char* world  = *d_data;
    unsigned char* result = *d_resultData;
    unsigned char* above  = *d_aboveRow;
    unsigned char* below  = *d_belowRow;
    HL_kernel<<<block_count, thread_count>>>(world, result, above, below, worldWidth, worldHeight);
    // Wait for the kernel to complete.
    hipDeviceSynchronize();
}
// Free memory
extern "C" void freeCudaArrays(int myrank){
hipFree(g_data);
hipFree(g_resultData);
hipFree(g_aboveRow);
hipFree(g_belowRow);
} |
// (extraction artifact, not source code) You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <hip/hip_runtime.h>
// Result from last compute of world.
extern unsigned char *g_resultData;
// Current state of world.
extern unsigned char *g_data;
// ----- SAVE RECEIVING ROWS FROM OTHER GPUS ----- //
// "Above" row
extern unsigned char *g_aboveRow;
// "Below" row
extern unsigned char *g_belowRow;
// "Above" row
extern unsigned char *g_resultAboveRow;
// "Below" row
extern unsigned char *g_resultBelowRow;
// ----- DECLARE KERNEL ----- //
__global__ void HL_kernel(unsigned int worldWidth, unsigned int worldHeight);
// Define number of Processors
int cudaDeviceCount;
hipError_t cE;
// Pattern 0: every cell dead.  Allocates and zero-fills the four managed
// buffers; no further initialization is needed.
static inline void HL_initAllZeros(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t cellCount = worldWidth * worldHeight;

    // Current world state.
    hipMallocManaged(&g_data, cellCount * sizeof(unsigned char));
    hipMemset(g_data, 0, cellCount * sizeof(unsigned char));
    // Next-generation buffer.
    hipMallocManaged(&g_resultData, cellCount * sizeof(unsigned char));
    hipMemset(g_resultData, 0, cellCount * sizeof(unsigned char));
    // "Above" halo row.
    hipMallocManaged(&g_aboveRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_aboveRow, 0, worldWidth * sizeof(unsigned char));
    // "Below" halo row.
    hipMallocManaged(&g_belowRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_belowRow, 0, worldWidth * sizeof(unsigned char));
}
// Pattern 1: every cell alive, with the first worldWidth cells mirrored into
// both halo rows.
// Fix: the loop index was a signed `int` compared against the size_t bound
// total_world_size; for worlds larger than INT_MAX cells the index overflowed
// (undefined behavior) and the comparison was signed/unsigned mixed.  The
// index is now a size_t, matching the bound.
static inline void HL_initAllOnes(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // Set every cell of the world to alive; the first row's worth of indices
    // is also mirrored into the halo buffers.
    size_t i;
    for( i = 0; i < total_world_size; i++)
    {
        g_data[i] = 1;
        // Set above and below rows
        if (i < worldWidth){
            g_aboveRow[i] = 1;
            g_belowRow[i] = 1;
        }
    }
}
// Pattern 2: a run of live cells at offsets 128..138 of the LAST row.
// Fix: the loop index was a signed `int` while both bounds are size_t
// expressions (worldWidth*(worldHeight-1)+128 / +139), which overflow a
// 32-bit int for large worlds.  The index is now a size_t.
static inline void HL_initOnesInMiddle(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // Light cells 128..138 of the last row.
    // NOTE(review): assumes worldWidth >= 139 -- confirm the minimum width
    // this pattern is used with.
    size_t i;
    for(i = worldWidth * (worldHeight - 1) + 128; i < worldWidth * (worldHeight - 1) + 139; i++){
        g_data[i] = 1;
    }
}
// Pattern 3: one live cell at each corner of this rank's slab -- the top
// corners on rank 0, the bottom corners on the last rank -- mirrored into
// the corresponding halo rows.
// Fix: the "lower left" base offset was worldHeight*(worldWidth-1), which
// only equals the start of the last row for square worlds.  It is now
// worldWidth*(worldHeight-1), consistent with the row layout used by every
// other routine in this file (row r starts at r*worldWidth).
static inline void HL_initOnesAtCorners(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // Rank 0 owns the global top edge.
    if(myrank == 0){
        g_data[0] = 1; // upper left
        g_data[worldWidth-1]=1; // upper right
        g_aboveRow[0] = 1; // upper left
        g_aboveRow[worldWidth-1]=1; // upper right
    }
    // The last rank owns the global bottom edge.
    if(myrank == cudaDeviceCount - 1){
        g_data[(worldWidth * (worldHeight-1))]=1; // lower left
        g_data[(worldWidth * (worldHeight-1)) + worldWidth-1]=1; // lower right
        g_belowRow[0] = 1;
        g_belowRow[worldWidth - 1] = 1;
    }
}
// Pattern 4: a three-cell "spinner" wrapped around the upper-left corner of
// rank 0's slab, mirrored into the above-halo row.  Behaviour-identical
// restyle: same allocations, same cells; the rank check is now an early
// return.
static inline void HL_initSpinnerAtCorner(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t cellCount = worldWidth * worldHeight;

    // Managed allocations, all zero-filled.
    hipMallocManaged(&g_data, cellCount * sizeof(unsigned char));
    hipMemset(g_data, 0, cellCount * sizeof(unsigned char));
    hipMallocManaged(&g_resultData, cellCount * sizeof(unsigned char));
    hipMemset(g_resultData, 0, cellCount * sizeof(unsigned char));
    hipMallocManaged(&g_aboveRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_aboveRow, 0, worldWidth * sizeof(unsigned char));
    hipMallocManaged(&g_belowRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_belowRow, 0, worldWidth * sizeof(unsigned char));

    // Only rank 0 hosts the spinner.
    if( myrank != 0 ){
        return;
    }
    g_data[0] = 1;                 // upper left
    g_data[1] = 1;                 // upper left + 1
    g_data[worldWidth - 1] = 1;    // upper right
    g_aboveRow[0] = 1;             // upper left
    g_aboveRow[1] = 1;             // upper left + 1
    g_aboveRow[worldWidth - 1] = 1; // upper right
}
// Pattern 5: seed a "replicator" shape at the centre of the grid.
// NOTE(review): writes cells out to column x+3 and row y+3 from the centre;
// this indexes past the buffer for very small worlds -- confirm the minimum
// supported world size.
static inline void HL_initReplicator(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
// Centre coordinates of the grid.
size_t x, y;
x = worldWidth/2;
y = worldHeight/2;
// Three live cells to the right of centre on the centre row...
g_data[x + y*worldWidth + 1] = 1;
g_data[x + y*worldWidth + 2] = 1;
g_data[x + y*worldWidth + 3] = 1;
// ...and three live cells below centre in the centre column.
g_data[x + (y+1)*worldWidth] = 1;
g_data[x + (y+2)*worldWidth] = 1;
g_data[x + (y+3)*worldWidth] = 1;
}
// ---------- EXPORT TO APPROPRIATE COMPILER ---------- //
// Host entry point: binds this MPI rank to a HIP device, then runs the
// initializer selected by `pattern` (0-5).  Exits on any device-selection
// failure or on an unknown pattern.  The cudaDeviceCount parameter is
// overwritten by hipGetDeviceCount before use.
extern "C" void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    // Device selection: count the devices, then round-robin ranks onto them.
    if( (cE = hipGetDeviceCount( &cudaDeviceCount)) != hipSuccess )
    {
        printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount );
        exit(-1);
    }
    if( (cE = hipSetDevice( myrank % cudaDeviceCount )) != hipSuccess )
    {
        printf(" Unable to have myrank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
        exit(-1);
    }
    // Pattern dispatch: an if/else ladder equivalent to the original switch.
    if (pattern == 0) {
        HL_initAllZeros( worldWidth, worldHeight, myrank, cudaDeviceCount );
    } else if (pattern == 1) {
        HL_initAllOnes( worldWidth, worldHeight, myrank, cudaDeviceCount );
    } else if (pattern == 2) {
        HL_initOnesInMiddle( worldWidth, worldHeight, myrank, cudaDeviceCount );
    } else if (pattern == 3) {
        HL_initOnesAtCorners( worldWidth, worldHeight, myrank, cudaDeviceCount );
    } else if (pattern == 4) {
        HL_initSpinnerAtCorner( worldWidth, worldHeight, myrank, cudaDeviceCount );
    } else if (pattern == 5) {
        HL_initReplicator( worldWidth, worldHeight, myrank, cudaDeviceCount );
    } else {
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}
// MAIN KERNEL FUNCTION THAT DOES ALL OF THE WORK
// ---------------------------------------------------------------------------
// HL_kernel: computes one generation of the cellular automaton on the device.
// The survival/birth tests below (an alive cell survives with 2 or 3 live
// neighbors; a dead cell becomes alive with 3 or 6) match the HighLife rule
// B36/S23.
// In:  d_data       current world state, 1 byte per cell
//      d_resultData next-generation buffer (written here)
//      d_aboveRow / d_belowRow  halo rows shared with neighboring ranks
//      worldWidth / worldHeight grid dimensions in cells
// ---------------------------------------------------------------------------
__global__ void HL_kernel( unsigned char* d_data, unsigned char* d_resultData, unsigned char* d_aboveRow, unsigned char* d_belowRow, unsigned int worldWidth, unsigned int worldHeight){
// Store index value
size_t index;
// Loop over the threads
// NOTE(review): index is used as a ROW number below (y1 = index*worldWidth),
// but the loop bound is worldWidth*worldHeight (the CELL count), so row
// numbers past worldHeight-1 are also visited -- confirm the intended bound.
for(index = blockIdx.x * blockDim.x + threadIdx.x; index < worldWidth*worldHeight; index += blockDim.x * gridDim.x){
// Allocate space
// Row base offsets: row above (wrapped mod worldHeight), this row, and the
// row below (wrapped).  NOTE(review): these are int while index*worldWidth
// is computed in size_t; can exceed INT_MAX for large worlds -- confirm
// world sizes stay small enough.
int y0 = ((index + worldHeight - 1) % worldHeight) * worldWidth;
int y1 = index * worldWidth;
int y2 = ((index + 1) % worldHeight) * worldWidth;
// Get the current block and thread
int x;
// Loop over corresponding COLUMNS
for (x = 0; x < worldWidth; ++x){
// Set current column, left column, and right column
// x0/x2 wrap around the vertical edges (toroidal in x).
int x1 = x;
int x0 = (x1 + worldWidth - 1) % worldWidth;
int x2 = (x1 + 1) % worldWidth;
// Get the status of the current cell to determine logic of life span
int is_alive = d_data[x1+y1];
// Count the number of alive neighbors
int num_alive = 0;
// Check above and below row cases
if (x1+y1 < worldWidth) {
// First row of this rank's slab: the row above comes from the halo buffer.
num_alive = d_aboveRow[x0] + d_aboveRow[x1] + d_aboveRow[x2] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
else if (x1+y1 > worldWidth*worldHeight - worldWidth - 1) {
// Last row of this rank's slab: the row below comes from the halo buffer.
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_belowRow[x0] + d_belowRow[x1] + d_belowRow[x2];
}
else {
// Interior row: all eight neighbors live in d_data.
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
// Logic for updating values
if (is_alive == 1){
// Cell is alive!
if (num_alive < 2){
// Underpopulated
d_resultData[x1+y1] = 0;
}
else if (num_alive == 2 || num_alive == 3){
// Just the right amount of neighbors
d_resultData[x1+y1] = 1;
}
else {
// Overpopulated
d_resultData[x1+y1] = 0;
}
}
else {
// Cell is dead :(
if (num_alive == 3 || num_alive == 6) {
// #Resurrected
d_resultData[x1+y1] = 1;
}
else {
// We stay dead
d_resultData[x1+y1] = 0;
}
}// End logic for staying dead
} // End x loop
} // End loop over each thread
// ----- SWAP DATA IN ABOVE ROWS AND BELOW ROWS ----- //
// NOTE(review): every thread performs this full-width copy redundantly, and
// threads in OTHER blocks may still be writing d_resultData when it is read
// here (there is no grid-wide barrier) -- confirm callers tolerate this.
int j;
for(j = 0; j < worldWidth; j++){
d_aboveRow[j] = d_resultData[j];
d_belowRow[j] = d_resultData[j + worldWidth*(worldHeight - 1)];
}
// Synchronize the threads?
// __syncthreads() is a block-level barrier only; it does not synchronize
// across blocks.
__syncthreads();
}
// LAUNCH KERNEL FUNCTION
// Launches one generation of HL_kernel with the given launch geometry, then
// blocks on hipDeviceSynchronize so managed memory is safe to read after
// return.  (myrank is accepted for interface parity but unused here.)
extern "C" void HL_kernelLaunch( unsigned char** d_data, unsigned char** d_resultData, unsigned char** d_aboveRow, unsigned char** d_belowRow, int block_count, int thread_count, unsigned int worldWidth, unsigned int worldHeight, int myrank){
// Call the kernel
HL_kernel<<<block_count,thread_count>>>(*d_data, *d_resultData, *d_aboveRow, *d_belowRow, worldWidth, worldHeight);
// Synchronize the CUDA devices
hipDeviceSynchronize();
}
// Free memory
// Releases the four managed buffers allocated by the HL_init* routines.
// NOTE(review): hipFree return values are ignored and the global pointers
// are not reset to NULL afterwards.
extern "C" void freeCudaArrays(int myrank){
hipFree(g_data);
hipFree(g_resultData);
hipFree(g_aboveRow);
hipFree(g_belowRow);
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9HL_kernelPhS_S_S_jj
.globl _Z9HL_kernelPhS_S_S_jj
.p2align 8
.type _Z9HL_kernelPhS_S_S_jj,@function
_Z9HL_kernelPhS_S_S_jj:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b256 s[4:11], s[0:1], 0x8
s_add_u32 s12, s0, 40
s_addc_u32 s13, s1, 0
s_mov_b32 s14, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s22, s2, 0xffff
s_mul_i32 s2, s11, s10
v_mad_u64_u32 v[1:2], null, s15, s22, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s2, v1
s_cbranch_execz .LBB0_42
s_load_b32 s23, s[12:13], 0x0
s_load_b64 s[12:13], s[0:1], 0x0
s_add_u32 s15, s11, -1
v_mul_lo_u32 v0, s10, v1
s_addc_u32 s16, 0, -1
s_cmp_lg_u32 s10, 0
v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, 0
s_mov_b32 s3, 0
s_cselect_b32 s19, -1, 0
s_not_b32 s0, s10
s_mov_b32 s1, s11
s_mov_b32 s17, s3
s_mov_b32 s18, s3
s_add_i32 s20, s10, -1
s_sub_i32 s21, 0, s10
s_mov_b32 s25, s3
s_waitcnt lgkmcnt(0)
s_mul_i32 s22, s23, s22
s_add_i32 s23, s2, s0
s_mul_i32 s24, s22, s10
s_branch .LBB0_3
.LBB0_2:
v_add_co_u32 v1, vcc_lo, v1, s22
v_add_co_ci_u32_e32 v2, vcc_lo, s18, v2, vcc_lo
v_add_nc_u32_e32 v0, s24, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_u64_e32 vcc_lo, s[2:3], v[1:2]
s_or_b32 s25, vcc_lo, s25
s_and_not1_b32 exec_lo, exec_lo, s25
s_cbranch_execz .LBB0_42
.LBB0_3:
v_add_co_u32 v7, vcc_lo, s15, v1
v_add_co_ci_u32_e32 v4, vcc_lo, s16, v2, vcc_lo
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ne_u64_e32 0, v[3:4]
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_5
v_cvt_f32_u32_e32 v5, s1
s_sub_u32 s26, 0, s1
s_subb_u32 s27, 0, s17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e64 v5, 0, 0x4f800000
v_rcp_f32_e32 v5, v5
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v5, 0x5f7ffffc, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v6, 0x2f800000, v5
v_trunc_f32_e32 v6, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v5, 0xcf800000, v6
v_cvt_u32_f32_e32 v6, v6
v_cvt_u32_f32_e32 v5, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v8, s26, v6
v_mul_hi_u32 v9, s26, v5
v_mul_lo_u32 v10, s27, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v9, v8
v_mul_lo_u32 v9, s26, v5
v_add_nc_u32_e32 v8, v8, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v10, v5, v9
v_mul_lo_u32 v11, v5, v8
v_mul_hi_u32 v12, v5, v8
v_mul_hi_u32 v13, v6, v9
v_mul_lo_u32 v9, v6, v9
v_mul_hi_u32 v14, v6, v8
v_mul_lo_u32 v8, v6, v8
v_add_co_u32 v10, vcc_lo, v10, v11
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v12, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, v10, v9
v_add_co_ci_u32_e32 v9, vcc_lo, v11, v13, vcc_lo
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v14, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v9, v8
v_add_co_ci_u32_e32 v9, vcc_lo, 0, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, v5, v8
v_add_co_ci_u32_e32 v6, vcc_lo, v6, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v8, s26, v5
v_mul_lo_u32 v10, s27, v5
v_mul_lo_u32 v9, s26, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v8, v9
v_mul_lo_u32 v9, s26, v5
v_add_nc_u32_e32 v8, v8, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v10, v5, v9
v_mul_lo_u32 v11, v5, v8
v_mul_hi_u32 v12, v5, v8
v_mul_hi_u32 v13, v6, v9
v_mul_lo_u32 v9, v6, v9
v_mul_hi_u32 v14, v6, v8
v_mul_lo_u32 v8, v6, v8
v_add_co_u32 v10, vcc_lo, v10, v11
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v12, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, v10, v9
v_add_co_ci_u32_e32 v9, vcc_lo, v11, v13, vcc_lo
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v14, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v9, v8
v_add_co_ci_u32_e32 v9, vcc_lo, 0, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v5, v8
v_add_co_ci_u32_e32 v12, vcc_lo, v6, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v13, v7, v10
v_mad_u64_u32 v[8:9], null, v4, v10, 0
v_mad_u64_u32 v[5:6], null, v7, v12, 0
v_mad_u64_u32 v[10:11], null, v4, v12, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v5, vcc_lo, v13, v5
v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, v5, v8
v_add_co_ci_u32_e32 v5, vcc_lo, v6, v9, vcc_lo
v_add_co_ci_u32_e32 v6, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v5, v10
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[5:6], null, s1, v8, 0
v_mad_u64_u32 v[8:9], null, s1, v10, v[6:7]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v5, vcc_lo, v7, v5
v_sub_co_ci_u32_e32 v4, vcc_lo, v4, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v6, vcc_lo, v5, s1
v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_cmp_le_u32_e32 vcc_lo, s1, v6
v_cndmask_b32_e64 v8, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s1, v5
v_cndmask_b32_e64 v9, 0, -1, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, 0, v7
v_cndmask_b32_e32 v7, -1, v8, vcc_lo
v_sub_co_u32 v8, vcc_lo, v6, s1
v_cmp_eq_u32_e32 vcc_lo, 0, v4
v_cndmask_b32_e32 v4, -1, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cmp_ne_u32_e32 vcc_lo, 0, v7
v_cndmask_b32_e32 v6, v6, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ne_u32_e32 vcc_lo, 0, v4
v_cndmask_b32_e32 v5, v5, v6, vcc_lo
.LBB0_5:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_7
v_cvt_f32_u32_e32 v4, s1
s_sub_i32 s26, 0, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v4, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v4
v_cvt_u32_f32_e32 v4, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, s26, v4
v_mul_hi_u32 v5, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v4, v4, v5
v_mul_hi_u32 v4, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, s1
v_sub_nc_u32_e32 v4, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s1, v4
v_cmp_le_u32_e32 vcc_lo, s1, v4
v_cndmask_b32_e32 v4, v4, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s1, v4
v_cmp_le_u32_e32 vcc_lo, s1, v4
v_cndmask_b32_e32 v5, v4, v5, vcc_lo
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s0
v_add_co_u32 v8, vcc_lo, v1, 1
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v2, vcc_lo
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ne_u64_e32 0, v[3:4]
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_9
v_cvt_f32_u32_e32 v6, s1
s_sub_u32 s26, 0, s1
s_subb_u32 s27, 0, s17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e64 v6, 0, 0x4f800000
v_rcp_f32_e32 v6, v6
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v6, 0x5f7ffffc, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v7, 0x2f800000, v6
v_trunc_f32_e32 v7, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v6, 0xcf800000, v7
v_cvt_u32_f32_e32 v7, v7
v_cvt_u32_f32_e32 v6, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v9, s26, v7
v_mul_hi_u32 v10, s26, v6
v_mul_lo_u32 v11, s27, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v9, v10, v9
v_mul_lo_u32 v10, s26, v6
v_add_nc_u32_e32 v9, v9, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v11, v6, v10
v_mul_lo_u32 v12, v6, v9
v_mul_hi_u32 v13, v6, v9
v_mul_hi_u32 v14, v7, v10
v_mul_lo_u32 v10, v7, v10
v_mul_hi_u32 v15, v7, v9
v_mul_lo_u32 v9, v7, v9
v_add_co_u32 v11, vcc_lo, v11, v12
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v11, v10
v_add_co_ci_u32_e32 v10, vcc_lo, v12, v14, vcc_lo
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, v10, v9
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v6, vcc_lo, v6, v9
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v9, s26, v6
v_mul_lo_u32 v11, s27, v6
v_mul_lo_u32 v10, s26, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v9, v9, v10
v_mul_lo_u32 v10, s26, v6
v_add_nc_u32_e32 v9, v9, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v11, v6, v10
v_mul_lo_u32 v12, v6, v9
v_mul_hi_u32 v13, v6, v9
v_mul_hi_u32 v14, v7, v10
v_mul_lo_u32 v10, v7, v10
v_mul_hi_u32 v15, v7, v9
v_mul_lo_u32 v9, v7, v9
v_add_co_u32 v11, vcc_lo, v11, v12
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v11, v10
v_add_co_ci_u32_e32 v10, vcc_lo, v12, v14, vcc_lo
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, v10, v9
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v11, vcc_lo, v6, v9
v_add_co_ci_u32_e32 v13, vcc_lo, v7, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v14, v8, v11
v_mad_u64_u32 v[9:10], null, v4, v11, 0
v_mad_u64_u32 v[6:7], null, v8, v13, 0
v_mad_u64_u32 v[11:12], null, v4, v13, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, v14, v6
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v6, vcc_lo, v6, v9
v_add_co_ci_u32_e32 v6, vcc_lo, v7, v10, vcc_lo
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v12, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, v6, v11
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[6:7], null, s1, v9, 0
v_mad_u64_u32 v[9:10], null, s1, v11, v[7:8]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v6, vcc_lo, v8, v6
v_sub_co_ci_u32_e32 v4, vcc_lo, v4, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v7, vcc_lo, v6, s1
v_subrev_co_ci_u32_e32 v8, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_cmp_le_u32_e32 vcc_lo, s1, v7
v_cndmask_b32_e64 v9, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s1, v6
v_cndmask_b32_e64 v10, 0, -1, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, 0, v8
v_cndmask_b32_e32 v8, -1, v9, vcc_lo
v_sub_co_u32 v9, vcc_lo, v7, s1
v_cmp_eq_u32_e32 vcc_lo, 0, v4
v_cndmask_b32_e32 v4, -1, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cmp_ne_u32_e32 vcc_lo, 0, v8
v_cndmask_b32_e32 v7, v7, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ne_u32_e32 vcc_lo, 0, v4
v_cndmask_b32_e32 v6, v6, v7, vcc_lo
.LBB0_9:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_11
v_cvt_f32_u32_e32 v4, s1
s_sub_i32 s26, 0, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v4, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v4
v_cvt_u32_f32_e32 v4, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v6, s26, v4
v_mul_hi_u32 v6, v4, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v4, v4, v6
v_mul_hi_u32 v4, v8, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, s1
v_sub_nc_u32_e32 v4, v8, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v6, s1, v4
v_cmp_le_u32_e32 vcc_lo, s1, v4
v_cndmask_b32_e32 v4, v4, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v6, s1, v4
v_cmp_le_u32_e32 vcc_lo, s1, v4
v_cndmask_b32_e32 v6, v4, v6, vcc_lo
.LBB0_11:
s_or_b32 exec_lo, exec_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s19
s_cbranch_vccnz .LBB0_2
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v8, v6, s10
v_mul_lo_u32 v9, v5, s10
v_mul_lo_u32 v10, v1, s10
s_mov_b32 s0, 0
s_mov_b32 s26, s20
s_branch .LBB0_15
.LBB0_13:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_14:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s28
s_add_i32 s26, s26, 1
s_cmp_lg_u32 s10, s27
s_mov_b32 s0, s27
s_cbranch_scc0 .LBB0_2
.LBB0_15:
v_add_nc_u32_e32 v4, s0, v0
s_mov_b32 s30, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v5, 31, v4
v_add_co_u32 v6, vcc_lo, s12, v4
v_add_co_ci_u32_e32 v7, vcc_lo, s13, v5, vcc_lo
global_load_u8 v11, v[6:7], off
v_cvt_f32_u32_e32 v6, s10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v6, v6
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v6, 0x4f7ffffe, v6
v_cvt_u32_f32_e32 v6, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s27, v6
s_mul_i32 s28, s21, s27
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s28, s27, s28
s_add_i32 s27, s27, s28
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s27, s26, s27
s_not_b32 s28, s27
s_mul_i32 s27, s21, s27
s_mul_i32 s28, s10, s28
s_add_i32 s27, s26, s27
s_add_i32 s28, s26, s28
s_cmp_ge_u32 s27, s10
s_cselect_b32 s27, s28, s27
s_delay_alu instid0(SALU_CYCLE_1)
s_sub_i32 s28, s27, s10
s_cmp_ge_u32 s27, s10
s_cselect_b32 s29, s28, s27
s_add_i32 s27, s0, 1
s_cmp_lg_u32 s20, s0
v_add_nc_u32_e32 v13, s29, v10
s_cselect_b32 s28, s27, 0
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e32 v12, s28, v10
v_cmpx_le_u32_e64 s10, v4
s_xor_b32 s30, exec_lo, s30
s_cbranch_execz .LBB0_21
v_add_nc_u32_e32 v6, s29, v9
v_add_nc_u32_e32 v14, s0, v9
v_add_nc_u32_e32 v16, s28, v9
v_ashrrev_i32_e32 v19, 31, v13
v_ashrrev_i32_e32 v20, 31, v12
v_ashrrev_i32_e32 v7, 31, v6
v_ashrrev_i32_e32 v15, 31, v14
v_add_co_u32 v6, vcc_lo, s12, v6
v_ashrrev_i32_e32 v17, 31, v16
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v7, vcc_lo, s13, v7, vcc_lo
v_add_co_u32 v14, vcc_lo, s12, v14
v_add_co_ci_u32_e32 v15, vcc_lo, s13, v15, vcc_lo
v_add_co_u32 v16, vcc_lo, s12, v16
v_add_co_ci_u32_e32 v17, vcc_lo, s13, v17, vcc_lo
v_add_co_u32 v18, vcc_lo, s12, v13
v_add_co_ci_u32_e32 v19, vcc_lo, s13, v19, vcc_lo
v_add_co_u32 v12, vcc_lo, s12, v12
v_add_co_ci_u32_e32 v13, vcc_lo, s13, v20, vcc_lo
s_clause 0x4
global_load_u8 v6, v[6:7], off
global_load_u8 v7, v[14:15], off
global_load_u8 v14, v[16:17], off
global_load_u8 v15, v[18:19], off
global_load_u8 v12, v[12:13], off
s_mov_b32 s31, exec_lo
s_waitcnt vmcnt(2)
v_add3_u32 v6, v7, v6, v14
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v12, v6, v15, v12
v_cmpx_ge_u32_e64 s23, v4
s_xor_b32 s31, exec_lo, s31
s_cbranch_execz .LBB0_18
v_add_nc_u32_e32 v6, s29, v8
v_add_nc_u32_e32 v13, s0, v8
v_add_nc_u32_e32 v15, s28, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v7, 31, v6
v_ashrrev_i32_e32 v14, 31, v13
v_add_co_u32 v6, vcc_lo, s12, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v7, vcc_lo, s13, v7, vcc_lo
v_add_co_u32 v13, vcc_lo, s12, v13
v_add_co_ci_u32_e32 v14, vcc_lo, s13, v14, vcc_lo
s_clause 0x1
global_load_u8 v6, v[6:7], off
global_load_u8 v7, v[13:14], off
s_waitcnt vmcnt(0)
v_add3_u32 v14, v12, v6, v7
.LBB0_18:
s_or_saveexec_b32 s31, s31
v_dual_mov_b32 v6, s12 :: v_dual_mov_b32 v7, s13
s_xor_b32 exec_lo, exec_lo, s31
s_cbranch_execz .LBB0_20
s_ashr_i32 s33, s29, 31
s_add_u32 s34, s8, s29
s_addc_u32 s35, s9, s33
s_ashr_i32 s33, s0, 31
s_add_u32 s36, s8, s0
s_addc_u32 s37, s9, s33
s_clause 0x1
global_load_u8 v13, v3, s[34:35]
global_load_u8 v14, v3, s[36:37]
v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
v_mov_b32_e32 v15, s28
s_waitcnt vmcnt(0)
v_add3_u32 v14, v14, v13, v12
.LBB0_20:
s_or_b32 exec_lo, exec_lo, s31
.LBB0_21:
s_and_not1_saveexec_b32 s30, s30
s_cbranch_execz .LBB0_23
s_ashr_i32 s31, s29, 31
s_add_u32 s34, s6, s29
v_ashrrev_i32_e32 v7, 31, v13
s_addc_u32 s35, s7, s31
s_ashr_i32 s31, s0, 31
v_add_nc_u32_e32 v14, s29, v8
s_add_u32 s36, s6, s0
s_addc_u32 s37, s7, s31
s_ashr_i32 s31, s28, 31
v_add_co_u32 v6, vcc_lo, s12, v13
v_ashrrev_i32_e32 v13, 31, v12
s_add_u32 s38, s6, s28
v_add_nc_u32_e32 v16, s0, v8
s_addc_u32 s39, s7, s31
s_clause 0x2
global_load_u8 v18, v3, s[34:35]
global_load_u8 v19, v3, s[36:37]
global_load_u8 v20, v3, s[38:39]
v_add_co_ci_u32_e32 v7, vcc_lo, s13, v7, vcc_lo
v_add_co_u32 v12, vcc_lo, s12, v12
v_ashrrev_i32_e32 v15, 31, v14
v_add_co_ci_u32_e32 v13, vcc_lo, s13, v13, vcc_lo
v_ashrrev_i32_e32 v17, 31, v16
v_add_co_u32 v14, vcc_lo, s12, v14
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v15, vcc_lo, s13, v15, vcc_lo
v_add_co_u32 v16, vcc_lo, s12, v16
v_add_co_ci_u32_e32 v17, vcc_lo, s13, v17, vcc_lo
s_clause 0x3
global_load_u8 v6, v[6:7], off
global_load_u8 v7, v[12:13], off
global_load_u8 v12, v[14:15], off
global_load_u8 v13, v[16:17], off
v_add_nc_u32_e32 v15, s28, v8
s_waitcnt vmcnt(5)
v_add_nc_u32_e32 v14, v19, v18
s_waitcnt vmcnt(4)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v14, v14, v20
s_waitcnt vmcnt(2)
v_add3_u32 v6, v14, v6, v7
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v14, v6, v12, v13
v_dual_mov_b32 v6, s12 :: v_dual_mov_b32 v7, s13
.LBB0_23:
s_or_b32 exec_lo, exec_lo, s30
v_ashrrev_i32_e32 v12, 31, v15
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_co_u32 v6, vcc_lo, v6, v15
s_mov_b32 s0, exec_lo
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v12, vcc_lo
global_load_u8 v6, v[6:7], off
s_waitcnt vmcnt(1)
v_and_b32_e32 v7, 0xff, v11
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v6, v14, v6
s_delay_alu instid0(VALU_DEP_2)
v_cmpx_ne_u16_e32 1, v7
s_xor_b32 s28, exec_lo, s0
s_cbranch_execz .LBB0_33
s_mov_b32 s30, 0
s_mov_b32 s29, 0
s_mov_b32 s0, exec_lo
v_cmpx_lt_i32_e32 5, v6
s_xor_b32 s0, exec_lo, s0
v_cmp_ne_u32_e32 vcc_lo, 6, v6
s_mov_b32 s29, exec_lo
s_and_b32 s30, vcc_lo, exec_lo
s_and_not1_saveexec_b32 s31, s0
v_cmp_eq_u32_e32 vcc_lo, 3, v6
v_cmp_ne_u32_e64 s0, 3, v6
s_and_not1_b32 s29, s29, exec_lo
s_and_not1_b32 s30, s30, exec_lo
s_and_b32 s33, vcc_lo, exec_lo
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, exec_lo
s_or_b32 s29, s29, s33
s_or_b32 s30, s30, s0
s_or_b32 exec_lo, exec_lo, s31
s_and_saveexec_b32 s0, s30
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_30
v_add_co_u32 v6, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v5, vcc_lo
v_mov_b32_e32 v11, 0
s_and_not1_b32 s29, s29, exec_lo
global_store_b8 v[6:7], v11, off
.LBB0_30:
s_or_b32 exec_lo, exec_lo, s0
s_and_saveexec_b32 s0, s29
s_cbranch_execz .LBB0_32
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
v_mov_b32_e32 v6, 1
global_store_b8 v[4:5], v6, off
.LBB0_32:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_33:
s_and_not1_saveexec_b32 s28, s28
s_cbranch_execz .LBB0_14
s_mov_b32 s0, exec_lo
v_cmpx_lt_i32_e32 1, v6
s_xor_b32 s29, exec_lo, s0
s_cbranch_execz .LBB0_40
v_add_co_u32 v4, s0, s4, v4
v_and_b32_e32 v6, 0x7ffffffe, v6
v_add_co_ci_u32_e64 v5, s0, s5, v5, s0
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_2)
v_cmpx_ne_u32_e32 2, v6
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_37
v_mov_b32_e32 v6, 0
global_store_b8 v[4:5], v6, off
.LBB0_37:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_39
v_mov_b32_e32 v6, 1
global_store_b8 v[4:5], v6, off
.LBB0_39:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_40:
s_and_not1_saveexec_b32 s0, s29
s_cbranch_execz .LBB0_13
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
v_mov_b32_e32 v6, 0
global_store_b8 v[4:5], v6, off
s_branch .LBB0_13
.LBB0_42:
s_or_b32 exec_lo, exec_lo, s14
s_cmp_eq_u32 s10, 0
s_cbranch_scc1 .LBB0_45
v_mov_b32_e32 v0, 0
s_add_i32 s0, s11, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_mul_i32 s2, s0, s10
s_mov_b64 s[0:1], s[4:5]
.p2align 6
.LBB0_44:
global_load_u8 v1, v0, s[0:1]
v_mov_b32_e32 v2, s2
s_add_i32 s10, s10, -1
s_add_i32 s2, s2, 1
s_add_u32 s0, s0, 1
s_addc_u32 s1, s1, 0
s_waitcnt vmcnt(0)
global_store_b8 v0, v1, s[6:7]
global_load_u8 v1, v2, s[4:5]
s_add_u32 s6, s6, 1
s_addc_u32 s7, s7, 0
s_waitcnt vmcnt(0)
global_store_b8 v0, v1, s[8:9]
s_add_u32 s8, s8, 1
s_addc_u32 s9, s9, 0
s_cmp_lg_u32 s10, 0
s_cbranch_scc1 .LBB0_44
.LBB0_45:
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9HL_kernelPhS_S_S_jj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 21
.amdhsa_next_free_sgpr 40
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9HL_kernelPhS_S_S_jj, .Lfunc_end0-_Z9HL_kernelPhS_S_S_jj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9HL_kernelPhS_S_S_jj
.private_segment_fixed_size: 0
.sgpr_count: 42
.sgpr_spill_count: 0
.symbol: _Z9HL_kernelPhS_S_S_jj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 21
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // Include packages and also CUDA packages
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <hip/hip_runtime.h>
// Result from last compute of world.
extern unsigned char *g_resultData;
// Current state of world.
extern unsigned char *g_data;
// ----- SAVE RECEIVING ROWS FROM OTHER GPUS ----- //
// "Above" row
extern unsigned char *g_aboveRow;
// "Below" row
extern unsigned char *g_belowRow;
// "Above" row
extern unsigned char *g_resultAboveRow;
// "Below" row
extern unsigned char *g_resultBelowRow;
// ----- DECLARE KERNEL ----- //
// NOTE: the previous two-argument prototype (unsigned int, unsigned int)
// declared a never-defined overload of HL_kernel; the declaration must match
// the six-parameter definition further down in this file.
__global__ void HL_kernel(unsigned char* d_data, unsigned char* d_resultData, unsigned char* d_aboveRow, unsigned char* d_belowRow, unsigned int worldWidth, unsigned int worldHeight);
// Define number of Processors
int cudaDeviceCount;
hipError_t cE;
// Allocate the world buffers (current state, next generation, above/below
// ghost rows) in HIP managed memory and zero every byte.  The rank arguments
// are accepted only for signature parity with the other initializers.
static inline void HL_initAllZeros(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t cells = worldWidth * worldHeight;

    // Current world state
    hipMallocManaged(&g_data, cells * sizeof(unsigned char));
    hipMemset(g_data, 0, cells * sizeof(unsigned char));

    // Next-generation buffer
    hipMallocManaged(&g_resultData, cells * sizeof(unsigned char));
    hipMemset(g_resultData, 0, cells * sizeof(unsigned char));

    // Ghost row received from the rank above
    hipMallocManaged(&g_aboveRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_aboveRow, 0, worldWidth * sizeof(unsigned char));

    // Ghost row received from the rank below
    hipMallocManaged(&g_belowRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_belowRow, 0, worldWidth * sizeof(unsigned char));
}
// Allocate and zero all world buffers, then set every world cell — and the
// first worldWidth entries of the above/below ghost rows — to alive (1).
static inline void HL_initAllOnes(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // Use size_t for the index: the previous `int i` was compared against the
    // size_t bound (signed/unsigned mismatch) and would overflow for worlds
    // larger than INT_MAX cells.
    size_t i;
    // set all rows of world to true
    for( i = 0; i < total_world_size; i++)
    {
        g_data[i] = 1;
        // Mirror the first row into the above and below ghost rows
        if (i < worldWidth){
            g_aboveRow[i] = 1;
            g_belowRow[i] = 1;
        }
    }
}
// Allocate and zero all world buffers, then set 11 consecutive cells
// (offsets 128..138 of the last row) of the world to alive.
static inline void HL_initOnesInMiddle(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    // Use size_t indices: the previous `int i` truncated the size_t bound
    // for large worlds (signed/unsigned mismatch).
    size_t start = worldWidth * (worldHeight - 1) + 128;
    size_t end   = worldWidth * (worldHeight - 1) + 139;
    for(size_t i = start; i < end; i++){
        g_data[i] = 1;
    }
}
// Allocate and zero all world buffers, then mark corner cells alive:
// rank 0 sets the two top corners (plus the above ghost row), and the
// last rank sets the two bottom corners (plus the below ghost row).
static inline void HL_initOnesAtCorners(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
size_t total_world_size = worldWidth * worldHeight;
// Initialize the data
hipMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
hipMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the resulting data
hipMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
hipMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
// Initialize the above row
hipMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
hipMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
// Initialize the below row
hipMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
hipMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
// First rank owns the top edge of the global world
if(myrank == 0){
g_data[0] = 1; // upper left
g_data[worldWidth-1]=1; // upper right
g_aboveRow[0] = 1; // upper left
g_aboveRow[worldWidth-1]=1; // upper right
}
// Last rank owns the bottom edge of the global world
if(myrank == cudaDeviceCount - 1){
// NOTE(review): the index worldHeight*(worldWidth-1) looks transposed —
// the last row of a row-major world starts at worldWidth*(worldHeight-1).
// The two agree only for square worlds; confirm intent before changing.
g_data[(worldHeight * (worldWidth-1))]=1; // lower left
g_data[(worldHeight * (worldWidth-1)) + worldWidth-1]=1; // lower right
g_belowRow[0] = 1;
g_belowRow[worldWidth - 1] = 1;
}
}
// Allocate and zero all world buffers, then (on rank 0 only) place a
// three-cell "spinner" in the top row: cells 0, 1 and worldWidth-1, with
// the same three cells mirrored into the above ghost row.
static inline void HL_initSpinnerAtCorner(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t cells = worldWidth * worldHeight;

    // Current world state
    hipMallocManaged(&g_data, cells * sizeof(unsigned char));
    hipMemset(g_data, 0, cells * sizeof(unsigned char));

    // Next-generation buffer
    hipMallocManaged(&g_resultData, cells * sizeof(unsigned char));
    hipMemset(g_resultData, 0, cells * sizeof(unsigned char));

    // Ghost row received from the rank above
    hipMallocManaged(&g_aboveRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_aboveRow, 0, worldWidth * sizeof(unsigned char));

    // Ghost row received from the rank below
    hipMallocManaged(&g_belowRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_belowRow, 0, worldWidth * sizeof(unsigned char));

    // Only the first rank hosts the spinner pattern
    if( myrank == 0 ){
        g_data[0] = 1;               // upper left
        g_data[1] = 1;               // upper left + 1
        g_data[worldWidth-1] = 1;    // upper right
        g_aboveRow[0] = 1;           // upper left
        g_aboveRow[1] = 1;           // upper left + 1
        g_aboveRow[worldWidth-1] = 1; // upper right
    }
}
// Allocate and zero all world buffers, then seed a HighLife "replicator":
// three horizontal cells to the right of the world center and three vertical
// cells below it.
static inline void HL_initReplicator(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    const size_t cells = worldWidth * worldHeight;

    // Current world state
    hipMallocManaged(&g_data, cells * sizeof(unsigned char));
    hipMemset(g_data, 0, cells * sizeof(unsigned char));

    // Next-generation buffer
    hipMallocManaged(&g_resultData, cells * sizeof(unsigned char));
    hipMemset(g_resultData, 0, cells * sizeof(unsigned char));

    // Ghost row received from the rank above
    hipMallocManaged(&g_aboveRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_aboveRow, 0, worldWidth * sizeof(unsigned char));

    // Ghost row received from the rank below
    hipMallocManaged(&g_belowRow, worldWidth * sizeof(unsigned char));
    hipMemset(g_belowRow, 0, worldWidth * sizeof(unsigned char));

    // World center in (column, row) form
    const size_t col  = worldWidth / 2;
    const size_t row  = worldHeight / 2;
    const size_t base = col + row * worldWidth;

    // Horizontal arm: three cells right of center
    g_data[base + 1] = 1;
    g_data[base + 2] = 1;
    g_data[base + 3] = 1;
    // Vertical arm: three cells below center
    g_data[col + (row + 1) * worldWidth] = 1;
    g_data[col + (row + 2) * worldWidth] = 1;
    g_data[col + (row + 3) * worldWidth] = 1;
}
// ---------- EXPORT TO APPROPRIATE COMPILER ---------- //
// Entry point called by the (C) driver: binds this MPI rank to a HIP device
// (myrank % deviceCount) and dispatches to the initializer selected by
// `pattern` (0..5).  Exits the process with -1 on any HIP error or on an
// unknown pattern.
extern "C" void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
// INITIALIZE THE CUDA WORLD
// NOTE: the local parameter `cudaDeviceCount` is overwritten here by the
// queried device count; the incoming value is not used.
if( (cE = hipGetDeviceCount( &cudaDeviceCount)) != hipSuccess )
{
printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount );
exit(-1);
}
// Round-robin ranks over the available devices
if( (cE = hipSetDevice( myrank % cudaDeviceCount )) != hipSuccess )
{
printf(" Unable to have myrank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
// INITIALIZE THE PATTERN
switch(pattern)
{
case 0:
HL_initAllZeros( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 1:
HL_initAllOnes( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 2:
HL_initOnesInMiddle( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 3:
HL_initOnesAtCorners( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 4:
HL_initSpinnerAtCorner( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
case 5:
HL_initReplicator( worldWidth, worldHeight, myrank, cudaDeviceCount );
break;
default:
printf("Pattern %u has not been implemented \n", pattern);
exit(-1);
}
}
// MAIN KERNEL FUNCTION THAT DOES ALL OF THE WORK
// One HighLife generation.  Each thread processes whole rows: `index` is
// used as a row number (y1 = index * worldWidth) and advances by the total
// thread count, and the inner loop walks every column of that row with
// wrap-around in x.  Rows adjacent to the top/bottom edge read the
// above/below ghost rows instead of wrapping in y.
// NOTE(review): the outer-loop bound compares the row index against
// worldWidth*worldHeight (a cell count, not a row count) — confirm intended
// iteration range.  The int y0/y1/y2 offsets also truncate size_t products
// for very large worlds.
__global__ void HL_kernel( unsigned char* d_data, unsigned char* d_resultData, unsigned char* d_aboveRow, unsigned char* d_belowRow, unsigned int worldWidth, unsigned int worldHeight){
// Row index handled by this thread
size_t index;
// Stride loop over rows: step = blockDim.x * gridDim.x
for(index = blockIdx.x * blockDim.x + threadIdx.x; index < worldWidth*worldHeight; index += blockDim.x * gridDim.x){
// Byte offsets of the previous, current and next rows (wrapping in y)
int y0 = ((index + worldHeight - 1) % worldHeight) * worldWidth;
int y1 = index * worldWidth;
int y2 = ((index + 1) % worldHeight) * worldWidth;
// Column cursor
int x;
// Loop over corresponding COLUMNS
for (x = 0; x < worldWidth; ++x){
// Current column, left neighbor and right neighbor (wrapping in x)
int x1 = x;
int x0 = (x1 + worldWidth - 1) % worldWidth;
int x2 = (x1 + 1) % worldWidth;
// Get the status of the current cell to determine logic of life span
int is_alive = d_data[x1+y1];
// Count the number of alive neighbors
int num_alive = 0;
// Top row of this rank: upper neighbors come from the above ghost row
if (x1+y1 < worldWidth) {
num_alive = d_aboveRow[x0] + d_aboveRow[x1] + d_aboveRow[x2] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
// Bottom row of this rank: lower neighbors come from the below ghost row
else if (x1+y1 > worldWidth*worldHeight - worldWidth - 1) {
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_belowRow[x0] + d_belowRow[x1] + d_belowRow[x2];
}
// Interior row: all eight neighbors come from the world itself
else {
num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
}
// HighLife update rule (B36/S23): write the next state into d_resultData
if (is_alive == 1){
// Cell is alive!
if (num_alive < 2){
// Underpopulated
d_resultData[x1+y1] = 0;
}
else if (num_alive == 2 || num_alive == 3){
// Just the right amount of neighbors
d_resultData[x1+y1] = 1;
}
else {
// Overpopulated
d_resultData[x1+y1] = 0;
}
}
else {
// Cell is dead :(
if (num_alive == 3 || num_alive == 6) {
// #Resurrected
d_resultData[x1+y1] = 1;
}
else {
// We stay dead
d_resultData[x1+y1] = 0;
}
}// End logic for staying dead
} // End x loop
} // End loop over each thread
// ----- SWAP DATA IN ABOVE ROWS AND BELOW ROWS ----- //
// Every thread copies the first and last result rows into the ghost rows.
// NOTE(review): __syncthreads() only synchronizes within a block, so this
// copy can race with other blocks still computing — confirm intent.
int j;
for(j = 0; j < worldWidth; j++){
d_aboveRow[j] = d_resultData[j];
d_belowRow[j] = d_resultData[j + worldWidth*(worldHeight - 1)];
}
// Synchronize the threads?
__syncthreads();
}
// LAUNCH KERNEL FUNCTION
// Launch one generation of HL_kernel with the requested grid/block geometry
// and block on the host until the device has finished.
extern "C" void HL_kernelLaunch( unsigned char** d_data, unsigned char** d_resultData, unsigned char** d_aboveRow, unsigned char** d_belowRow, int block_count, int thread_count, unsigned int worldWidth, unsigned int worldHeight, int myrank){
    // Dereference the double pointers once up front for readability
    unsigned char* world  = *d_data;
    unsigned char* result = *d_resultData;
    unsigned char* above  = *d_aboveRow;
    unsigned char* below  = *d_belowRow;
    // Call the kernel
    HL_kernel<<<block_count,thread_count>>>(world, result, above, below, worldWidth, worldHeight);
    // Wait for the device to finish this generation
    hipDeviceSynchronize();
}
// Free memory
// Release every managed buffer owned by this rank, in the same order they
// were allocated: world, result, above ghost row, below ghost row.
extern "C" void freeCudaArrays(int myrank){
    unsigned char* buffers[] = { g_data, g_resultData, g_aboveRow, g_belowRow };
    for (unsigned i = 0; i < sizeof(buffers)/sizeof(buffers[0]); ++i){
        hipFree(buffers[i]);
    }
}
.file "highlifeCuda.hip"
#-----------------------------------------------------------------------
# void HL_initMaster(unsigned pattern, size_t worldWidth,
#                    size_t worldHeight, int myrank, int cudaDeviceCount)
# SysV AMD64, compiler-generated (clang, HIP host side).
# Regs: r15d = pattern, rbx = worldWidth, r14 = worldHeight, ebp = myrank,
#       12(%rsp) = device count (out-param of hipGetDeviceCount).
# Queries the device count, selects device myrank %% count, then dispatches
# through jump table .LJTI0_0 to one of six inlined initializers.
#-----------------------------------------------------------------------
.globl HL_initMaster # -- Begin function HL_initMaster
.p2align 4, 0x90
.type HL_initMaster,@function
HL_initMaster: # @HL_initMaster
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# Stash the C arguments in callee-saved registers across the HIP calls
movl %ecx, %ebp
movq %rdx, %r14
movq %rsi, %rbx
movl %edi, %r15d
movl %r8d, 12(%rsp)
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl %eax, cE(%rip)
movl 12(%rsp), %ecx
testl %eax, %eax
jne .LBB0_21
# %bb.1:
# device = myrank %% deviceCount (idivl leaves the remainder in edx)
movl %ebp, %eax
cltd
idivl %ecx
movl %edx, %edi
callq hipSetDevice
movl %eax, cE(%rip)
testl %eax, %eax
jne .LBB0_22
# %bb.2:
# pattern > 5 -> "not implemented" error path
cmpl $5, %r15d
ja .LBB0_18
# %bb.3:
movl %r15d, %eax
jmpq *.LJTI0_0(,%rax,8)
# pattern 0: inlined HL_initAllZeros (allocate + memset all four buffers)
.LBB0_19:
imulq %rbx, %r14
movl $g_data, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
jmp .LBB0_20
# pattern 4: inlined HL_initSpinnerAtCorner
.LBB0_15:
imulq %rbx, %r14
movl $g_data, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
# Only rank 0 writes the spinner cells
testl %ebp, %ebp
jne .LBB0_20
# %bb.16:
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, 1(%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbx)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, 1(%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbx)
jmp .LBB0_20
# pattern 2: inlined HL_initOnesInMiddle
.LBB0_9:
movq %r14, %r15
imulq %rbx, %r15
movl $g_data, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
# Loop bounds: base = worldWidth*(worldHeight-1); write base+128..base+138
decq %r14
imulq %rbx, %r14
leaq 139(%r14), %rax
subl $-128, %r14d
movslq %r14d, %rcx
cmpq %rcx, %rax
jbe .LBB0_20
.p2align 4, 0x90
.LBB0_10: # %.lr.ph.i25
# =>This Inner Loop Header: Depth=1
movq g_data(%rip), %rdx
movb $1, (%rdx,%rcx)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_10
jmp .LBB0_20
# pattern 3: inlined HL_initOnesAtCorners
.LBB0_11:
movl 12(%rsp), %r12d
movq %r14, %r15
imulq %rbx, %r15
movl $g_data, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
# Rank 0: top corners
testl %ebp, %ebp
jne .LBB0_13
# %bb.12:
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbx)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbx)
.LBB0_13:
# Last rank (myrank == deviceCount-1): bottom corners
decl %r12d
cmpl %ebp, %r12d
jne .LBB0_20
# %bb.14:
# Index is (worldWidth-1)*worldHeight, matching the C source's
# worldHeight*(worldWidth-1) expression
movq g_data(%rip), %rax
leaq -1(%rbx), %rcx
imulq %r14, %rcx
movb $1, (%rax,%rcx)
addq g_data(%rip), %rcx
movb $1, -1(%rbx,%rcx)
movq g_belowRow(%rip), %rax
movb $1, (%rax)
movq g_belowRow(%rip), %rax
movb $1, -1(%rax,%rbx)
jmp .LBB0_20
# pattern 1: inlined HL_initAllOnes
.LBB0_4:
imulq %rbx, %r14
movl $g_data, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
testq %r14, %r14
je .LBB0_20
# %bb.5: # %.lr.ph.i.preheader
# rax = cell index; ghost rows only written while index < worldWidth (rbx)
xorl %eax, %eax
jmp .LBB0_6
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_6 Depth=1
incq %rax
cmpq %rax, %r14
je .LBB0_20
.LBB0_6: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movq g_data(%rip), %rcx
movb $1, (%rcx,%rax)
cmpq %rbx, %rax
jae .LBB0_8
# %bb.7: # in Loop: Header=BB0_6 Depth=1
movq g_aboveRow(%rip), %rcx
movb $1, (%rcx,%rax)
movq g_belowRow(%rip), %rcx
movb $1, (%rcx,%rax)
jmp .LBB0_8
# pattern 5: inlined HL_initReplicator
.LBB0_17:
movq %r14, %r15
imulq %rbx, %r15
movl $g_data, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
# rax = x = worldWidth/2, r14 = y = worldHeight/2; write the six seed cells
movq %rbx, %rax
shrq %rax
shrq %r14
movq g_data(%rip), %rcx
movq %r14, %rdx
imulq %rbx, %rdx
addq %rax, %rdx
movb $1, 1(%rcx,%rdx)
movq g_data(%rip), %rcx
movb $1, 2(%rcx,%rdx)
movq g_data(%rip), %rcx
movb $1, 3(%rcx,%rdx)
leaq 1(%r14), %rcx
imulq %rbx, %rcx
movq g_data(%rip), %rdx
addq %rax, %rdx
movb $1, (%rcx,%rdx)
leaq 2(%r14), %rcx
imulq %rbx, %rcx
movq g_data(%rip), %rdx
addq %rax, %rdx
movb $1, (%rcx,%rdx)
addq $3, %r14
imulq %rbx, %r14
addq g_data(%rip), %rax
movb $1, (%r14,%rax)
# Common epilogue: restore callee-saved registers and return
.LBB0_20: # %_ZL14HL_initAllOnesmmii.exit
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
# Error: hipGetDeviceCount failed -> printf + exit(-1)
.LBB0_21:
.cfi_def_cfa_offset 64
movl $.L.str, %edi
movl %eax, %esi
movl %ecx, %edx
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
# Error: hipSetDevice failed -> printf + exit(-1)
.LBB0_22:
movl %eax, %ecx
movl %ebp, %eax
cltd
idivl 12(%rsp)
movl $.L.str.1, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
# Error: unknown pattern -> printf + exit(-1)
.LBB0_18:
movl $.L.str.2, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end0:
.size HL_initMaster, .Lfunc_end0-HL_initMaster
.cfi_endproc
# Jump table indexed by pattern (0..5)
.section .rodata,"a",@progbits
.p2align 3, 0x0
.LJTI0_0:
.quad .LBB0_19
.quad .LBB0_4
.quad .LBB0_9
.quad .LBB0_11
.quad .LBB0_15
.quad .LBB0_17
# -- End function
.text
#-----------------------------------------------------------------------
# Host-side launch stub for HL_kernel.  Spills the six kernel arguments to
# the stack, builds the kernel-argument pointer array at 96(%rsp), pops the
# launch configuration pushed earlier by __hipPushCallConfiguration, and
# forwards everything to hipLaunchKernel.
#-----------------------------------------------------------------------
.text
.globl _Z24__device_stub__HL_kernelPhS_S_S_jj # -- Begin function _Z24__device_stub__HL_kernelPhS_S_S_jj
.p2align 4, 0x90
.type _Z24__device_stub__HL_kernelPhS_S_S_jj,@function
_Z24__device_stub__HL_kernelPhS_S_S_jj: # @_Z24__device_stub__HL_kernelPhS_S_S_jj
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill the six incoming arguments (4 pointers, 2 u32) to stack slots
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
# Build the void* kernel-args array pointing at those slots
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9HL_kernelPhS_S_S_jj, %edi
# Push shared-mem size and stream as the stack arguments of hipLaunchKernel
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z24__device_stub__HL_kernelPhS_S_S_jj, .Lfunc_end1-_Z24__device_stub__HL_kernelPhS_S_S_jj
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# void HL_kernelLaunch(uchar** d_data, uchar** d_resultData,
#                      uchar** d_aboveRow, uchar** d_belowRow,
#                      int block_count, int thread_count,
#                      unsigned worldWidth, unsigned worldHeight, int myrank)
# Pushes the launch configuration, inlines the device stub (dereferencing
# the four double pointers), launches the kernel, then hipDeviceSynchronize.
#-----------------------------------------------------------------------
.globl HL_kernelLaunch # -- Begin function HL_kernelLaunch
.p2align 4, 0x90
.type HL_kernelLaunch,@function
HL_kernelLaunch: # @HL_kernelLaunch
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $152, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
# Keep the four double pointers in callee-saved registers
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
# Pack dim3(block_count,1,1) / dim3(thread_count,1,1): low 32 = x, high 32 = y(=1)
movl %r8d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %r9d, %edx
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
# Inlined device stub: load worldWidth/worldHeight (stack args) and the
# dereferenced device pointers, then build the kernel-args array
movl 200(%rsp), %eax
movl 192(%rsp), %ecx
movq (%r12), %rdx
movq (%r15), %rsi
movq (%r14), %rdi
movq (%rbx), %r8
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdi, 72(%rsp)
movq %r8, 64(%rsp)
movl %ecx, 12(%rsp)
movl %eax, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9HL_kernelPhS_S_S_jj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
# Always synchronize, even if the launch configuration push failed
callq hipDeviceSynchronize
addq $152, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size HL_kernelLaunch, .Lfunc_end2-HL_kernelLaunch
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# void freeCudaArrays(int myrank)
# hipFree on g_data, g_resultData, g_aboveRow, then tail-calls hipFree on
# g_belowRow (the push/pop of %rax only keeps %rsp 16-byte aligned).
#-----------------------------------------------------------------------
.globl freeCudaArrays # -- Begin function freeCudaArrays
.p2align 4, 0x90
.type freeCudaArrays,@function
freeCudaArrays: # @freeCudaArrays
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq g_data(%rip), %rdi
callq hipFree
movq g_resultData(%rip), %rdi
callq hipFree
movq g_aboveRow(%rip), %rdi
callq hipFree
movq g_belowRow(%rip), %rdi
popq %rax
.cfi_def_cfa_offset 8
jmp hipFree # TAILCALL
.Lfunc_end3:
.size freeCudaArrays, .Lfunc_end3-freeCudaArrays
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# Module constructor (run from .init_array): registers the fat binary once,
# registers HL_kernel with the HIP runtime, and arranges the destructor via
# atexit (tail call).
#-----------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Register the fat binary only on the first call
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the four trailing (unused) pointer arguments on the stack
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9HL_kernelPhS_S_S_jj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# Module destructor (registered via atexit): unregisters the fat binary if
# it was registered and clears the cached handle.
#-----------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
# push/pop of %rax only realigns %rsp to 16 for the call
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type cudaDeviceCount,@object # @cudaDeviceCount
.bss
.globl cudaDeviceCount
.p2align 2, 0x0
cudaDeviceCount:
.long 0 # 0x0
.size cudaDeviceCount, 4
.type cE,@object # @cE
.globl cE
.p2align 2, 0x0
cE:
.long 0 # 0x0
.size cE, 4
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " Unable to determine cuda device count, error is %d, count is %d\n"
.size .L.str, 66
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " Unable to have myrank %d set to cuda device %d, error is %d \n"
.size .L.str.1, 63
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Pattern %u has not been implemented \n"
.size .L.str.2, 38
.type _Z9HL_kernelPhS_S_S_jj,@object # @_Z9HL_kernelPhS_S_S_jj
.section .rodata,"a",@progbits
.globl _Z9HL_kernelPhS_S_S_jj
.p2align 3, 0x0
_Z9HL_kernelPhS_S_S_jj:
.quad _Z24__device_stub__HL_kernelPhS_S_S_jj
.size _Z9HL_kernelPhS_S_S_jj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9HL_kernelPhS_S_S_jj"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__HL_kernelPhS_S_S_jj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9HL_kernelPhS_S_S_jj
.addrsig_sym g_data
.addrsig_sym g_resultData
.addrsig_sym g_aboveRow
.addrsig_sym g_belowRow
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00190d5d_00000000-6_highlifeCuda.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit handler emitted by nvcc: unregisters this TU's fatbinary at exit.
# Reads the handle that _ZL24__sti____cudaRegisterAllv stored earlier.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2081:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# arg0 = saved fatbin handle
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2081:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string " Unable to determine cuda device count, error is %d, count is %d\n"
.align 8
.LC1:
.string " Unable to have myrank %d set to cuda device %d, error is %d \n"
.align 8
.LC2:
.string "Pattern %u has not been implemented \n"
.text
# HL_initMaster(pattern:edi, worldWidth:rsi, worldHeight:rdx, myrank:ecx, numranks:r8d)
# GCC/CUDA host build. Selects a GPU (myrank % device count), then allocates
# g_data/g_resultData (width*height bytes) and g_aboveRow/g_belowRow (width
# bytes) with cudaMallocManaged + cudaMemset(0), and seeds one of six initial
# patterns via the .L8 jump table. Error paths print and exit(-1).
# Register roles: r12d=pattern, rbp=width, rbx=height, r13d=myrank.
.globl HL_initMaster
.type HL_initMaster, @function
HL_initMaster:
.LFB2076:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $16, %rsp
.cfi_def_cfa_offset 64
movl %edi, %r12d
movq %rsi, %rbp
movq %rdx, %rbx
movl %ecx, %r13d
movl %r8d, 12(%rsp)
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl %eax, cE(%rip)
testl %eax, %eax
jne .L22
# device = myrank % deviceCount
movl %r13d, %eax
cltd
idivl 12(%rsp)
movl %edx, %edi
call cudaSetDevice@PLT
movl %eax, %r8d
movl %eax, cE(%rip)
testl %eax, %eax
jne .L23
cmpl $5, %r12d
ja .L6
movl %r12d, %r12d
leaq .L8(%rip), %rdx
movslq (%rdx,%r12,4), %rax
addq %rdx, %rax
notrack jmp *%rax
# jump table: patterns 0..5 -> .L13 .L12 .L11 .L10 .L9 .L7
.section .rodata
.align 4
.align 4
.L8:
.long .L13-.L8
.long .L12-.L8
.long .L11-.L8
.long .L10-.L8
.long .L9-.L8
.long .L7-.L8
.text
# error: cudaGetDeviceCount failed
.L22:
movl 12(%rsp), %ecx
movl %eax, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
# error: cudaSetDevice failed
.L23:
movl %r13d, %eax
cltd
idivl 12(%rsp)
movl %edx, %ecx
movl %r13d, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
# pattern 0: allocate + zero only
.L13:
imulq %rbp, %rbx
movl $1, %edx
movq %rbx, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbx, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
# common epilogue
.L3:
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
# pattern 1: fill g_data (and first row of above/below) with 1s
.L12:
.cfi_restore_state
imulq %rbp, %rbx
movl $1, %edx
movq %rbx, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbx, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
testq %rbx, %rbx
je .L3
movl $0, %eax
jmp .L16
.L15:
addq $1, %rax
cmpq %rax, %rbx
je .L3
.L16:
movq g_data(%rip), %rdx
movb $1, (%rdx,%rax)
cmpq %rbp, %rax
jnb .L15
movq g_aboveRow(%rip), %rdx
movb $1, (%rdx,%rax)
movq g_belowRow(%rip), %rdx
movb $1, (%rdx,%rax)
jmp .L15
# pattern 2: stripe of 1s near the end of g_data
.L11:
movq %rbp, %r12
imulq %rbx, %r12
movl $1, %edx
movq %r12, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %r12, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
subq $1, %rbx
movl %ebx, %eax
imull %ebp, %eax
subl $-128, %eax
cltq
imulq %rbp, %rbx
addq $139, %rbx
cmpq %rbx, %rax
jnb .L3
.L17:
movq g_data(%rip), %rdx
movb $1, (%rdx,%rax)
addq $1, %rax
cmpq %rbx, %rax
jne .L17
jmp .L3
# pattern 3: corner cells on first rank, last rank mirrored
.L10:
movl 12(%rsp), %r14d
movq %rbp, %r12
imulq %rbx, %r12
movl $1, %edx
movq %r12, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %r12, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
testl %r13d, %r13d
jne .L18
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbp)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbp)
.L18:
subl $1, %r14d
cmpl %r14d, %r13d
jne .L3
leaq -1(%rbp), %rax
imulq %rax, %rbx
movq g_data(%rip), %rdx
movb $1, (%rdx,%rbx)
addq g_data(%rip), %rax
movb $1, (%rax,%rbx)
movq g_belowRow(%rip), %rax
movb $1, (%rax)
movq g_belowRow(%rip), %rax
movb $1, -1(%rax,%rbp)
jmp .L3
# pattern 4: three cells in the first row (rank 0 only)
.L9:
imulq %rbp, %rbx
movl $1, %edx
movq %rbx, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbx, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbx, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
testl %r13d, %r13d
jne .L3
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, 1(%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbp)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, 1(%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbp)
jmp .L3
# pattern 5: small replicator-style seed at grid center
.L7:
movq %rbp, %r12
imulq %rbx, %r12
movl $1, %edx
movq %r12, %rsi
leaq g_data(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_data(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %r12, %rsi
leaq g_resultData(%rip), %rdi
call cudaMallocManaged@PLT
movq %r12, %rdx
movl $0, %esi
movq g_resultData(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_aboveRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_aboveRow(%rip), %rdi
call cudaMemset@PLT
movl $1, %edx
movq %rbp, %rsi
leaq g_belowRow(%rip), %rdi
call cudaMallocManaged@PLT
movq %rbp, %rdx
movl $0, %esi
movq g_belowRow(%rip), %rdi
call cudaMemset@PLT
movq %rbp, %rdx
shrq %rdx
shrq %rbx
imulq %rbp, %rbx
leaq (%rdx,%rbx), %rax
movq g_data(%rip), %rcx
movb $1, 1(%rcx,%rax)
movq g_data(%rip), %rcx
movb $1, 2(%rcx,%rax)
movq g_data(%rip), %rcx
movb $1, 3(%rcx,%rax)
leaq 0(%rbp,%rbx), %rax
movq %rdx, %rcx
addq g_data(%rip), %rcx
movb $1, (%rcx,%rax)
addq %rbp, %rax
movq %rdx, %rcx
addq g_data(%rip), %rcx
movb $1, (%rcx,%rax)
addq g_data(%rip), %rdx
addq %rax, %rdx
movb $1, (%rdx,%rbp)
jmp .L3
# error: pattern > 5 not implemented
.L6:
movl %r12d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2076:
.size HL_initMaster, .-HL_initMaster
# freeCudaArrays(): release the four managed buffers allocated by
# HL_initMaster (g_data, g_resultData, g_aboveRow, g_belowRow).
.globl freeCudaArrays
.type freeCudaArrays, @function
freeCudaArrays:
.LFB2078:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq g_data(%rip), %rdi
call cudaFree@PLT
movq g_resultData(%rip), %rdi
call cudaFree@PLT
movq g_aboveRow(%rip), %rdi
call cudaFree@PLT
movq g_belowRow(%rip), %rdi
call cudaFree@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2078:
.size freeCudaArrays, .-freeCudaArrays
# nvcc-generated launch stub for HL_kernel(uchar*,uchar*,uchar*,uchar*,uint,uint):
# spills the six kernel arguments, builds the pointer array expected by
# cudaLaunchKernel, pops the pushed launch configuration, and launches.
# Guarded by a stack canary (%fs:40).
.globl _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
.type _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj, @function
_Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj:
.LFB2103:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# spill incoming args to the stack
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# kernel-argument pointer array at 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L30
.L26:
# verify stack canary before returning
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L31
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9HL_kernelPhS_S_S_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L26
.L31:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2103:
.size _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj, .-_Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
# Host-side trampoline with the kernel's mangled name; simply forwards all
# register arguments unchanged to the launch stub above.
.globl _Z9HL_kernelPhS_S_S_jj
.type _Z9HL_kernelPhS_S_S_jj, @function
_Z9HL_kernelPhS_S_S_jj:
.LFB2104:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2104:
.size _Z9HL_kernelPhS_S_S_jj, .-_Z9HL_kernelPhS_S_S_jj
# HL_kernelLaunch(uchar** d_data, uchar** d_result, uchar** above, uchar** below,
#                 grid:r8d, block:r9d): pushes a 1-D launch configuration, calls
# the device stub with the dereferenced buffer pointers, then synchronizes.
.globl HL_kernelLaunch
.type HL_kernelLaunch, @function
HL_kernelLaunch:
.LFB2077:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbx
movq %rsi, %rbp
movq %rdx, %r12
movq %rcx, %r13
# dim3 values: (grid,1,1) x (block,1,1)
movl %r9d, 20(%rsp)
movl $1, 24(%rsp)
movl %r8d, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L35:
call cudaDeviceSynchronize@PLT
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
# dereference the uchar** arguments and launch
movq 0(%r13), %rcx
movq (%r12), %rdx
movq 0(%rbp), %rsi
movq (%rbx), %rdi
movl 88(%rsp), %r9d
movl 80(%rsp), %r8d
call _Z36__device_stub__Z9HL_kernelPhS_S_S_jjPhS_S_S_jj
jmp .L35
.cfi_endproc
.LFE2077:
.size HL_kernelLaunch, .-HL_kernelLaunch
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "_Z9HL_kernelPhS_S_S_jj"
.text
# Static initializer (runs via .init_array): registers the fatbinary and the
# HL_kernel entry point with the CUDA runtime, then installs the
# unregister handler via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2106:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# four NULL trailing args of __cudaRegisterFunction go on the stack
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9HL_kernelPhS_S_S_jj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2106:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl cE
.bss
.align 4
.type cE, @object
.size cE, 4
cE:
.zero 4
.globl cudaDeviceCount
.align 4
.type cudaDeviceCount, @object
.size cudaDeviceCount, 4
cudaDeviceCount:
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "highlifeCuda.hip"
# HL_initMaster — AMD clang/HIP build of the same function as the GCC version
# above: picks a device (myrank % count), allocates the four managed buffers
# with hipMallocManaged + hipMemset(0), and seeds one of six patterns via the
# .LJTI0_0 jump table. Register roles: r15d=pattern, rbx=width, r14=height,
# ebp=myrank.
.globl HL_initMaster # -- Begin function HL_initMaster
.p2align 4, 0x90
.type HL_initMaster,@function
HL_initMaster: # @HL_initMaster
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebp
movq %rdx, %r14
movq %rsi, %rbx
movl %edi, %r15d
movl %r8d, 12(%rsp)
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl %eax, cE(%rip)
movl 12(%rsp), %ecx
testl %eax, %eax
jne .LBB0_21
# %bb.1:
movl %ebp, %eax
cltd
idivl %ecx
movl %edx, %edi
callq hipSetDevice
movl %eax, cE(%rip)
testl %eax, %eax
jne .LBB0_22
# %bb.2:
cmpl $5, %r15d
ja .LBB0_18
# %bb.3:
movl %r15d, %eax
jmpq *.LJTI0_0(,%rax,8)
# pattern 0: allocate + zero only
.LBB0_19:
imulq %rbx, %r14
movl $g_data, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
jmp .LBB0_20
# pattern 4: three cells in the first row (rank 0 only)
.LBB0_15:
imulq %rbx, %r14
movl $g_data, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
testl %ebp, %ebp
jne .LBB0_20
# %bb.16:
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, 1(%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbx)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, 1(%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbx)
jmp .LBB0_20
# pattern 2: stripe of 1s near the end of g_data
.LBB0_9:
movq %r14, %r15
imulq %rbx, %r15
movl $g_data, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
decq %r14
imulq %rbx, %r14
leaq 139(%r14), %rax
subl $-128, %r14d
movslq %r14d, %rcx
cmpq %rcx, %rax
jbe .LBB0_20
.p2align 4, 0x90
.LBB0_10: # %.lr.ph.i25
# =>This Inner Loop Header: Depth=1
movq g_data(%rip), %rdx
movb $1, (%rdx,%rcx)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_10
jmp .LBB0_20
# pattern 3: corner cells on first rank, last rank mirrored
.LBB0_11:
movl 12(%rsp), %r12d
movq %r14, %r15
imulq %rbx, %r15
movl $g_data, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
testl %ebp, %ebp
jne .LBB0_13
# %bb.12:
movq g_data(%rip), %rax
movb $1, (%rax)
movq g_data(%rip), %rax
movb $1, -1(%rax,%rbx)
movq g_aboveRow(%rip), %rax
movb $1, (%rax)
movq g_aboveRow(%rip), %rax
movb $1, -1(%rax,%rbx)
.LBB0_13:
decl %r12d
cmpl %ebp, %r12d
jne .LBB0_20
# %bb.14:
movq g_data(%rip), %rax
leaq -1(%rbx), %rcx
imulq %r14, %rcx
movb $1, (%rax,%rcx)
addq g_data(%rip), %rcx
movb $1, -1(%rbx,%rcx)
movq g_belowRow(%rip), %rax
movb $1, (%rax)
movq g_belowRow(%rip), %rax
movb $1, -1(%rax,%rbx)
jmp .LBB0_20
# pattern 1: fill g_data (and first row of above/below) with 1s
.LBB0_4:
imulq %rbx, %r14
movl $g_data, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r14, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
testq %r14, %r14
je .LBB0_20
# %bb.5: # %.lr.ph.i.preheader
xorl %eax, %eax
jmp .LBB0_6
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_6 Depth=1
incq %rax
cmpq %rax, %r14
je .LBB0_20
.LBB0_6: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movq g_data(%rip), %rcx
movb $1, (%rcx,%rax)
cmpq %rbx, %rax
jae .LBB0_8
# %bb.7: # in Loop: Header=BB0_6 Depth=1
movq g_aboveRow(%rip), %rcx
movb $1, (%rcx,%rax)
movq g_belowRow(%rip), %rcx
movb $1, (%rcx,%rax)
jmp .LBB0_8
# pattern 5: small replicator-style seed at grid center
.LBB0_17:
movq %r14, %r15
imulq %rbx, %r15
movl $g_data, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_data(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_resultData, %edi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_resultData(%rip), %rdi
xorl %esi, %esi
movq %r15, %rdx
callq hipMemset
movl $g_aboveRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_aboveRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movl $g_belowRow, %edi
movq %rbx, %rsi
movl $1, %edx
callq hipMallocManaged
movq g_belowRow(%rip), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq hipMemset
movq %rbx, %rax
shrq %rax
shrq %r14
movq g_data(%rip), %rcx
movq %r14, %rdx
imulq %rbx, %rdx
addq %rax, %rdx
movb $1, 1(%rcx,%rdx)
movq g_data(%rip), %rcx
movb $1, 2(%rcx,%rdx)
movq g_data(%rip), %rcx
movb $1, 3(%rcx,%rdx)
leaq 1(%r14), %rcx
imulq %rbx, %rcx
movq g_data(%rip), %rdx
addq %rax, %rdx
movb $1, (%rcx,%rdx)
leaq 2(%r14), %rcx
imulq %rbx, %rcx
movq g_data(%rip), %rdx
addq %rax, %rdx
movb $1, (%rcx,%rdx)
addq $3, %r14
imulq %rbx, %r14
addq g_data(%rip), %rax
movb $1, (%r14,%rax)
# common epilogue
.LBB0_20: # %_ZL14HL_initAllOnesmmii.exit
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
# error: hipGetDeviceCount failed
.LBB0_21:
.cfi_def_cfa_offset 64
movl $.L.str, %edi
movl %eax, %esi
movl %ecx, %edx
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
# error: hipSetDevice failed
.LBB0_22:
movl %eax, %ecx
movl %ebp, %eax
cltd
idivl 12(%rsp)
movl $.L.str.1, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
# error: pattern > 5 not implemented
.LBB0_18:
movl $.L.str.2, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end0:
.size HL_initMaster, .Lfunc_end0-HL_initMaster
.cfi_endproc
# jump table: patterns 0..5 -> LBB0_19 LBB0_4 LBB0_9 LBB0_11 LBB0_15 LBB0_17
.section .rodata,"a",@progbits
.p2align 3, 0x0
.LJTI0_0:
.quad .LBB0_19
.quad .LBB0_4
.quad .LBB0_9
.quad .LBB0_11
.quad .LBB0_15
.quad .LBB0_17
# -- End function
.text
# HIP launch stub for HL_kernel: spills the six arguments, builds the
# kernel-argument pointer array, pops the pushed launch configuration and
# calls hipLaunchKernel with the kernel handle object as the function key.
.globl _Z24__device_stub__HL_kernelPhS_S_S_jj # -- Begin function _Z24__device_stub__HL_kernelPhS_S_S_jj
.p2align 4, 0x90
.type _Z24__device_stub__HL_kernelPhS_S_S_jj,@function
_Z24__device_stub__HL_kernelPhS_S_S_jj: # @_Z24__device_stub__HL_kernelPhS_S_S_jj
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
# argument pointer array at 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9HL_kernelPhS_S_S_jj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z24__device_stub__HL_kernelPhS_S_S_jj, .Lfunc_end1-_Z24__device_stub__HL_kernelPhS_S_S_jj
.cfi_endproc
# -- End function
# HIP build of HL_kernelLaunch: pushes a 1-D launch configuration (grid and
# block dims packed with the 0x100000000 "y=1" high word), inlines the stub's
# argument marshalling, launches, then calls hipDeviceSynchronize.
.globl HL_kernelLaunch # -- Begin function HL_kernelLaunch
.p2align 4, 0x90
.type HL_kernelLaunch,@function
HL_kernelLaunch: # @HL_kernelLaunch
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $152, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movl %r8d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %r9d, %edx
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
# dereference the uchar** arguments, marshal, and launch
movl 200(%rsp), %eax
movl 192(%rsp), %ecx
movq (%r12), %rdx
movq (%r15), %rsi
movq (%r14), %rdi
movq (%rbx), %r8
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdi, 72(%rsp)
movq %r8, 64(%rsp)
movl %ecx, 12(%rsp)
movl %eax, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9HL_kernelPhS_S_S_jj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
callq hipDeviceSynchronize
addq $152, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size HL_kernelLaunch, .Lfunc_end2-HL_kernelLaunch
.cfi_endproc
# -- End function
# HIP build of freeCudaArrays: hipFree the four managed buffers; the last
# call is a tail call.
.globl freeCudaArrays # -- Begin function freeCudaArrays
.p2align 4, 0x90
.type freeCudaArrays,@function
freeCudaArrays: # @freeCudaArrays
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq g_data(%rip), %rdi
callq hipFree
movq g_resultData(%rip), %rdi
callq hipFree
movq g_aboveRow(%rip), %rdi
callq hipFree
movq g_belowRow(%rip), %rdi
popq %rax
.cfi_def_cfa_offset 8
jmp hipFree # TAILCALL
.Lfunc_end3:
.size freeCudaArrays, .Lfunc_end3-freeCudaArrays
.cfi_endproc
# -- End function
# Module constructor (.init_array): lazily registers the HIP fatbinary once
# (guarded by __hip_gpubin_handle), registers the HL_kernel handle object,
# and installs __hip_module_dtor via atexit (tail call).
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
# zero the four trailing pointer args passed on the stack
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9HL_kernelPhS_S_S_jj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor: unregisters the fatbinary if it was registered and
# clears the handle so the ctor/dtor pair is idempotent per process.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type cudaDeviceCount,@object # @cudaDeviceCount
.bss
.globl cudaDeviceCount
.p2align 2, 0x0
cudaDeviceCount:
.long 0 # 0x0
.size cudaDeviceCount, 4
.type cE,@object # @cE
.globl cE
.p2align 2, 0x0
cE:
.long 0 # 0x0
.size cE, 4
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " Unable to determine cuda device count, error is %d, count is %d\n"
.size .L.str, 66
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " Unable to have myrank %d set to cuda device %d, error is %d \n"
.size .L.str.1, 63
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Pattern %u has not been implemented \n"
.size .L.str.2, 38
.type _Z9HL_kernelPhS_S_S_jj,@object # @_Z9HL_kernelPhS_S_S_jj
.section .rodata,"a",@progbits
.globl _Z9HL_kernelPhS_S_S_jj
.p2align 3, 0x0
_Z9HL_kernelPhS_S_S_jj:
.quad _Z24__device_stub__HL_kernelPhS_S_S_jj
.size _Z9HL_kernelPhS_S_S_jj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9HL_kernelPhS_S_S_jj"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__HL_kernelPhS_S_S_jj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9HL_kernelPhS_S_S_jj
.addrsig_sym g_data
.addrsig_sym g_resultData
.addrsig_sym g_aboveRow
.addrsig_sym g_belowRow
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #define REORDER 0
#define GOOD_WEATHER 0
#define BAD_WEATHER 1
#define TAG_Car 0
#define TAG_Pedestrian 1
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <random>
//#include <array>
#include <algorithm>
#define NUM_CARS 4096
#define NUM_PEDS 16384
#define NUM_STREETS 500
#define MAX_CONNECTIONS 10
#define MAX_LEN 25
using namespace std;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with a diagnostic if a CUDA API call returned an error.
// Invoked through the CudaSafeCall() macro, which supplies __FILE__/__LINE__.
// No-op unless CUDA_ERROR_CHECK is defined (it is, above).
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Check for a pending asynchronous CUDA error (e.g. from a kernel launch)
// and abort with a diagnostic if one is found. Invoked through the
// CudaCheckError() macro. The second, synchronizing check catches errors
// raised during kernel execution but costs a cudaDeviceSynchronize.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// One simulated actor (car or pedestrian). The a1..a10 padding ints appear
// intended to spread actors across cache lines (see TODO below).
typedef struct
{
float progress; // distance travelled along the current street
int street; // index into the streets array
float max_velocity; // actor's own speed cap (used by cars)
int tag; // TAG_Car or TAG_Pedestrian
int a1;
int a2;
int a3;
int a4;
int a5;
int a6;
int a7;
int a8;
int a9;
int a10;
// TODO: add more fields here to avoid cache locality
} struct_Actor;
// One street segment; s1..s5 look like padding — TODO confirm.
typedef struct
{
float length; // progress at which an actor leaves this street
float max_velocity; // street speed limit
int neighbor_array_index; // index into the neighbor offset/size arrays
int s1;
int s2;
int s3;
int s4;
int s5;
} struct_Street;
// (tag, id) pair; not referenced in the code visible in this chunk.
typedef struct
{
int tag;
int id;
} tag_id_pair;
// Global device-side pointers; published by kernel() from its arguments and
// read by the method_* move functions.
__device__ struct_Actor *d_Actors;
__device__ struct_Street *d_Streets;
__device__ int *d_Array_Street_arrays;
__device__ int *d_Array_Street_offset;
__device__ int *d_Array_Street_size;
__device__ int *d_input_actor_id;
__device__ int *d_jobs;
__device__ int *d_randomn;
// Advance one car by a single one-minute tick.
// car_id  : index into d_Actors
// weather : GOOD_WEATHER (full speed) or BAD_WEATHER (75% speed)
// Side effects: updates d_Actors[car_id].progress, and .street when the car
// reaches the end of its current street.
__device__ void method_Car_move(int car_id, int weather)
{
    // Default to the GOOD_WEATHER multiplier. The original code left
    // weather_multiplier uninitialized (undefined behavior) for any weather
    // value other than GOOD_WEATHER/BAD_WEATHER; behavior for the two
    // defined values is unchanged.
    float weather_multiplier = 1.0;
    if (weather == BAD_WEATHER)
    {
        weather_multiplier = 0.75;
    }
    // Effective speed is capped by both the car and the street it is on.
    float speed = min(d_Actors[car_id].max_velocity, d_Streets[d_Actors[car_id].street].max_velocity) * weather_multiplier;
    d_Actors[car_id].progress = d_Actors[car_id].progress + (speed / 60.0); /* 1 tick = 1 minute */
    if (d_Actors[car_id].progress >= d_Streets[d_Actors[car_id].street].length)
    {
        // move to different street: pick a pseudo-random neighbor of the
        // current street and restart progress at its beginning.
        int array_id = d_Streets[d_Actors[car_id].street].neighbor_array_index;
        int neighbor_index = d_randomn[d_Actors[car_id].street] % d_Array_Street_size[array_id];
        d_Actors[car_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
        d_Actors[car_id].progress = 0.0f;
    }
}
// Advance one pedestrian by a single tick.
// NOTE(review): the `weather` parameter is accepted for interface symmetry
// with method_Car_move but is unused here.
__device__ void method_Pedestrian_move(int ped_id, int weather)
{
// Pseudo-random speed in [-2, 4]: hash the pedestrian's squared progress
// into the d_randomn table. Negative speeds move the pedestrian backwards.
float speed = d_randomn[((int) (d_Actors[ped_id].progress*d_Actors[ped_id].progress)) % NUM_STREETS] % 7 - 2;
d_Actors[ped_id].progress = d_Actors[ped_id].progress + (speed / 60.0);
if (d_Actors[ped_id].progress >= d_Streets[d_Actors[ped_id].street].length)
{
// move to different street
int array_id = d_Streets[d_Actors[ped_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[ped_id].street] % d_Array_Street_size[array_id];
d_Actors[ped_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[ped_id].progress = 0.0f;
}
}
// Advance a single actor through `ticks` simulation steps, dispatching each
// step on the actor's tag (car vs. pedestrian). Tags other than TAG_Car and
// TAG_Pedestrian are ignored, matching the original switch with no default.
__device__ void block(int actor_id, int weather, int ticks)
{
    for (int tick = 0; tick < ticks; ++tick)
    {
        const int tag = d_Actors[actor_id].tag;
        if (tag == TAG_Car)
        {
            method_Car_move(actor_id, weather);
        }
        else if (tag == TAG_Pedestrian)
        {
            method_Pedestrian_move(actor_id, weather);
        }
    }
}
// Entry kernel: one thread per job. Each thread (redundantly, with the same
// values) publishes the buffer pointers into the __device__ globals used by
// the move methods, then simulates the actor selected for this thread.
// NOTE(review): __syncthreads() only orders threads within a block; the
// writes happen to be identical in every thread, so the races are benign —
// confirm this is intentional.
__global__ void kernel(int weather, int ticks,
struct_Actor *v_d_Actors, struct_Street *v_d_Streets,
int *v_d_Array_Street_size, int *v_d_Array_Street_offset, int *v_d_Array_Street_arrays,
int *v_d_input_actor_id, int *v_d_jobs, int *v_d_randomn)
{
d_Actors = v_d_Actors;
d_Streets = v_d_Streets;
d_Array_Street_size = v_d_Array_Street_size;
d_Array_Street_offset = v_d_Array_Street_offset;
d_Array_Street_arrays = v_d_Array_Street_arrays;
d_input_actor_id = v_d_input_actor_id;
d_jobs = v_d_jobs;
d_randomn = v_d_randomn;
// global thread index selects which actor id (or job) this thread runs
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
#if (REORDER)
block(d_input_actor_id[d_jobs[tid]], weather, ticks);
#else
block(d_input_actor_id[tid], weather, ticks);
#endif
}
// Host driver: builds a synthetic traffic scenario, converts it to an
// array-of-structs ("row") layout, copies everything to the GPU, runs the
// simulation kernel for 1,000,000 ticks and reports the elapsed time.
// NOTE: the exact sequence of rand() calls below (seeded with srand(42))
// defines the dataset -- do not reorder the initialization loops.
int main()
{
printf("Setting up scenario...\n");
srand(42);
// streets
float *Street_length = new float[NUM_STREETS];
float *Street_max_velocity = new float[NUM_STREETS];
int *Street_neighbors = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
Street_length[i] = rand() % MAX_LEN + 1;
Street_max_velocity[i] = rand() % 40 + 45; /* speed between 45 and 84 */
Street_neighbors[i] = i;
}
// neighbors: CSR-style layout -- per-street offset and count into one
// flat connection array.
int *Array_Street_offset = new int[NUM_STREETS];
int *Array_Street_size = new int[NUM_STREETS];
int num_connections = 0;
for (int i = 0; i < NUM_STREETS; i++)
{
Array_Street_offset[i] = num_connections;
int connections = rand() % MAX_CONNECTIONS + 1;
Array_Street_size[i] = connections;
num_connections += connections;
}
int *Array_Street_arrays = new int[num_connections];
for (int i = 0; i < num_connections; i++)
{
Array_Street_arrays[i] = rand() % NUM_STREETS;
}
// actors
int *Actor_street = new int[NUM_PEDS + NUM_CARS];
float *Actor_progress = new float[NUM_PEDS + NUM_CARS];
float *Car_max_velocity = new float[NUM_CARS + NUM_PEDS];
int *Actor_id = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_street[i] = rand() % NUM_STREETS;
Actor_progress[i] = rand() % 10;
Car_max_velocity[i] = rand() % 20 + 65;
}
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_id[i] = i;
}
// jobs (dummy): identity mapping, only consulted when REORDER is set
int *jobs = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_CARS + NUM_PEDS; i++)
{
jobs[i] = i;
}
// random numbers
int *randomn = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
// TODO: real random
randomn[i] = rand() % NUM_STREETS;
}
printf("Scenario set up.\n");
printf("Converting data to row format...\n");
struct_Actor *actors = new struct_Actor[NUM_CARS + NUM_PEDS];
struct_Street *streets = new struct_Street[NUM_STREETS];
// First NUM_PEDS slots are pedestrians (max_velocity left unset for them;
// it is only read for cars).
for (int i = 0; i < NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].tag = TAG_Pedestrian;
}
for (int i = NUM_PEDS; i < NUM_CARS + NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].max_velocity = Car_max_velocity[i];
actors[i].tag = TAG_Car;
}
for (int i = 0; i < NUM_STREETS; i++)
{
streets[i].length = Street_length[i];
streets[i].max_velocity = Street_max_velocity[i];
streets[i].neighbor_array_index = Street_neighbors[i];
}
// Reseed so the shuffle below is reproducible independently of how many
// rand() calls the setup consumed.
std::srand(42);
#if !(REORDER)
random_shuffle(actors, actors + NUM_CARS + NUM_PEDS);
#endif
printf("Done converting data.\n");
printf("Copying data to GPU...\n");
struct_Actor *v_d_Actors;
struct_Street *v_d_Streets;
int *v_d_Array_Street_size;
int *v_d_Array_Street_offset;
int *v_d_Array_Street_arrays;
int *v_d_input_actor_tag;
int *v_d_input_actor_id;
int *v_d_jobs;
int *v_d_randomn;
CudaSafeCall(cudaMalloc((void**) &v_d_Actors, sizeof(struct_Actor) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_Streets, sizeof(struct_Street) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_size, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_offset, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_arrays, sizeof(int) * num_connections));
// NOTE(review): v_d_input_actor_tag is allocated here but never written,
// copied, or passed to the kernel -- dead allocation, leaked until exit.
CudaSafeCall(cudaMalloc((void**) &v_d_input_actor_tag, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_input_actor_id, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_jobs, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_randomn, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMemcpy(v_d_Actors, &actors[0], sizeof(struct_Actor) * (NUM_CARS + NUM_PEDS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Streets, &streets[0], sizeof(struct_Street) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_size, &Array_Street_size[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_offset, &Array_Street_offset[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_arrays, &Array_Street_arrays[0], sizeof(int) * num_connections, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_input_actor_id, &Actor_id[0], sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_jobs, &jobs[0], sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_randomn, &randomn[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
printf("Finished copying data.\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Launching kernel...\n");
cudaEventRecord(start);
// Grid: 32 blocks of (NUM_PEDS + NUM_CARS) / 32 = 640 threads each --
// exactly one thread per actor.
kernel<<<dim3(32), dim3((NUM_PEDS + NUM_CARS) / 32)>>>(GOOD_WEATHER, 1000000,
v_d_Actors, v_d_Streets,
v_d_Array_Street_size, v_d_Array_Street_offset, v_d_Array_Street_arrays,
v_d_input_actor_id, v_d_jobs, v_d_randomn);
CudaCheckError();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
CudaCheckError();
printf("Kernel finished.\n");
// cudaMemcpy(Actor_progress, v_d_Actor_progress, sizeof(float) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %f ", Actor_progress[i]);
// }
// cudaMemcpy(Actor_street, v_d_Actor_street, sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %i ", Actor_street[i]);
// }
printf("\n\n\nElapsed time millis: %f\n", milliseconds);
// NOTE(review): no cudaFree/delete[]/cudaEventDestroy -- all resources are
// reclaimed at process exit. Acceptable for a benchmark, but worth fixing
// if this code is ever reused as a library.
}
.text
#APP
#NO_APP
# atexit handler: unregisters the embedded CUDA fat binary with the runtime.
# Clobbers: rdi, caller-saved regs of the callee. Leaf frame keeps rsp 16-aligned.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2345:
.cfi_startproc
endbr64
# 8-byte adjustment realigns rsp to 16 before the call below
subq $8, %rsp
.cfi_def_cfa_offset 16
# arg0 = saved fat-cubin handle
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2345:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata._Z14__cudaSafeCall9cudaErrorPKci.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "cudaSafeCall() failed at %s:%i : %s\n"
.section .text._Z14__cudaSafeCall9cudaErrorPKci,"axG",@progbits,_Z14__cudaSafeCall9cudaErrorPKci,comdat
.weak _Z14__cudaSafeCall9cudaErrorPKci
.type _Z14__cudaSafeCall9cudaErrorPKci, @function
_Z14__cudaSafeCall9cudaErrorPKci:
.LFB2337:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movl %edx, %ebp
call cudaGetErrorString@PLT
movq %rax, %r9
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2337:
.size _Z14__cudaSafeCall9cudaErrorPKci, .-_Z14__cudaSafeCall9cudaErrorPKci
.section .rodata._Z16__cudaCheckErrorPKci.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "cudaCheckError() failed at %s:%i : %s\n"
.align 8
.LC2:
.string "cudaCheckError() with sync failed at %s:%i : %s\n"
.section .text._Z16__cudaCheckErrorPKci,"axG",@progbits,_Z16__cudaCheckErrorPKci,comdat
.weak _Z16__cudaCheckErrorPKci
.type _Z16__cudaCheckErrorPKci, @function
_Z16__cudaCheckErrorPKci:
.LFB2338:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movl %esi, %ebp
call cudaGetLastError@PLT
testl %eax, %eax
jne .L13
call cudaDeviceSynchronize@PLT
testl %eax, %eax
jne .L14
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L14:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2338:
.size _Z16__cudaCheckErrorPKci, .-_Z16__cudaCheckErrorPKci
.text
# Host-side stand-in for the __device__ function method_Car_move(int,int).
# Presumably emitted by nvcc for a device-only function: calling it from
# host code is invalid, so the body unconditionally calls exit(1).
.globl _Z15method_Car_moveii
.type _Z15method_Car_moveii, @function
_Z15method_Car_moveii:
.LFB2339:
.cfi_startproc
endbr64
# push/pop pair is a net no-op (compiler artifact)
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
# exit status 1 staged in a stack slot, then passed in edi; never returns
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2339:
.size _Z15method_Car_moveii, .-_Z15method_Car_moveii
# Host-side stand-in for the __device__ function method_Pedestrian_move(int,int).
# Same pattern as _Z15method_Car_moveii: host invocation aborts via exit(1).
.globl _Z22method_Pedestrian_moveii
.type _Z22method_Pedestrian_moveii, @function
_Z22method_Pedestrian_moveii:
.LFB2340:
.cfi_startproc
endbr64
# push/pop pair is a net no-op (compiler artifact)
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
# exit status 1 staged in a stack slot, then passed in edi; never returns
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2340:
.size _Z22method_Pedestrian_moveii, .-_Z22method_Pedestrian_moveii
# Host-side stand-in for the __device__ function block(int,int,int).
# Same pattern as the two method stubs above: host invocation aborts via exit(1).
.globl _Z5blockiii
.type _Z5blockiii, @function
_Z5blockiii:
.LFB2341:
.cfi_startproc
endbr64
# push/pop pair is a net no-op (compiler artifact)
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
# exit status 1 staged in a stack slot, then passed in edi; never returns
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2341:
.size _Z5blockiii, .-_Z5blockiii
.globl _Z73__device_stub__Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_iiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.type _Z73__device_stub__Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_iiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, @function
_Z73__device_stub__Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_iiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_:
.LFB2367:
.cfi_startproc
endbr64
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 48(%rsp)
movq %r9, 40(%rsp)
movq 256(%rsp), %rax
movq %rax, 32(%rsp)
movq 264(%rsp), %rax
movq %rax, 24(%rsp)
movq 272(%rsp), %rax
movq %rax, 16(%rsp)
movq 280(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 64(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 40(%rsp), %rax
movq %rax, 184(%rsp)
leaq 32(%rsp), %rax
movq %rax, 192(%rsp)
leaq 24(%rsp), %rax
movq %rax, 200(%rsp)
leaq 16(%rsp), %rax
movq %rax, 208(%rsp)
leaq 8(%rsp), %rax
movq %rax, 216(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 264
pushq 88(%rsp)
.cfi_def_cfa_offset 272
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2367:
.size _Z73__device_stub__Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_iiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, .-_Z73__device_stub__Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_iiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.globl _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.type _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, @function
_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_:
.LFB2368:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 40(%rsp)
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z73__device_stub__Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_iiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2368:
.size _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, .-_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_"
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "d_Actors"
.LC5:
.string "d_Streets"
.LC6:
.string "d_Array_Street_arrays"
.LC7:
.string "d_Array_Street_offset"
.LC8:
.string "d_Array_Street_size"
.LC9:
.string "d_input_actor_id"
.LC10:
.string "d_jobs"
.LC11:
.string "d_randomn"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2370:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8d_Actors(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9d_Streets(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21d_Array_Street_arrays(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21d_Array_Street_offset(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL19d_Array_Street_size(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16d_input_actor_id(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL6d_jobs(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9d_randomn(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2370:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZSt14random_shuffleIP12struct_ActorEvT_S2_,"axG",@progbits,_ZSt14random_shuffleIP12struct_ActorEvT_S2_,comdat
.weak _ZSt14random_shuffleIP12struct_ActorEvT_S2_
.type _ZSt14random_shuffleIP12struct_ActorEvT_S2_, @function
_ZSt14random_shuffleIP12struct_ActorEvT_S2_:
.LFB2416:
.cfi_startproc
endbr64
cmpq %rsi, %rdi
je .L37
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
leaq 56(%rdi), %rbx
cmpq %rbx, %rsi
je .L31
movq %rsi, (%rsp)
jmp .L34
.L33:
addq $56, %rbx
cmpq %rbx, (%rsp)
je .L31
.L34:
call rand@PLT
cltq
movq %rbx, %rcx
subq %rbp, %rcx
sarq $3, %rcx
movabsq $7905747460161236407, %rsi
imulq %rsi, %rcx
addq $1, %rcx
cqto
idivq %rcx
leaq 0(,%rdx,8), %rax
subq %rdx, %rax
leaq 0(%rbp,%rax,8), %rax
cmpq %rbx, %rax
je .L33
movss (%rbx), %xmm1
movl 4(%rbx), %r15d
movss 8(%rbx), %xmm0
movl 12(%rbx), %r14d
movl 16(%rbx), %r13d
movl 20(%rbx), %r11d
movl 24(%rbx), %r10d
movl 28(%rbx), %r9d
movl 32(%rbx), %r8d
movl 36(%rbx), %edi
movl 40(%rbx), %esi
movl %esi, 12(%rsp)
movl 44(%rbx), %esi
movl 48(%rbx), %ecx
movl 52(%rbx), %edx
movdqu (%rax), %xmm2
movups %xmm2, (%rbx)
movdqu 16(%rax), %xmm3
movups %xmm3, 16(%rbx)
movdqu 32(%rax), %xmm4
movups %xmm4, 32(%rbx)
movq 48(%rax), %r12
movq %r12, 48(%rbx)
movss %xmm1, (%rax)
movl %r15d, 4(%rax)
movss %xmm0, 8(%rax)
movl %r14d, 12(%rax)
movl %r13d, 16(%rax)
movl %r11d, 20(%rax)
movl %r10d, 24(%rax)
movl %r9d, 28(%rax)
movl %r8d, 32(%rax)
movl %edi, 36(%rax)
movl 12(%rsp), %edi
movl %edi, 40(%rax)
movl %esi, 44(%rax)
movl %ecx, 48(%rax)
movl %edx, 52(%rax)
jmp .L33
.L31:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2416:
.size _ZSt14random_shuffleIP12struct_ActorEvT_S2_, .-_ZSt14random_shuffleIP12struct_ActorEvT_S2_
.section .rodata.str1.1
.LC12:
.string "Setting up scenario...\n"
.LC13:
.string "Scenario set up.\n"
.section .rodata.str1.8
.align 8
.LC14:
.string "Converting data to row format...\n"
.section .rodata.str1.1
.LC15:
.string "Done converting data.\n"
.LC16:
.string "Copying data to GPU...\n"
.section .rodata.str1.8
.align 8
.LC17:
.string "/home/ubuntu/Datasets/stackv2/train-structured/prg-titech/array2016-paper/master/benchmarks/benchmark_row_nocache.cu"
.section .rodata.str1.1
.LC18:
.string "Finished copying data.\n"
.LC19:
.string "Launching kernel...\n"
.LC21:
.string "Kernel finished.\n"
.LC22:
.string "\n\n\nElapsed time millis: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2342:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $216, %rsp
.cfi_def_cfa_offset 272
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq .LC12(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $42, %edi
call srand@PLT
movl $2000, %edi
call _Znam@PLT
movq %rax, %r15
movl $2000, %edi
call _Znam@PLT
movq %rax, %rbp
movq %rax, 72(%rsp)
movl $2000, %edi
call _Znam@PLT
movq %rax, %r13
movl $0, %ebx
.L41:
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $35, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
leal (%rdx,%rdx,4), %edx
subl %edx, %eax
addl $1, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%r15,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $36, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
sall $3, %edx
subl %edx, %eax
addl $45, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rbx,4)
movl %ebx, 0(%r13,%rbx,4)
addq $1, %rbx
cmpq $500, %rbx
jne .L41
movl $2000, %edi
call _Znam@PLT
movq %rax, %r12
movq %rax, 24(%rsp)
movl $2000, %edi
call _Znam@PLT
movq %rax, %r14
movq %rax, 32(%rsp)
movl $0, %ebx
movl $0, %ebp
.L42:
movl %ebp, (%r12,%rbx)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
addl $1, %eax
movl %eax, (%r14,%rbx)
addl %eax, %ebp
addq $4, %rbx
cmpq $2000, %rbx
jne .L42
movslq %ebp, %rax
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L43
leaq 0(,%rax,4), %r14
movq %r14, 40(%rsp)
movq %r14, %rdi
call _Znam@PLT
movq %rax, 48(%rsp)
movq %rax, %rbx
leaq (%r14,%rax), %r12
testl %ebp, %ebp
jle .L45
.L47:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $500, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L47
.L45:
movl $81920, %edi
call _Znam@PLT
movq %rax, %rbp
movl $81920, %edi
call _Znam@PLT
movq %rax, %r12
movl $81920, %edi
call _Znam@PLT
movq %rax, %r14
movl $81920, %edi
call _Znam@PLT
movq %rax, 16(%rsp)
movl $0, %ebx
.L48:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $500, %edx, %edx
subl %edx, %eax
movl %eax, 0(%rbp,%rbx)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%r12,%rbx)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $35, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
sall $2, %edx
subl %edx, %eax
addl $65, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%r14,%rbx)
addq $4, %rbx
cmpq $81920, %rbx
jne .L48
movl $0, %eax
movq 16(%rsp), %rdx
.L49:
movl %eax, (%rdx,%rax,4)
addq $1, %rax
cmpq $20480, %rax
jne .L49
movl $81920, %edi
call _Znam@PLT
movq %rax, %rdx
movq %rax, 56(%rsp)
movl $0, %eax
.L50:
movl %eax, (%rdx,%rax,4)
addq $1, %rax
cmpq $20480, %rax
jne .L50
movl $2000, %edi
call _Znam@PLT
movq %rax, 64(%rsp)
movq %rax, %rbx
addq $2000, %rax
movq %r15, 8(%rsp)
movq %rax, %r15
.L51:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $500, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %r15, %rbx
jne .L51
movq 8(%rsp), %r15
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1146880, %edi
call _Znam@PLT
movq %rax, 8(%rsp)
movl $16000, %edi
call _Znam@PLT
movq %rax, %rbx
movq 8(%rsp), %rdx
movl $0, %eax
.L52:
movss (%r12,%rax), %xmm0
movss %xmm0, (%rdx)
movl 0(%rbp,%rax), %ecx
movl %ecx, 4(%rdx)
movl $1, 12(%rdx)
addq $56, %rdx
addq $4, %rax
cmpq $65536, %rax
jne .L52
movq 8(%rsp), %rsi
leaq 917504(%rsi), %rdx
.L53:
movss (%r12,%rax), %xmm0
movss %xmm0, (%rdx)
movl 0(%rbp,%rax), %ecx
movl %ecx, 4(%rdx)
movss (%r14,%rax), %xmm0
movss %xmm0, 8(%rdx)
movl $0, 12(%rdx)
addq $56, %rdx
addq $4, %rax
cmpq $81920, %rax
jne .L53
movl $0, %eax
movq 72(%rsp), %rcx
.L54:
movss (%r15,%rax), %xmm0
movss %xmm0, (%rbx,%rax,8)
movss (%rcx,%rax), %xmm0
movss %xmm0, 4(%rbx,%rax,8)
movl 0(%r13,%rax), %edx
movl %edx, 8(%rbx,%rax,8)
addq $4, %rax
cmpq $2000, %rax
jne .L54
movl $42, %edi
call srand@PLT
movq 8(%rsp), %r14
leaq 1146880(%r14), %rsi
movq %r14, %rdi
call _ZSt14random_shuffleIP12struct_ActorEvT_S2_
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 88(%rsp), %rdi
movl $1146880, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $310, %edx
leaq .LC17(%rip), %rbp
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 96(%rsp), %rdi
movl $16000, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $311, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 104(%rsp), %rdi
movl $2000, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $312, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 112(%rsp), %rdi
movl $2000, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $313, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 120(%rsp), %rdi
movq 40(%rsp), %r15
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $314, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 128(%rsp), %rdi
movl $81920, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $315, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 136(%rsp), %rdi
movl $81920, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $316, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 144(%rsp), %rdi
movl $81920, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $317, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 152(%rsp), %rdi
movl $2000, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $318, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movl $1146880, %edx
movq %r14, %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $320, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movl $16000, %edx
movq %rbx, %rsi
movq 96(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $321, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movl $2000, %edx
movq 32(%rsp), %rsi
movq 104(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $322, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movl $2000, %edx
movq 24(%rsp), %rsi
movq 112(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $323, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movq %r15, %rdx
movq 48(%rsp), %rsi
movq 120(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $324, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movl $81920, %edx
movq 16(%rsp), %rsi
movq 136(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $325, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movl $81920, %edx
movq 56(%rsp), %rsi
movq 144(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $326, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movl $2000, %edx
movq 64(%rsp), %rsi
movq 152(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $327, %edx
movq %rbp, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 160(%rsp), %rdi
call cudaEventCreate@PLT
leaq 168(%rsp), %rdi
call cudaEventCreate@PLT
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 160(%rsp), %rdi
call cudaEventRecord@PLT
movl $640, 188(%rsp)
movl $1, 192(%rsp)
movl $1, 196(%rsp)
movl $32, 176(%rsp)
movl $1, 180(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 188(%rsp), %rdx
movl $1, %ecx
movq 176(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L70
.L55:
movl $341, %esi
leaq .LC17(%rip), %rbx
movq %rbx, %rdi
call _Z16__cudaCheckErrorPKci
movl $0, %esi
movq 168(%rsp), %rdi
call cudaEventRecord@PLT
movq 168(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 188(%rsp)
leaq 188(%rsp), %rdi
movq 168(%rsp), %rdx
movq 160(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $348, %esi
movq %rbx, %rdi
call _Z16__cudaCheckErrorPKci
leaq .LC21(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 188(%rsp), %xmm0
leaq .LC22(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L71
movl $0, %eax
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
movq 200(%rsp), %rax
subq %fs:40, %rax
je .L46
call __stack_chk_fail@PLT
.L46:
call __cxa_throw_bad_array_new_length@PLT
.L70:
pushq 152(%rsp)
.cfi_def_cfa_offset 280
pushq 152(%rsp)
.cfi_def_cfa_offset 288
pushq 152(%rsp)
.cfi_def_cfa_offset 296
pushq 144(%rsp)
.cfi_def_cfa_offset 304
movq 144(%rsp), %r9
movq 136(%rsp), %r8
movq 128(%rsp), %rcx
movq 120(%rsp), %rdx
movl $1000000, %esi
movl $0, %edi
call _Z73__device_stub__Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_iiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
addq $32, %rsp
.cfi_def_cfa_offset 272
jmp .L55
.L71:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2342:
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL9d_randomn
.comm _ZL9d_randomn,8,8
.local _ZL6d_jobs
.comm _ZL6d_jobs,8,8
.local _ZL16d_input_actor_id
.comm _ZL16d_input_actor_id,8,8
.local _ZL19d_Array_Street_size
.comm _ZL19d_Array_Street_size,8,8
.local _ZL21d_Array_Street_offset
.comm _ZL21d_Array_Street_offset,8,8
.local _ZL21d_Array_Street_arrays
.comm _ZL21d_Array_Street_arrays,8,8
.local _ZL9d_Streets
.comm _ZL9d_Streets,8,8
.local _ZL8d_Actors
.comm _ZL8d_Actors,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
/* NOTE(review): stray non-code prose line removed here -- an artifact of
   file concatenation, not part of either the assembly above or the CUDA
   source below. */
#define GOOD_WEATHER 0
#define BAD_WEATHER 1
#define TAG_Car 0
#define TAG_Pedestrian 1
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <random>
//#include <array>
#include <algorithm>
#define NUM_CARS 4096
#define NUM_PEDS 16384
#define NUM_STREETS 500
#define MAX_CONNECTIONS 10
#define MAX_LEN 25
using namespace std;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort the process with file/line context when a CUDA API call returned
// an error. Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( cudaSuccess == err )
    {
        return;
    }
    fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
             file, line, cudaGetErrorString( err ) );
    exit( -1 );
#endif
    return;
}
// Check for asynchronous CUDA errors (e.g. from a kernel launch): first the
// sticky last-error flag, then a full device synchronization to surface
// errors from still-running work. Aborts with file/line context on failure.
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking: the synchronize blocks until all preceding GPU
// work finishes, which affects performance. Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// One simulated actor (car or pedestrian) in row (array-of-structs) layout.
// 14 x 4-byte fields = 56 bytes per actor.
typedef struct
{
float progress; // distance along the current street; reset on street change
int street; // index into the streets array (d_Streets)
float max_velocity; // only assigned/read for cars (tag == TAG_Car)
int tag; // TAG_Car or TAG_Pedestrian
int a1; // a1..a10: unused padding fields that widen the struct so that
int a2; // consecutive actors' hot fields land farther apart in memory
int a3; // (this benchmark deliberately degrades cache locality of the
int a4; // row layout)
int a5;
int a6;
int a7;
int a8;
int a9;
int a10;
// TODO: add more fields here to avoid cache locality
} struct_Actor;
typedef struct
{
float length;
float max_velocity;
int neighbor_array_index;
int s1;
int s2;
int s3;
int s4;
int s5;
} struct_Street;
typedef struct
{
int tag;
int id;
} tag_id_pair;
__device__ struct_Actor *d_Actors;
__device__ struct_Street *d_Streets;
__device__ int *d_Array_Street_arrays;
__device__ int *d_Array_Street_offset;
__device__ int *d_Array_Street_size;
__device__ int *d_input_actor_id;
__device__ int *d_jobs;
__device__ int *d_randomn;
__device__ void method_Car_move(int car_id, int weather)
{
float weather_multiplier;
if (weather == GOOD_WEATHER)
{
weather_multiplier = 1.0;
}
else if (weather == BAD_WEATHER)
{
weather_multiplier = 0.75;
}
float speed = min(d_Actors[car_id].max_velocity, d_Streets[d_Actors[car_id].street].max_velocity) * weather_multiplier;
d_Actors[car_id].progress = d_Actors[car_id].progress + (speed / 60.0); /* 1 tick = 1 minute */
if (d_Actors[car_id].progress >= d_Streets[d_Actors[car_id].street].length)
{
// move to different street
int array_id = d_Streets[d_Actors[car_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[car_id].street] % d_Array_Street_size[array_id];
d_Actors[car_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[car_id].progress = 0.0f;
}
}
__device__ void method_Pedestrian_move(int ped_id, int weather)
{
float speed = d_randomn[((int) (d_Actors[ped_id].progress*d_Actors[ped_id].progress)) % NUM_STREETS] % 7 - 2;
d_Actors[ped_id].progress = d_Actors[ped_id].progress + (speed / 60.0);
if (d_Actors[ped_id].progress >= d_Streets[d_Actors[ped_id].street].length)
{
// move to different street
int array_id = d_Streets[d_Actors[ped_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[ped_id].street] % d_Array_Street_size[array_id];
d_Actors[ped_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[ped_id].progress = 0.0f;
}
}
__device__ void block(int actor_id, int weather, int ticks)
{
for (int i = 0; i < ticks; i++)
{
switch (d_Actors[actor_id].tag)
{
case TAG_Car:
method_Car_move(actor_id, weather);
break;
case TAG_Pedestrian:
method_Pedestrian_move(actor_id, weather);
break;
}
}
}
__global__ void kernel(int weather, int ticks,
struct_Actor *v_d_Actors, struct_Street *v_d_Streets,
int *v_d_Array_Street_size, int *v_d_Array_Street_offset, int *v_d_Array_Street_arrays,
int *v_d_input_actor_id, int *v_d_jobs, int *v_d_randomn)
{
d_Actors = v_d_Actors;
d_Streets = v_d_Streets;
d_Array_Street_size = v_d_Array_Street_size;
d_Array_Street_offset = v_d_Array_Street_offset;
d_Array_Street_arrays = v_d_Array_Street_arrays;
d_input_actor_id = v_d_input_actor_id;
d_jobs = v_d_jobs;
d_randomn = v_d_randomn;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
#if (REORDER)
block(d_input_actor_id[d_jobs[tid]], weather, ticks);
#else
block(d_input_actor_id[tid], weather, ticks);
#endif
}
int main()
{
printf("Setting up scenario...\n");
srand(42);
// streets
float *Street_length = new float[NUM_STREETS];
float *Street_max_velocity = new float[NUM_STREETS];
int *Street_neighbors = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
Street_length[i] = rand() % MAX_LEN + 1;
Street_max_velocity[i] = rand() % 40 + 45; /* speed between 45 and 105 */
Street_neighbors[i] = i;
}
// neighbors
int *Array_Street_offset = new int[NUM_STREETS];
int *Array_Street_size = new int[NUM_STREETS];
int num_connections = 0;
for (int i = 0; i < NUM_STREETS; i++)
{
Array_Street_offset[i] = num_connections;
int connections = rand() % MAX_CONNECTIONS + 1;
Array_Street_size[i] = connections;
num_connections += connections;
}
int *Array_Street_arrays = new int[num_connections];
for (int i = 0; i < num_connections; i++)
{
Array_Street_arrays[i] = rand() % NUM_STREETS;
}
// actors
int *Actor_street = new int[NUM_PEDS + NUM_CARS];
float *Actor_progress = new float[NUM_PEDS + NUM_CARS];
float *Car_max_velocity = new float[NUM_CARS + NUM_PEDS];
int *Actor_id = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_street[i] = rand() % NUM_STREETS;
Actor_progress[i] = rand() % 10;
Car_max_velocity[i] = rand() % 20 + 65;
}
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_id[i] = i;
}
// jobs (dummy)
int *jobs = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_CARS + NUM_PEDS; i++)
{
jobs[i] = i;
}
// random numbers
int *randomn = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
// TODO: real random
randomn[i] = rand() % NUM_STREETS;
}
printf("Scenario set up.\n");
printf("Converting data to row format...\n");
struct_Actor *actors = new struct_Actor[NUM_CARS + NUM_PEDS];
struct_Street *streets = new struct_Street[NUM_STREETS];
for (int i = 0; i < NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].tag = TAG_Pedestrian;
}
for (int i = NUM_PEDS; i < NUM_CARS + NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].max_velocity = Car_max_velocity[i];
actors[i].tag = TAG_Car;
}
for (int i = 0; i < NUM_STREETS; i++)
{
streets[i].length = Street_length[i];
streets[i].max_velocity = Street_max_velocity[i];
streets[i].neighbor_array_index = Street_neighbors[i];
}
std::srand(42);
#if !(REORDER)
random_shuffle(actors, actors + NUM_CARS + NUM_PEDS);
#endif
printf("Done converting data.\n");
printf("Copying data to GPU...\n");
struct_Actor *v_d_Actors;
struct_Street *v_d_Streets;
int *v_d_Array_Street_size;
int *v_d_Array_Street_offset;
int *v_d_Array_Street_arrays;
int *v_d_input_actor_tag;
int *v_d_input_actor_id;
int *v_d_jobs;
int *v_d_randomn;
CudaSafeCall(cudaMalloc((void**) &v_d_Actors, sizeof(struct_Actor) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_Streets, sizeof(struct_Street) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_size, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_offset, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_arrays, sizeof(int) * num_connections));
CudaSafeCall(cudaMalloc((void**) &v_d_input_actor_tag, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_input_actor_id, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_jobs, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_randomn, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMemcpy(v_d_Actors, &actors[0], sizeof(struct_Actor) * (NUM_CARS + NUM_PEDS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Streets, &streets[0], sizeof(struct_Street) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_size, &Array_Street_size[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_offset, &Array_Street_offset[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_arrays, &Array_Street_arrays[0], sizeof(int) * num_connections, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_input_actor_id, &Actor_id[0], sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_jobs, &jobs[0], sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_randomn, &randomn[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
printf("Finished copying data.\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Launching kernel...\n");
cudaEventRecord(start);
kernel<<<dim3(32), dim3((NUM_PEDS + NUM_CARS) / 32)>>>(GOOD_WEATHER, 1000000,
v_d_Actors, v_d_Streets,
v_d_Array_Street_size, v_d_Array_Street_offset, v_d_Array_Street_arrays,
v_d_input_actor_id, v_d_jobs, v_d_randomn);
CudaCheckError();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
CudaCheckError();
printf("Kernel finished.\n");
// cudaMemcpy(Actor_progress, v_d_Actor_progress, sizeof(float) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %f ", Actor_progress[i]);
// }
// cudaMemcpy(Actor_street, v_d_Actor_street, sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %i ", Actor_street[i]);
// }
printf("\n\n\nElapsed time millis: %f\n", milliseconds);
} | #define REORDER 0
#define GOOD_WEATHER 0
#define BAD_WEATHER 1
#define TAG_Car 0
#define TAG_Pedestrian 1
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <random>
//#include <array>
#include <algorithm>
#define NUM_CARS 4096
#define NUM_PEDS 16384
#define NUM_STREETS 500
#define MAX_CONNECTIONS 10
#define MAX_LEN 25
using namespace std;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
typedef struct
{
float progress;
int street;
float max_velocity;
int tag;
int a1;
int a2;
int a3;
int a4;
int a5;
int a6;
int a7;
int a8;
int a9;
int a10;
// TODO: add more fields here to avoid cache locality
} struct_Actor;
typedef struct
{
float length;
float max_velocity;
int neighbor_array_index;
int s1;
int s2;
int s3;
int s4;
int s5;
} struct_Street;
typedef struct
{
int tag;
int id;
} tag_id_pair;
__device__ struct_Actor *d_Actors;
__device__ struct_Street *d_Streets;
__device__ int *d_Array_Street_arrays;
__device__ int *d_Array_Street_offset;
__device__ int *d_Array_Street_size;
__device__ int *d_input_actor_id;
__device__ int *d_jobs;
__device__ int *d_randomn;
__device__ void method_Car_move(int car_id, int weather)
{
float weather_multiplier;
if (weather == GOOD_WEATHER)
{
weather_multiplier = 1.0;
}
else if (weather == BAD_WEATHER)
{
weather_multiplier = 0.75;
}
float speed = min(d_Actors[car_id].max_velocity, d_Streets[d_Actors[car_id].street].max_velocity) * weather_multiplier;
d_Actors[car_id].progress = d_Actors[car_id].progress + (speed / 60.0); /* 1 tick = 1 minute */
if (d_Actors[car_id].progress >= d_Streets[d_Actors[car_id].street].length)
{
// move to different street
int array_id = d_Streets[d_Actors[car_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[car_id].street] % d_Array_Street_size[array_id];
d_Actors[car_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[car_id].progress = 0.0f;
}
}
__device__ void method_Pedestrian_move(int ped_id, int weather)
{
float speed = d_randomn[((int) (d_Actors[ped_id].progress*d_Actors[ped_id].progress)) % NUM_STREETS] % 7 - 2;
d_Actors[ped_id].progress = d_Actors[ped_id].progress + (speed / 60.0);
if (d_Actors[ped_id].progress >= d_Streets[d_Actors[ped_id].street].length)
{
// move to different street
int array_id = d_Streets[d_Actors[ped_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[ped_id].street] % d_Array_Street_size[array_id];
d_Actors[ped_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[ped_id].progress = 0.0f;
}
}
__device__ void block(int actor_id, int weather, int ticks)
{
for (int i = 0; i < ticks; i++)
{
switch (d_Actors[actor_id].tag)
{
case TAG_Car:
method_Car_move(actor_id, weather);
break;
case TAG_Pedestrian:
method_Pedestrian_move(actor_id, weather);
break;
}
}
}
__global__ void kernel(int weather, int ticks,
struct_Actor *v_d_Actors, struct_Street *v_d_Streets,
int *v_d_Array_Street_size, int *v_d_Array_Street_offset, int *v_d_Array_Street_arrays,
int *v_d_input_actor_id, int *v_d_jobs, int *v_d_randomn)
{
d_Actors = v_d_Actors;
d_Streets = v_d_Streets;
d_Array_Street_size = v_d_Array_Street_size;
d_Array_Street_offset = v_d_Array_Street_offset;
d_Array_Street_arrays = v_d_Array_Street_arrays;
d_input_actor_id = v_d_input_actor_id;
d_jobs = v_d_jobs;
d_randomn = v_d_randomn;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
#if (REORDER)
block(d_input_actor_id[d_jobs[tid]], weather, ticks);
#else
block(d_input_actor_id[tid], weather, ticks);
#endif
}
int main()
{
printf("Setting up scenario...\n");
srand(42);
// streets
float *Street_length = new float[NUM_STREETS];
float *Street_max_velocity = new float[NUM_STREETS];
int *Street_neighbors = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
Street_length[i] = rand() % MAX_LEN + 1;
Street_max_velocity[i] = rand() % 40 + 45; /* speed between 45 and 105 */
Street_neighbors[i] = i;
}
// neighbors
int *Array_Street_offset = new int[NUM_STREETS];
int *Array_Street_size = new int[NUM_STREETS];
int num_connections = 0;
for (int i = 0; i < NUM_STREETS; i++)
{
Array_Street_offset[i] = num_connections;
int connections = rand() % MAX_CONNECTIONS + 1;
Array_Street_size[i] = connections;
num_connections += connections;
}
int *Array_Street_arrays = new int[num_connections];
for (int i = 0; i < num_connections; i++)
{
Array_Street_arrays[i] = rand() % NUM_STREETS;
}
// actors
int *Actor_street = new int[NUM_PEDS + NUM_CARS];
float *Actor_progress = new float[NUM_PEDS + NUM_CARS];
float *Car_max_velocity = new float[NUM_CARS + NUM_PEDS];
int *Actor_id = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_street[i] = rand() % NUM_STREETS;
Actor_progress[i] = rand() % 10;
Car_max_velocity[i] = rand() % 20 + 65;
}
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_id[i] = i;
}
// jobs (dummy)
int *jobs = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_CARS + NUM_PEDS; i++)
{
jobs[i] = i;
}
// random numbers
int *randomn = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
// TODO: real random
randomn[i] = rand() % NUM_STREETS;
}
printf("Scenario set up.\n");
printf("Converting data to row format...\n");
struct_Actor *actors = new struct_Actor[NUM_CARS + NUM_PEDS];
struct_Street *streets = new struct_Street[NUM_STREETS];
for (int i = 0; i < NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].tag = TAG_Pedestrian;
}
for (int i = NUM_PEDS; i < NUM_CARS + NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].max_velocity = Car_max_velocity[i];
actors[i].tag = TAG_Car;
}
for (int i = 0; i < NUM_STREETS; i++)
{
streets[i].length = Street_length[i];
streets[i].max_velocity = Street_max_velocity[i];
streets[i].neighbor_array_index = Street_neighbors[i];
}
std::srand(42);
#if !(REORDER)
random_shuffle(actors, actors + NUM_CARS + NUM_PEDS);
#endif
printf("Done converting data.\n");
printf("Copying data to GPU...\n");
struct_Actor *v_d_Actors;
struct_Street *v_d_Streets;
int *v_d_Array_Street_size;
int *v_d_Array_Street_offset;
int *v_d_Array_Street_arrays;
int *v_d_input_actor_tag;
int *v_d_input_actor_id;
int *v_d_jobs;
int *v_d_randomn;
CudaSafeCall(hipMalloc((void**) &v_d_Actors, sizeof(struct_Actor) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_Streets, sizeof(struct_Street) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_size, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_offset, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_arrays, sizeof(int) * num_connections));
CudaSafeCall(hipMalloc((void**) &v_d_input_actor_tag, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_input_actor_id, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_jobs, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_randomn, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMemcpy(v_d_Actors, &actors[0], sizeof(struct_Actor) * (NUM_CARS + NUM_PEDS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Streets, &streets[0], sizeof(struct_Street) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_size, &Array_Street_size[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_offset, &Array_Street_offset[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_arrays, &Array_Street_arrays[0], sizeof(int) * num_connections, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_input_actor_id, &Actor_id[0], sizeof(int) * (NUM_PEDS + NUM_CARS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_jobs, &jobs[0], sizeof(int) * (NUM_PEDS + NUM_CARS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_randomn, &randomn[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
printf("Finished copying data.\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("Launching kernel...\n");
hipEventRecord(start);
kernel<<<dim3(32), dim3((NUM_PEDS + NUM_CARS) / 32)>>>(GOOD_WEATHER, 1000000,
v_d_Actors, v_d_Streets,
v_d_Array_Street_size, v_d_Array_Street_offset, v_d_Array_Street_arrays,
v_d_input_actor_id, v_d_jobs, v_d_randomn);
CudaCheckError();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
CudaCheckError();
printf("Kernel finished.\n");
// cudaMemcpy(Actor_progress, v_d_Actor_progress, sizeof(float) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %f ", Actor_progress[i]);
// }
// cudaMemcpy(Actor_street, v_d_Actor_street, sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %i ", Actor_street[i]);
// }
printf("\n\n\nElapsed time millis: %f\n", milliseconds);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #define REORDER 0
#define GOOD_WEATHER 0
#define BAD_WEATHER 1
#define TAG_Car 0
#define TAG_Pedestrian 1
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <random>
//#include <array>
#include <algorithm>
#define NUM_CARS 4096
#define NUM_PEDS 16384
#define NUM_STREETS 500
#define MAX_CONNECTIONS 10
#define MAX_LEN 25
using namespace std;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
typedef struct
{
float progress;
int street;
float max_velocity;
int tag;
int a1;
int a2;
int a3;
int a4;
int a5;
int a6;
int a7;
int a8;
int a9;
int a10;
// TODO: add more fields here to avoid cache locality
} struct_Actor;
typedef struct
{
float length;
float max_velocity;
int neighbor_array_index;
int s1;
int s2;
int s3;
int s4;
int s5;
} struct_Street;
typedef struct
{
int tag;
int id;
} tag_id_pair;
__device__ struct_Actor *d_Actors;
__device__ struct_Street *d_Streets;
__device__ int *d_Array_Street_arrays;
__device__ int *d_Array_Street_offset;
__device__ int *d_Array_Street_size;
__device__ int *d_input_actor_id;
__device__ int *d_jobs;
__device__ int *d_randomn;
__device__ void method_Car_move(int car_id, int weather)
{
float weather_multiplier;
if (weather == GOOD_WEATHER)
{
weather_multiplier = 1.0;
}
else if (weather == BAD_WEATHER)
{
weather_multiplier = 0.75;
}
float speed = min(d_Actors[car_id].max_velocity, d_Streets[d_Actors[car_id].street].max_velocity) * weather_multiplier;
d_Actors[car_id].progress = d_Actors[car_id].progress + (speed / 60.0); /* 1 tick = 1 minute */
if (d_Actors[car_id].progress >= d_Streets[d_Actors[car_id].street].length)
{
// move to different street
int array_id = d_Streets[d_Actors[car_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[car_id].street] % d_Array_Street_size[array_id];
d_Actors[car_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[car_id].progress = 0.0f;
}
}
__device__ void method_Pedestrian_move(int ped_id, int weather)
{
float speed = d_randomn[((int) (d_Actors[ped_id].progress*d_Actors[ped_id].progress)) % NUM_STREETS] % 7 - 2;
d_Actors[ped_id].progress = d_Actors[ped_id].progress + (speed / 60.0);
if (d_Actors[ped_id].progress >= d_Streets[d_Actors[ped_id].street].length)
{
// move to different street
int array_id = d_Streets[d_Actors[ped_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[ped_id].street] % d_Array_Street_size[array_id];
d_Actors[ped_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[ped_id].progress = 0.0f;
}
}
__device__ void block(int actor_id, int weather, int ticks)
{
for (int i = 0; i < ticks; i++)
{
switch (d_Actors[actor_id].tag)
{
case TAG_Car:
method_Car_move(actor_id, weather);
break;
case TAG_Pedestrian:
method_Pedestrian_move(actor_id, weather);
break;
}
}
}
__global__ void kernel(int weather, int ticks,
struct_Actor *v_d_Actors, struct_Street *v_d_Streets,
int *v_d_Array_Street_size, int *v_d_Array_Street_offset, int *v_d_Array_Street_arrays,
int *v_d_input_actor_id, int *v_d_jobs, int *v_d_randomn)
{
d_Actors = v_d_Actors;
d_Streets = v_d_Streets;
d_Array_Street_size = v_d_Array_Street_size;
d_Array_Street_offset = v_d_Array_Street_offset;
d_Array_Street_arrays = v_d_Array_Street_arrays;
d_input_actor_id = v_d_input_actor_id;
d_jobs = v_d_jobs;
d_randomn = v_d_randomn;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
#if (REORDER)
block(d_input_actor_id[d_jobs[tid]], weather, ticks);
#else
block(d_input_actor_id[tid], weather, ticks);
#endif
}
int main()
{
printf("Setting up scenario...\n");
srand(42);
// streets
float *Street_length = new float[NUM_STREETS];
float *Street_max_velocity = new float[NUM_STREETS];
int *Street_neighbors = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
Street_length[i] = rand() % MAX_LEN + 1;
Street_max_velocity[i] = rand() % 40 + 45; /* speed between 45 and 105 */
Street_neighbors[i] = i;
}
// neighbors
int *Array_Street_offset = new int[NUM_STREETS];
int *Array_Street_size = new int[NUM_STREETS];
int num_connections = 0;
for (int i = 0; i < NUM_STREETS; i++)
{
Array_Street_offset[i] = num_connections;
int connections = rand() % MAX_CONNECTIONS + 1;
Array_Street_size[i] = connections;
num_connections += connections;
}
int *Array_Street_arrays = new int[num_connections];
for (int i = 0; i < num_connections; i++)
{
Array_Street_arrays[i] = rand() % NUM_STREETS;
}
// actors
int *Actor_street = new int[NUM_PEDS + NUM_CARS];
float *Actor_progress = new float[NUM_PEDS + NUM_CARS];
float *Car_max_velocity = new float[NUM_CARS + NUM_PEDS];
int *Actor_id = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_street[i] = rand() % NUM_STREETS;
Actor_progress[i] = rand() % 10;
Car_max_velocity[i] = rand() % 20 + 65;
}
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_id[i] = i;
}
// jobs (dummy)
int *jobs = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_CARS + NUM_PEDS; i++)
{
jobs[i] = i;
}
// random numbers
int *randomn = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
// TODO: real random
randomn[i] = rand() % NUM_STREETS;
}
printf("Scenario set up.\n");
printf("Converting data to row format...\n");
struct_Actor *actors = new struct_Actor[NUM_CARS + NUM_PEDS];
struct_Street *streets = new struct_Street[NUM_STREETS];
for (int i = 0; i < NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].tag = TAG_Pedestrian;
}
for (int i = NUM_PEDS; i < NUM_CARS + NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].max_velocity = Car_max_velocity[i];
actors[i].tag = TAG_Car;
}
for (int i = 0; i < NUM_STREETS; i++)
{
streets[i].length = Street_length[i];
streets[i].max_velocity = Street_max_velocity[i];
streets[i].neighbor_array_index = Street_neighbors[i];
}
std::srand(42);
#if !(REORDER)
random_shuffle(actors, actors + NUM_CARS + NUM_PEDS);
#endif
printf("Done converting data.\n");
printf("Copying data to GPU...\n");
struct_Actor *v_d_Actors;
struct_Street *v_d_Streets;
int *v_d_Array_Street_size;
int *v_d_Array_Street_offset;
int *v_d_Array_Street_arrays;
int *v_d_input_actor_tag;
int *v_d_input_actor_id;
int *v_d_jobs;
int *v_d_randomn;
CudaSafeCall(hipMalloc((void**) &v_d_Actors, sizeof(struct_Actor) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_Streets, sizeof(struct_Street) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_size, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_offset, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_arrays, sizeof(int) * num_connections));
CudaSafeCall(hipMalloc((void**) &v_d_input_actor_tag, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_input_actor_id, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_jobs, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_randomn, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMemcpy(v_d_Actors, &actors[0], sizeof(struct_Actor) * (NUM_CARS + NUM_PEDS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Streets, &streets[0], sizeof(struct_Street) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_size, &Array_Street_size[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_offset, &Array_Street_offset[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_arrays, &Array_Street_arrays[0], sizeof(int) * num_connections, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_input_actor_id, &Actor_id[0], sizeof(int) * (NUM_PEDS + NUM_CARS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_jobs, &jobs[0], sizeof(int) * (NUM_PEDS + NUM_CARS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_randomn, &randomn[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
printf("Finished copying data.\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("Launching kernel...\n");
hipEventRecord(start);
kernel<<<dim3(32), dim3((NUM_PEDS + NUM_CARS) / 32)>>>(GOOD_WEATHER, 1000000,
v_d_Actors, v_d_Streets,
v_d_Array_Street_size, v_d_Array_Street_offset, v_d_Array_Street_arrays,
v_d_input_actor_id, v_d_jobs, v_d_randomn);
CudaCheckError();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
CudaCheckError();
printf("Kernel finished.\n");
// cudaMemcpy(Actor_progress, v_d_Actor_progress, sizeof(float) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %f ", Actor_progress[i]);
// }
// cudaMemcpy(Actor_street, v_d_Actor_street, sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %i ", Actor_street[i]);
// }
printf("\n\n\nElapsed time millis: %f\n", milliseconds);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.globl _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.p2align 8
.type _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_,@function
_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_:
s_clause 0x2
s_load_b256 s[20:27], s[0:1], 0x8
s_load_b256 s[36:43], s[0:1], 0x28
s_load_b32 s14, s[0:1], 0x4
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, d_Actors@rel32@lo+4
s_addc_u32 s3, s3, d_Actors@rel32@hi+12
v_mov_b32_e32 v18, 0
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, d_Streets@rel32@lo+4
s_addc_u32 s5, s5, d_Streets@rel32@hi+12
s_getpc_b64 s[6:7]
s_add_u32 s6, s6, d_Array_Street_size@rel32@lo+4
s_addc_u32 s7, s7, d_Array_Street_size@rel32@hi+12
s_getpc_b64 s[8:9]
s_add_u32 s8, s8, d_Array_Street_offset@rel32@lo+4
s_addc_u32 s9, s9, d_Array_Street_offset@rel32@hi+12
s_getpc_b64 s[10:11]
s_add_u32 s10, s10, d_Array_Street_arrays@rel32@lo+4
s_addc_u32 s11, s11, d_Array_Street_arrays@rel32@hi+12
s_getpc_b64 s[16:17]
s_add_u32 s16, s16, d_input_actor_id@rel32@lo+4
s_addc_u32 s17, s17, d_input_actor_id@rel32@hi+12
s_getpc_b64 s[18:19]
s_add_u32 s18, s18, d_jobs@rel32@lo+4
s_addc_u32 s19, s19, d_jobs@rel32@hi+12
s_getpc_b64 s[12:13]
s_add_u32 s12, s12, d_randomn@rel32@lo+4
s_addc_u32 s13, s13, d_randomn@rel32@hi+12
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v1, s20 :: v_dual_mov_b32 v2, s21
v_dual_mov_b32 v3, s22 :: v_dual_mov_b32 v4, s23
v_dual_mov_b32 v5, s24 :: v_dual_mov_b32 v6, s25
v_dual_mov_b32 v7, s26 :: v_dual_mov_b32 v8, s27
v_dual_mov_b32 v9, s36 :: v_dual_mov_b32 v10, s37
v_dual_mov_b32 v11, s38 :: v_dual_mov_b32 v12, s39
v_dual_mov_b32 v13, s40 :: v_dual_mov_b32 v14, s41
v_dual_mov_b32 v15, s42 :: v_dual_mov_b32 v16, s43
s_clause 0x7
global_store_b64 v18, v[1:2], s[2:3]
global_store_b64 v18, v[3:4], s[4:5]
global_store_b64 v18, v[5:6], s[6:7]
global_store_b64 v18, v[7:8], s[8:9]
global_store_b64 v18, v[9:10], s[10:11]
global_store_b64 v18, v[11:12], s[16:17]
global_store_b64 v18, v[13:14], s[18:19]
global_store_b64 v18, v[15:16], s[12:13]
s_add_u32 s18, s0, 0x48
s_addc_u32 s19, s1, 0
s_cmp_lt_i32 s14, 1
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_13
global_load_b64 v[4:5], v18, s[16:17]
s_load_b32 s16, s[18:19], 0xc
s_load_b32 s0, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s16, s16, 0xffff
s_cmp_eq_u32 s0, 1
v_mad_u64_u32 v[1:2], null, s15, s16, v[0:1]
s_cselect_b32 s0, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v20, 1.0, 0x3f400000, s0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[1:2]
s_clause 0x1
global_load_b64 v[0:1], v18, s[12:13]
global_load_b64 v[2:3], v18, s[10:11]
s_waitcnt vmcnt(2)
v_add_co_u32 v4, vcc_lo, v4, v6
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v7, vcc_lo
flat_load_b32 v14, v[4:5]
s_clause 0x3
global_load_b64 v[12:13], v18, s[2:3]
global_load_b64 v[4:5], v18, s[4:5]
global_load_b64 v[6:7], v18, s[6:7]
global_load_b64 v[8:9], v18, s[8:9]
s_waitcnt vmcnt(3) lgkmcnt(0)
v_mad_i64_i32 v[10:11], null, v14, 56, v[12:13]
flat_load_b32 v19, v[10:11] offset:12
v_add_co_u32 v12, vcc_lo, v10, 4
v_add_co_ci_u32_e32 v13, vcc_lo, 0, v11, vcc_lo
v_add_co_u32 v14, vcc_lo, v10, 8
v_add_co_ci_u32_e32 v15, vcc_lo, 0, v11, vcc_lo
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s0
s_add_i32 s14, s14, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s14, 0
s_cbranch_scc0 .LBB0_13
.LBB0_3:
s_mov_b32 s1, 0
s_mov_b32 s0, exec_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmpx_lt_i32_e32 0, v19
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_7
s_mov_b32 s2, 0
s_mov_b32 s1, exec_lo
v_cmpx_eq_u32_e32 1, v19
s_cbranch_execz .LBB0_6
s_clause 0x1
flat_load_b32 v31, v[10:11]
flat_load_b32 v16, v[12:13]
s_waitcnt vmcnt(1) lgkmcnt(1)
v_mul_f32_e32 v17, v31, v31
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f32_e32 v17, v17
v_mul_hi_i32 v21, v17, 0x10624dd3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v22, 31, v21
v_ashrrev_i32_e32 v21, 5, v21
v_add_nc_u32_e32 v21, v21, v22
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v21, v21, 0x1f4
v_sub_nc_u32_e32 v21, v17, v21
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v22, 31, v21
v_lshlrev_b64 v[21:22], 2, v[21:22]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v21, vcc_lo, v0, v21
v_add_co_ci_u32_e32 v22, vcc_lo, v1, v22, vcc_lo
flat_load_b32 v17, v[21:22]
s_waitcnt vmcnt(0) lgkmcnt(0)
v_mul_hi_i32 v21, v17, 0x92492493
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v21, v21, v17
v_lshrrev_b32_e32 v22, 31, v21
v_ashrrev_i32_e32 v21, 2, v21
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v21, v21, v22
v_mul_lo_u32 v21, v21, 7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v17, v17, v21
v_add_nc_u32_e32 v17, -2, v17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_i32_e32 v17, v17
v_cvt_f64_f32_e32 v[21:22], v17
v_ashrrev_i32_e32 v17, 31, v16
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[23:24], null, 0x404e0000, 0x404e0000, v[21:22]
v_rcp_f64_e32 v[25:26], v[23:24]
s_waitcnt_depctr 0xfff
v_fma_f64 v[27:28], -v[23:24], v[25:26], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[25:26], v[25:26], v[27:28], v[25:26]
v_lshlrev_b64 v[27:28], 5, v[16:17]
v_add_co_u32 v27, vcc_lo, v4, v27
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v28, vcc_lo, v5, v28, vcc_lo
flat_load_b32 v32, v[27:28]
v_fma_f64 v[27:28], -v[23:24], v[25:26], 1.0
v_fma_f64 v[25:26], v[25:26], v[27:28], v[25:26]
v_div_scale_f64 v[27:28], vcc_lo, v[21:22], 0x404e0000, v[21:22]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[29:30], v[27:28], v[25:26]
v_fma_f64 v[23:24], -v[23:24], v[29:30], v[27:28]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fmas_f64 v[23:24], v[23:24], v[25:26], v[29:30]
v_cvt_f64_f32_e32 v[25:26], v31
v_div_fixup_f64 v[21:22], v[23:24], 0x404e0000, v[21:22]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[21:22], v[21:22], v[25:26]
v_cvt_f32_f64_e32 v21, v[21:22]
s_waitcnt vmcnt(0) lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_cmp_le_f32_e32 vcc_lo, v32, v21
flat_store_b32 v[10:11], v21
s_and_b32 s2, vcc_lo, exec_lo
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s1, s2, exec_lo
.LBB0_7:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_11
s_mov_b32 s3, s1
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v19
s_cbranch_execz .LBB0_10
s_clause 0x1
flat_load_b32 v16, v[12:13]
flat_load_b32 v32, v[10:11]
s_and_not1_b32 s3, s1, exec_lo
s_waitcnt vmcnt(1) lgkmcnt(1)
v_ashrrev_i32_e32 v17, 31, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[21:22], 5, v[16:17]
v_add_co_u32 v21, vcc_lo, v4, v21
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v22, vcc_lo, v5, v22, vcc_lo
flat_load_b32 v23, v[14:15]
flat_load_b64 v[21:22], v[21:22]
s_waitcnt vmcnt(0) lgkmcnt(0)
v_dual_max_f32 v23, v23, v23 :: v_dual_max_f32 v22, v22, v22
v_min_f32_e32 v22, v23, v22
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v22, v20, v22
v_cvt_f64_f32_e32 v[22:23], v22
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[24:25], null, 0x404e0000, 0x404e0000, v[22:23]
v_rcp_f64_e32 v[26:27], v[24:25]
s_waitcnt_depctr 0xfff
v_fma_f64 v[28:29], -v[24:25], v[26:27], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[26:27], v[26:27], v[28:29], v[26:27]
v_fma_f64 v[28:29], -v[24:25], v[26:27], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[26:27], v[26:27], v[28:29], v[26:27]
v_div_scale_f64 v[28:29], vcc_lo, v[22:23], 0x404e0000, v[22:23]
v_mul_f64 v[30:31], v[28:29], v[26:27]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[24:25], -v[24:25], v[30:31], v[28:29]
v_div_fmas_f64 v[24:25], v[24:25], v[26:27], v[30:31]
v_cvt_f64_f32_e32 v[26:27], v32
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[22:23], v[24:25], 0x404e0000, v[22:23]
v_add_f64 v[22:23], v[22:23], v[26:27]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_f64_e32 v22, v[22:23]
v_cmp_le_f32_e32 vcc_lo, v21, v22
flat_store_b32 v[10:11], v22
s_and_b32 s4, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s3, s3, s4
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s1, s1, exec_lo
s_and_b32 s2, s3, exec_lo
s_or_b32 s1, s1, s2
.LBB0_11:
s_or_b32 exec_lo, exec_lo, s0
s_and_saveexec_b32 s0, s1
s_cbranch_execz .LBB0_2
v_lshlrev_b64 v[21:22], 5, v[16:17]
v_lshlrev_b64 v[16:17], 2, v[16:17]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v21, vcc_lo, v4, v21
v_add_co_ci_u32_e32 v22, vcc_lo, v5, v22, vcc_lo
flat_load_b32 v21, v[21:22] offset:8
s_waitcnt vmcnt(0) lgkmcnt(0)
v_ashrrev_i32_e32 v22, 31, v21
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[21:22], 2, v[21:22]
v_add_co_u32 v23, vcc_lo, v6, v21
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v24, vcc_lo, v7, v22, vcc_lo
v_add_co_u32 v16, vcc_lo, v0, v16
v_add_co_ci_u32_e32 v17, vcc_lo, v1, v17, vcc_lo
flat_load_b32 v23, v[23:24]
flat_load_b32 v24, v[16:17]
v_add_co_u32 v16, vcc_lo, v8, v21
v_add_co_ci_u32_e32 v17, vcc_lo, v9, v22, vcc_lo
flat_load_b32 v16, v[16:17]
s_waitcnt vmcnt(2) lgkmcnt(2)
v_ashrrev_i32_e32 v17, 31, v23
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v21, v23, v17
s_waitcnt vmcnt(1) lgkmcnt(1)
v_ashrrev_i32_e32 v23, 31, v24
v_xor_b32_e32 v17, v21, v17
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v24, v24, v23
v_cvt_f32_u32_e32 v21, v17
v_sub_nc_u32_e32 v22, 0, v17
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v24, v24, v23
v_rcp_iflag_f32_e32 v21, v21
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v21, 0x4f7ffffe, v21
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v21, v21
v_mul_lo_u32 v22, v22, v21
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v22, v21, v22
v_add_nc_u32_e32 v21, v21, v22
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v21, v24, v21
v_mul_lo_u32 v21, v21, v17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v21, v24, v21
v_sub_nc_u32_e32 v22, v21, v17
v_cmp_ge_u32_e32 vcc_lo, v21, v17
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v21, v21, v22, vcc_lo
v_sub_nc_u32_e32 v22, v21, v17
v_cmp_ge_u32_e32 vcc_lo, v21, v17
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v17, v21, v22, vcc_lo
v_xor_b32_e32 v17, v17, v23
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v17, v17, v23
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_nc_u32_e32 v16, v16, v17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v17, 31, v16
v_lshlrev_b64 v[16:17], 2, v[16:17]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v16, vcc_lo, v2, v16
v_add_co_ci_u32_e32 v17, vcc_lo, v3, v17, vcc_lo
flat_load_b32 v16, v[16:17]
s_waitcnt vmcnt(0) lgkmcnt(0)
s_clause 0x1
flat_store_b32 v[12:13], v16
flat_store_b32 v[10:11], v18
s_branch .LBB0_2
.LBB0_13:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 328
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 33
.amdhsa_next_free_sgpr 44
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, .Lfunc_end0-_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
; -----------------------------------------------------------------------------
; Zero-initialized device-global pointer slots (one 8-byte pointer each).
; The kernel prologue stores its pointer arguments into these so that the
; __device__ helper functions can reference them.
; -----------------------------------------------------------------------------
	.protected	d_Actors
	.type	d_Actors,@object
	.section	.bss,"aw",@nobits
	.globl	d_Actors
	.p2align	3, 0x0
d_Actors:
	.quad	0
	.size	d_Actors, 8

	.protected	d_Streets
	.type	d_Streets,@object
	.globl	d_Streets
	.p2align	3, 0x0
d_Streets:
	.quad	0
	.size	d_Streets, 8

	.protected	d_Array_Street_arrays
	.type	d_Array_Street_arrays,@object
	.globl	d_Array_Street_arrays
	.p2align	3, 0x0
d_Array_Street_arrays:
	.quad	0
	.size	d_Array_Street_arrays, 8

	.protected	d_Array_Street_offset
	.type	d_Array_Street_offset,@object
	.globl	d_Array_Street_offset
	.p2align	3, 0x0
d_Array_Street_offset:
	.quad	0
	.size	d_Array_Street_offset, 8

	.protected	d_Array_Street_size
	.type	d_Array_Street_size,@object
	.globl	d_Array_Street_size
	.p2align	3, 0x0
d_Array_Street_size:
	.quad	0
	.size	d_Array_Street_size, 8

	.protected	d_input_actor_id
	.type	d_input_actor_id,@object
	.globl	d_input_actor_id
	.p2align	3, 0x0
d_input_actor_id:
	.quad	0
	.size	d_input_actor_id, 8

	.protected	d_jobs
	.type	d_jobs,@object
	.globl	d_jobs
	.p2align	3, 0x0
d_jobs:
	.quad	0
	.size	d_jobs, 8

	.protected	d_randomn
	.type	d_randomn,@object
	.globl	d_randomn
	.p2align	3, 0x0
d_randomn:
	.quad	0
	.size	d_randomn, 8

	; Compilation-unit identifier emitted by the HIP toolchain.
	.type	__hip_cuid_,@object
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0
	.size	__hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym d_Actors
.addrsig_sym d_Streets
.addrsig_sym d_Array_Street_arrays
.addrsig_sym d_Array_Street_offset
.addrsig_sym d_Array_Street_size
.addrsig_sym d_input_actor_id
.addrsig_sym d_jobs
.addrsig_sym d_randomn
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .offset: 72
.size: 4
.value_kind: hidden_block_count_x
- .offset: 76
.size: 4
.value_kind: hidden_block_count_y
- .offset: 80
.size: 4
.value_kind: hidden_block_count_z
- .offset: 84
.size: 2
.value_kind: hidden_group_size_x
- .offset: 86
.size: 2
.value_kind: hidden_group_size_y
- .offset: 88
.size: 2
.value_kind: hidden_group_size_z
- .offset: 90
.size: 2
.value_kind: hidden_remainder_x
- .offset: 92
.size: 2
.value_kind: hidden_remainder_y
- .offset: 94
.size: 2
.value_kind: hidden_remainder_z
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 136
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 328
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.private_segment_fixed_size: 0
.sgpr_count: 46
.sgpr_spill_count: 0
.symbol: _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 33
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #define REORDER 0
#define GOOD_WEATHER 0
#define BAD_WEATHER 1
#define TAG_Car 0
#define TAG_Pedestrian 1
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <random>
//#include <array>
#include <algorithm>
#define NUM_CARS 4096
#define NUM_PEDS 16384
#define NUM_STREETS 500
#define MAX_CONNECTIONS 10
#define MAX_LEN 25
using namespace std;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with a diagnostic when a HIP runtime call returned an error.
// `status` is the return value of the HIP call being checked; `file`/`line`
// identify the call site (supplied by the CudaSafeCall macro).
// No-op when CUDA_ERROR_CHECK is not defined.
inline void __cudaSafeCall( hipError_t status, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( status == hipSuccess )
        return;
    fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
             file, line, hipGetErrorString( status ) );
    exit( -1 );
#endif
}
// Check for a pending HIP error, then synchronize the device to surface
// asynchronous kernel failures; print a diagnostic and abort on any error.
// `file`/`line` identify the call site (supplied by the CudaCheckError macro).
// No-op when CUDA_ERROR_CHECK is not defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    hipError_t status = hipGetLastError();
    if ( status != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( status ) );
        exit( -1 );
    }
    // More careful checking. However, this will affect performance.
    // Comment away if needed.
    status = hipDeviceSynchronize();
    if ( status != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( status ) );
        exit( -1 );
    }
#endif
}
// One simulated actor (car or pedestrian), row layout. 56 bytes total.
typedef struct
{
    float progress;       // distance travelled along the current street
    int street;           // index of the street the actor is currently on
    float max_velocity;   // set for cars only in main() — presumably unused for pedestrians; confirm
    int tag;              // TAG_Car or TAG_Pedestrian
    int a1;               // a1..a10: filler fields, never read by the visible code
    int a2;               // (see the TODO below — apparently meant to pad the struct)
    int a3;
    int a4;
    int a5;
    int a6;
    int a7;
    int a8;
    int a9;
    int a10;
    // TODO: add more fields here to avoid cache locality
} struct_Actor;
// One street segment, row layout. 32 bytes total.
typedef struct
{
    float length;              // travel distance needed to finish the street
    float max_velocity;        // speed limit applied to cars
    int neighbor_array_index;  // index into Array_Street_offset/size describing neighbors
    int s1;                    // s1..s5: filler fields, never read by the visible code — TODO confirm
    int s2;
    int s3;
    int s4;
    int s5;
} struct_Street;
// (tag, id) pair — not referenced anywhere in the visible code; presumably
// used by the REORDER variant of the benchmark. TODO confirm before removing.
typedef struct
{
    int tag;   // actor kind (TAG_Car / TAG_Pedestrian)
    int id;    // actor index
} tag_id_pair;
// Device-global copies of the kernel's pointer arguments. kernel() assigns
// them (redundantly, once per thread) before __syncthreads() so that the
// __device__ helper functions below can access the simulation arrays.
__device__ struct_Actor *d_Actors;
__device__ struct_Street *d_Streets;
__device__ int *d_Array_Street_arrays;
__device__ int *d_Array_Street_offset;
__device__ int *d_Array_Street_size;
__device__ int *d_input_actor_id;
__device__ int *d_jobs;
__device__ int *d_randomn;
// Advance one car by a single tick: move at min(car limit, street limit)
// scaled by the weather, and hop to a pseudo-random neighbor street once the
// current street's length is reached.
__device__ void method_Car_move(int car_id, int weather)
{
    // Default to 1.0 so the multiplier is never read uninitialized: the
    // original left it indeterminate (undefined behavior) for any weather
    // code other than GOOD_WEATHER/BAD_WEATHER.
    float weather_multiplier = 1.0f;
    if (weather == GOOD_WEATHER)
    {
        weather_multiplier = 1.0;
    }
    else if (weather == BAD_WEATHER)
    {
        weather_multiplier = 0.75;
    }
    // Car speed is capped by both its own and the street's maximum velocity.
    float speed = min(d_Actors[car_id].max_velocity, d_Streets[d_Actors[car_id].street].max_velocity) * weather_multiplier;
    d_Actors[car_id].progress = d_Actors[car_id].progress + (speed / 60.0); /* 1 tick = 1 minute */
    if (d_Actors[car_id].progress >= d_Streets[d_Actors[car_id].street].length)
    {
        // move to different street: pick one of the street's neighbors using
        // the precomputed per-street random number.
        int array_id = d_Streets[d_Actors[car_id].street].neighbor_array_index;
        int neighbor_index = d_randomn[d_Actors[car_id].street] % d_Array_Street_size[array_id];
        d_Actors[car_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
        d_Actors[car_id].progress = 0.0f;
    }
}
// Advance one pedestrian by a single tick. Speed is a pseudo-random integer
// in [-2, 4] drawn from d_randomn, indexed by (progress^2) mod NUM_STREETS;
// progress advances by speed/60 per tick. `weather` is accepted for
// signature symmetry with method_Car_move but not used here.
__device__ void method_Pedestrian_move(int ped_id, int weather)
{
    // NOTE(review): assumes progress stays >= 0 so the modulo index is
    // non-negative; a negative speed streak could in principle violate this —
    // confirm against the simulation parameters.
    float speed = d_randomn[((int) (d_Actors[ped_id].progress*d_Actors[ped_id].progress)) % NUM_STREETS] % 7 - 2;
    d_Actors[ped_id].progress = d_Actors[ped_id].progress + (speed / 60.0);
    if (d_Actors[ped_id].progress >= d_Streets[d_Actors[ped_id].street].length)
    {
        // move to different street: pick one of the street's neighbors using
        // the precomputed per-street random number.
        int array_id = d_Streets[d_Actors[ped_id].street].neighbor_array_index;
        int neighbor_index = d_randomn[d_Actors[ped_id].street] % d_Array_Street_size[array_id];
        d_Actors[ped_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
        d_Actors[ped_id].progress = 0.0f;
    }
}
// Run `ticks` simulation steps for one actor, dispatching each step on the
// actor's current tag (the tag is re-read every tick, so an actor keeps its
// kind-specific behavior even if the tag were to change mid-run).
__device__ void block(int actor_id, int weather, int ticks)
{
    for (int tick = 0; tick < ticks; tick++)
    {
        const int tag = d_Actors[actor_id].tag;
        if (tag == TAG_Car)
        {
            method_Car_move(actor_id, weather);
        }
        else if (tag == TAG_Pedestrian)
        {
            method_Pedestrian_move(actor_id, weather);
        }
    }
}
// Entry kernel: every thread redundantly publishes the pointer arguments to
// the d_* device globals, synchronizes, then simulates the actor selected by
// its global thread id for `ticks` steps.
__global__ void kernel(int weather, int ticks,
struct_Actor *v_d_Actors, struct_Street *v_d_Streets,
int *v_d_Array_Street_size, int *v_d_Array_Street_offset, int *v_d_Array_Street_arrays,
int *v_d_input_actor_id, int *v_d_jobs, int *v_d_randomn)
{
// All threads write identical values, so the races on these globals are
// benign in practice; the barrier below keeps the writes before any use.
d_Actors = v_d_Actors;
d_Streets = v_d_Streets;
d_Array_Street_size = v_d_Array_Street_size;
d_Array_Street_offset = v_d_Array_Street_offset;
d_Array_Street_arrays = v_d_Array_Street_arrays;
d_input_actor_id = v_d_input_actor_id;
d_jobs = v_d_jobs;
d_randomn = v_d_randomn;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): __syncthreads() only synchronizes within a block; this
// relies on every block writing the same global values — confirm intended.
__syncthreads();
#if (REORDER)
// Reordered variant: indirect through the jobs permutation first.
block(d_input_actor_id[d_jobs[tid]], weather, ticks);
#else
block(d_input_actor_id[tid], weather, ticks);
#endif
}
// Host driver: build a random street network and actor population, convert
// to the row (array-of-structs) layout, copy everything to the GPU, launch
// the simulation kernel once, and report the elapsed kernel time.
int main()
{
printf("Setting up scenario...\n");
srand(42); // fixed seed so every run generates the same scenario
// streets
float *Street_length = new float[NUM_STREETS];
float *Street_max_velocity = new float[NUM_STREETS];
int *Street_neighbors = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
Street_length[i] = rand() % MAX_LEN + 1;
Street_max_velocity[i] = rand() % 40 + 45; /* speed between 45 and 105 */
Street_neighbors[i] = i;
}
// neighbors: CSR-style layout — per-street offset and count into one
// flat Array_Street_arrays buffer.
int *Array_Street_offset = new int[NUM_STREETS];
int *Array_Street_size = new int[NUM_STREETS];
int num_connections = 0;
for (int i = 0; i < NUM_STREETS; i++)
{
Array_Street_offset[i] = num_connections;
int connections = rand() % MAX_CONNECTIONS + 1;
Array_Street_size[i] = connections;
num_connections += connections;
}
int *Array_Street_arrays = new int[num_connections];
for (int i = 0; i < num_connections; i++)
{
Array_Street_arrays[i] = rand() % NUM_STREETS;
}
// actors
int *Actor_street = new int[NUM_PEDS + NUM_CARS];
float *Actor_progress = new float[NUM_PEDS + NUM_CARS];
float *Car_max_velocity = new float[NUM_CARS + NUM_PEDS];
int *Actor_id = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_street[i] = rand() % NUM_STREETS;
Actor_progress[i] = rand() % 10;
Car_max_velocity[i] = rand() % 20 + 65;
}
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_id[i] = i;
}
// jobs (dummy)
int *jobs = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_CARS + NUM_PEDS; i++)
{
jobs[i] = i;
}
// random numbers
int *randomn = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
// TODO: real random
randomn[i] = rand() % NUM_STREETS;
}
printf("Scenario set up.\n");
printf("Converting data to row format...\n");
// Pack the column arrays into arrays of structs (row layout): the first
// NUM_PEDS entries become pedestrians, the rest cars.
struct_Actor *actors = new struct_Actor[NUM_CARS + NUM_PEDS];
struct_Street *streets = new struct_Street[NUM_STREETS];
for (int i = 0; i < NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].tag = TAG_Pedestrian;
}
for (int i = NUM_PEDS; i < NUM_CARS + NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].max_velocity = Car_max_velocity[i];
actors[i].tag = TAG_Car;
}
for (int i = 0; i < NUM_STREETS; i++)
{
streets[i].length = Street_length[i];
streets[i].max_velocity = Street_max_velocity[i];
streets[i].neighbor_array_index = Street_neighbors[i];
}
std::srand(42); // re-seed so the shuffle is deterministic as well
#if !(REORDER)
// NOTE(review): std::random_shuffle is deprecated in C++14 and removed in
// C++17 — consider std::shuffle if the toolchain standard is raised.
random_shuffle(actors, actors + NUM_CARS + NUM_PEDS);
#endif
printf("Done converting data.\n");
printf("Copying data to GPU...\n");
struct_Actor *v_d_Actors;
struct_Street *v_d_Streets;
int *v_d_Array_Street_size;
int *v_d_Array_Street_offset;
int *v_d_Array_Street_arrays;
int *v_d_input_actor_tag; // NOTE(review): allocated below but never copied to, used, or freed
int *v_d_input_actor_id;
int *v_d_jobs;
int *v_d_randomn;
CudaSafeCall(hipMalloc((void**) &v_d_Actors, sizeof(struct_Actor) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_Streets, sizeof(struct_Street) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_size, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_offset, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMalloc((void**) &v_d_Array_Street_arrays, sizeof(int) * num_connections));
CudaSafeCall(hipMalloc((void**) &v_d_input_actor_tag, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_input_actor_id, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_jobs, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(hipMalloc((void**) &v_d_randomn, sizeof(int) * NUM_STREETS));
CudaSafeCall(hipMemcpy(v_d_Actors, &actors[0], sizeof(struct_Actor) * (NUM_CARS + NUM_PEDS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Streets, &streets[0], sizeof(struct_Street) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_size, &Array_Street_size[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_offset, &Array_Street_offset[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_Array_Street_arrays, &Array_Street_arrays[0], sizeof(int) * num_connections, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_input_actor_id, &Actor_id[0], sizeof(int) * (NUM_PEDS + NUM_CARS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_jobs, &jobs[0], sizeof(int) * (NUM_PEDS + NUM_CARS), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(v_d_randomn, &randomn[0], sizeof(int) * NUM_STREETS, hipMemcpyHostToDevice));
printf("Finished copying data.\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("Launching kernel...\n");
hipEventRecord(start);
// 32 blocks x 640 threads: one thread per actor (20480 total).
kernel<<<dim3(32), dim3((NUM_PEDS + NUM_CARS) / 32)>>>(GOOD_WEATHER, 1000000,
v_d_Actors, v_d_Streets,
v_d_Array_Street_size, v_d_Array_Street_offset, v_d_Array_Street_arrays,
v_d_input_actor_id, v_d_jobs, v_d_randomn);
CudaCheckError();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
CudaCheckError();
printf("Kernel finished.\n");
// cudaMemcpy(Actor_progress, v_d_Actor_progress, sizeof(float) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %f ", Actor_progress[i]);
// }
// cudaMemcpy(Actor_street, v_d_Actor_street, sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %i ", Actor_street[i]);
// }
printf("\n\n\nElapsed time millis: %f\n", milliseconds);
// NOTE(review): device buffers, events, and host new[] allocations are not
// released before exit — harmless for a one-shot benchmark, but consider
// hipFree/hipEventDestroy/delete[] cleanup.
}
.file "benchmark_row_nocache.hip"
#-----------------------------------------------------------------------
# Compiler-generated HIP host stub for
#   kernel(int, int, struct_Actor*, struct_Street*, int*, int*, int*,
#          int*, int*, int*)
# ABI: SysV AMD64. The first six args arrive in edi, esi, rdx, rcx, r8,
# r9; the remaining four pointer args arrive on the caller's stack
# (after `subq $184` they sit at 192..216(%rsp)). The stub spills the
# register args, builds a 10-entry kernel-argument pointer table at
# 96(%rsp), pops the <<<...>>> launch configuration pushed by the
# caller, and forwards everything to hipLaunchKernel.
# Clobbers: caller-saved registers only (no callee-saved regs touched).
#-----------------------------------------------------------------------
	.globl	_Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_ # -- Begin function _Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
	.p2align	4, 0x90
	.type	_Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_,@function
_Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_: # @_Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
	.cfi_startproc
# %bb.0:
	subq	$184, %rsp
	.cfi_def_cfa_offset 192
	# Spill the six register-passed arguments into local slots.
	movl	%edi, 12(%rsp)                  # arg0: int
	movl	%esi, 8(%rsp)                   # arg1: int
	movq	%rdx, 88(%rsp)                  # arg2: struct_Actor*
	movq	%rcx, 80(%rsp)                  # arg3: struct_Street*
	movq	%r8, 72(%rsp)                   # arg4: int*
	movq	%r9, 64(%rsp)                   # arg5: int*
	# Build the hipLaunchKernel argument-pointer table at 96(%rsp):
	# one pointer per kernel parameter, in declaration order.
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 136(%rsp)
	# Args 6..9 were passed on the caller's stack; after the subq they
	# live at 192..216(%rsp), so point table entries straight at them.
	leaq	192(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	200(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	208(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	216(%rsp), %rax
	movq	%rax, 168(%rsp)
	# Retrieve the launch configuration recorded by
	# __hipPushCallConfiguration at the <<<...>>> call site.
	leaq	48(%rsp), %rdi                  # out: gridDim
	leaq	32(%rsp), %rsi                  # out: blockDim
	leaq	24(%rsp), %rdx                  # out: shared-mem size
	leaq	16(%rsp), %rcx                  # out: stream
	callq	__hipPopCallConfiguration
	# hipLaunchKernel(func, gridDim, blockDim, args, sharedMem, stream)
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	96(%rsp), %r9                   # kernel-arg pointer table
	movl	$_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, %edi
	pushq	16(%rsp)                        # stream (stack arg)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # sharedMem (stack arg)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$200, %rsp                      # pop locals + the two pushes
	.cfi_adjust_cfa_offset -200
	retq
.Lfunc_end0:
	.size	_Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, .Lfunc_end0-_Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
	.cfi_endproc
                                        # -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 416
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $.Lstr, %edi
callq puts@PLT
movl $42, %edi
callq srand
movl $2000, %edi # imm = 0x7D0
callq _Znam
movq %rax, %r14
movl $2000, %edi # imm = 0x7D0
callq _Znam
movq %rax, %r15
movl $2000, %edi # imm = 0x7D0
callq _Znam
movq %rax, %r12
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
leal (%rcx,%rcx,4), %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
incl %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%rbx,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $36, %rcx
addl %edx, %ecx
shll $3, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
addl $45, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r15,%rbx,4)
movl %ebx, (%r12,%rbx,4)
incq %rbx
cmpq $500, %rbx # imm = 0x1F4
jne .LBB1_1
# %bb.2:
movq %r15, 224(%rsp) # 8-byte Spill
movq %r14, 232(%rsp) # 8-byte Spill
movl $2000, %edi # imm = 0x7D0
callq _Znam
movq %rax, %r13
movl $2000, %edi # imm = 0x7D0
callq _Znam
movq %rax, %r15
xorl %r14d, %r14d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_3: # =>This Inner Loop Header: Depth=1
movl %ebx, (%r13,%r14,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
movl %eax, %edx
subl %ecx, %edx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%r15,%r14,4)
addl %edx, %ebx
incl %ebx
incq %r14
cmpq $500, %r14 # imm = 0x1F4
jne .LBB1_3
# %bb.4:
movq %r15, 216(%rsp) # 8-byte Spill
movslq %ebx, %r14
leaq (,%r14,4), %rax
testl %r14d, %r14d
movq $-1, %rdi
movq %rax, 208(%rsp) # 8-byte Spill
cmovnsq %rax, %rdi
callq _Znam
movq %rax, %r15
testl %r14d, %r14d
jle .LBB1_7
# %bb.5: # %.lr.ph.preheader
movl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_6: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $500, %ecx, %ecx # imm = 0x1F4
subl %ecx, %eax
movl %eax, (%r15,%r14,4)
incq %r14
cmpq %r14, %rbx
jne .LBB1_6
.LBB1_7: # %._crit_edge
movq %r15, 184(%rsp) # 8-byte Spill
movl $81920, %edi # imm = 0x14000
callq _Znam
movq %rax, %r14
movl $81920, %edi # imm = 0x14000
callq _Znam
movq %rax, %rbp
movl $81920, %edi # imm = 0x14000
callq _Znam
movq %rax, %r15
movl $81920, %edi # imm = 0x14000
callq _Znam
movq %rax, 16(%rsp) # 8-byte Spill
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_8: # =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $500, %ecx, %ecx # imm = 0x1F4
subl %ecx, %eax
movl %eax, (%r14,%rbx,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbp,%rbx,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
shll $2, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
addl $65, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r15,%rbx,4)
incq %rbx
cmpq $20480, %rbx # imm = 0x5000
jne .LBB1_8
# %bb.9: # %.preheader160.preheader
xorl %eax, %eax
movq 16(%rsp), %rcx # 8-byte Reload
.p2align 4, 0x90
.LBB1_10: # %.preheader160
# =>This Inner Loop Header: Depth=1
movl %eax, (%rcx,%rax,4)
incq %rax
cmpq $20480, %rax # imm = 0x5000
jne .LBB1_10
# %bb.11:
movl $81920, %edi # imm = 0x14000
callq _Znam
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_12: # =>This Inner Loop Header: Depth=1
movl %ecx, (%rax,%rcx,4)
incq %rcx
cmpq $20480, %rcx # imm = 0x5000
jne .LBB1_12
# %bb.13:
movq %rax, 176(%rsp) # 8-byte Spill
movq %r13, 200(%rsp) # 8-byte Spill
movl $2000, %edi # imm = 0x7D0
callq _Znam
movq %rax, %r13
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_14: # =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $500, %ecx, %ecx # imm = 0x1F4
subl %ecx, %eax
movl %eax, (%r13,%rbx,4)
incq %rbx
cmpq $500, %rbx # imm = 0x1F4
jne .LBB1_14
# %bb.15:
movq %r13, 192(%rsp) # 8-byte Spill
movl $.Lstr.1, %edi
callq puts@PLT
movl $.Lstr.2, %edi
callq puts@PLT
movl $1146880, %edi # imm = 0x118000
callq _Znam
movq %rax, %rbx
movl $16000, %edi # imm = 0x3E80
callq _Znam
movq %rax, %r13
xorl %eax, %eax
movq %rbx, %rcx
.p2align 4, 0x90
.LBB1_16: # =>This Inner Loop Header: Depth=1
movss (%rbp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rcx)
movl (%r14,%rax,4), %edx
movl %edx, 4(%rcx)
movl $1, 12(%rcx)
incq %rax
addq $56, %rcx
cmpq $16384, %rax # imm = 0x4000
jne .LBB1_16
# %bb.17: # %.preheader159.preheader
leaq 917516(%rbx), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_18: # %.preheader159
# =>This Inner Loop Header: Depth=1
movss 65536(%rbp,%rcx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, -12(%rax)
movl 65536(%r14,%rcx), %edx
movl %edx, -8(%rax)
movss 65536(%r15,%rcx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, -4(%rax)
movl $0, (%rax)
addq $4, %rcx
addq $56, %rax
cmpq $16384, %rcx # imm = 0x4000
jne .LBB1_18
# %bb.19: # %.preheader.preheader
movl $1, %eax
movq 232(%rsp), %rdx # 8-byte Reload
movq 224(%rsp), %rsi # 8-byte Reload
.p2align 4, 0x90
.LBB1_20: # %.preheader
# =>This Inner Loop Header: Depth=1
movss -1(%rdx,%rax), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, -8(%r13,%rax,8)
movss -1(%rsi,%rax), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, -4(%r13,%rax,8)
movl -1(%r12,%rax), %ecx
movl %ecx, (%r13,%rax,8)
addq $4, %rax
cmpq $2001, %rax # imm = 0x7D1
jne .LBB1_20
# %bb.21:
movl $42, %edi
callq srand
movl $56, %r14d
movl $2, %r15d
jmp .LBB1_22
.p2align 4, 0x90
.LBB1_24: # in Loop: Header=BB1_22 Depth=1
addq $56, %r14
incq %r15
cmpq $1146880, %r14 # imm = 0x118000
je .LBB1_25
.LBB1_22: # =>This Inner Loop Header: Depth=1
callq rand
cltq
cqto
idivq %r15
imulq $56, %rdx, %rax
cmpq %rax, %r14
je .LBB1_24
# %bb.23: # in Loop: Header=BB1_22 Depth=1
leaq (%rbx,%r14), %rcx
addq %rbx, %rax
movq 48(%rcx), %rdx
movq %rdx, 144(%rsp)
movups (%rcx), %xmm0
movups 16(%rcx), %xmm1
movups 32(%rcx), %xmm2
movaps %xmm2, 128(%rsp)
movaps %xmm1, 112(%rsp)
movaps %xmm0, 96(%rsp)
movq 48(%rax), %rdx
movq %rdx, 48(%rcx)
movups (%rax), %xmm0
movups 16(%rax), %xmm1
movups 32(%rax), %xmm2
movups %xmm2, 32(%rcx)
movups %xmm1, 16(%rcx)
movups %xmm0, (%rcx)
movq 144(%rsp), %rcx
movq %rcx, 48(%rax)
movaps 96(%rsp), %xmm0
movaps 112(%rsp), %xmm1
movaps 128(%rsp), %xmm2
movups %xmm2, 32(%rax)
movups %xmm1, 16(%rax)
movups %xmm0, (%rax)
jmp .LBB1_24
.LBB1_25: # %_ZSt14random_shuffleIP12struct_ActorEvT_S2_.exit
movl $.Lstr.3, %edi
callq puts@PLT
movl $.Lstr.4, %edi
callq puts@PLT
leaq 88(%rsp), %rdi
movl $1146880, %esi # imm = 0x118000
callq hipMalloc
testl %eax, %eax
jne .LBB1_26
# %bb.27: # %_Z14__cudaSafeCall10hipError_tPKci.exit
leaq 80(%rsp), %rdi
movl $16000, %esi # imm = 0x3E80
callq hipMalloc
testl %eax, %eax
movq 216(%rsp), %r14 # 8-byte Reload
movq 208(%rsp), %r15 # 8-byte Reload
jne .LBB1_28
# %bb.29: # %_Z14__cudaSafeCall10hipError_tPKci.exit121
leaq 72(%rsp), %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
testl %eax, %eax
jne .LBB1_30
# %bb.31: # %_Z14__cudaSafeCall10hipError_tPKci.exit123
leaq 64(%rsp), %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
testl %eax, %eax
jne .LBB1_32
# %bb.33: # %_Z14__cudaSafeCall10hipError_tPKci.exit125
leaq 56(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_34
# %bb.35: # %_Z14__cudaSafeCall10hipError_tPKci.exit127
leaq 352(%rsp), %rdi
movl $81920, %esi # imm = 0x14000
callq hipMalloc
testl %eax, %eax
jne .LBB1_36
# %bb.37: # %_Z14__cudaSafeCall10hipError_tPKci.exit129
leaq 48(%rsp), %rdi
movl $81920, %esi # imm = 0x14000
callq hipMalloc
testl %eax, %eax
jne .LBB1_38
# %bb.39: # %_Z14__cudaSafeCall10hipError_tPKci.exit131
leaq 40(%rsp), %rdi
movl $81920, %esi # imm = 0x14000
callq hipMalloc
testl %eax, %eax
jne .LBB1_40
# %bb.41: # %_Z14__cudaSafeCall10hipError_tPKci.exit133
leaq 32(%rsp), %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
testl %eax, %eax
jne .LBB1_42
# %bb.43: # %_Z14__cudaSafeCall10hipError_tPKci.exit135
movq 88(%rsp), %rdi
movl $1146880, %edx # imm = 0x118000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_44
# %bb.45: # %_Z14__cudaSafeCall10hipError_tPKci.exit137
movq 80(%rsp), %rdi
movl $16000, %edx # imm = 0x3E80
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_46
# %bb.47: # %_Z14__cudaSafeCall10hipError_tPKci.exit139
movq 72(%rsp), %rdi
movl $2000, %edx # imm = 0x7D0
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
movq 200(%rsp), %rsi # 8-byte Reload
movq 192(%rsp), %rbx # 8-byte Reload
jne .LBB1_48
# %bb.49: # %_Z14__cudaSafeCall10hipError_tPKci.exit141
movq 64(%rsp), %rdi
movl $2000, %edx # imm = 0x7D0
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_50
# %bb.51: # %_Z14__cudaSafeCall10hipError_tPKci.exit143
movq 56(%rsp), %rdi
movq 184(%rsp), %rsi # 8-byte Reload
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_52
# %bb.53: # %_Z14__cudaSafeCall10hipError_tPKci.exit145
movq 48(%rsp), %rdi
movl $81920, %edx # imm = 0x14000
movq 16(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_54
# %bb.55: # %_Z14__cudaSafeCall10hipError_tPKci.exit147
movq 40(%rsp), %rdi
movl $81920, %edx # imm = 0x14000
movq 176(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_56
# %bb.57: # %_Z14__cudaSafeCall10hipError_tPKci.exit149
movq 32(%rsp), %rdi
movl $2000, %edx # imm = 0x7D0
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_58
# %bb.59: # %_Z14__cudaSafeCall10hipError_tPKci.exit151
movl $.Lstr.5, %edi
callq puts@PLT
leaq 24(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
movl $.Lstr.6, %edi
callq puts@PLT
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967328, %rdi # imm = 0x100000020
leaq 608(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_61
# %bb.60:
movq 88(%rsp), %rax
movq 80(%rsp), %rcx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
movq 48(%rsp), %r8
movq 40(%rsp), %r9
movq 32(%rsp), %r10
movl $0, 12(%rsp)
movl $1000000, 8(%rsp) # imm = 0xF4240
movq %rax, 344(%rsp)
movq %rcx, 336(%rsp)
movq %rdx, 328(%rsp)
movq %rsi, 320(%rsp)
movq %rdi, 312(%rsp)
movq %r8, 304(%rsp)
movq %r9, 296(%rsp)
movq %r10, 288(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 344(%rsp), %rax
movq %rax, 112(%rsp)
leaq 336(%rsp), %rax
movq %rax, 120(%rsp)
leaq 328(%rsp), %rax
movq %rax, 128(%rsp)
leaq 320(%rsp), %rax
movq %rax, 136(%rsp)
leaq 312(%rsp), %rax
movq %rax, 144(%rsp)
leaq 304(%rsp), %rax
movq %rax, 152(%rsp)
leaq 296(%rsp), %rax
movq %rax, 160(%rsp)
leaq 288(%rsp), %rax
movq %rax, 168(%rsp)
leaq 272(%rsp), %rdi
leaq 256(%rsp), %rsi
leaq 248(%rsp), %rdx
leaq 240(%rsp), %rcx
callq __hipPopCallConfiguration
movq 272(%rsp), %rsi
movl 280(%rsp), %edx
movq 256(%rsp), %rcx
movl 264(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, %edi
pushq 240(%rsp)
.cfi_adjust_cfa_offset 8
pushq 256(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_61:
callq hipGetLastError
testl %eax, %eax
jne .LBB1_62
# %bb.65:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB1_66
# %bb.67: # %_Z16__cudaCheckErrorPKci.exit
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movl $0, 96(%rsp)
movq 24(%rsp), %rsi
movq (%rsp), %rdx
leaq 96(%rsp), %rdi
callq hipEventElapsedTime
callq hipGetLastError
testl %eax, %eax
jne .LBB1_68
# %bb.70:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB1_71
# %bb.72: # %_Z16__cudaCheckErrorPKci.exit155
movl $.Lstr.7, %edi
callq puts@PLT
movss 96(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.9, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_26:
.cfi_def_cfa_offset 416
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $312, %ecx # imm = 0x138
jmp .LBB1_64
.LBB1_28:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $313, %ecx # imm = 0x139
jmp .LBB1_64
.LBB1_30:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $314, %ecx # imm = 0x13A
jmp .LBB1_64
.LBB1_32:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $315, %ecx # imm = 0x13B
jmp .LBB1_64
.LBB1_34:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $316, %ecx # imm = 0x13C
jmp .LBB1_64
.LBB1_36:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $317, %ecx # imm = 0x13D
jmp .LBB1_64
.LBB1_38:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $318, %ecx # imm = 0x13E
jmp .LBB1_64
.LBB1_40:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $319, %ecx # imm = 0x13F
jmp .LBB1_64
.LBB1_42:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $320, %ecx # imm = 0x140
jmp .LBB1_64
.LBB1_44:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $322, %ecx # imm = 0x142
jmp .LBB1_64
.LBB1_46:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $323, %ecx # imm = 0x143
jmp .LBB1_64
.LBB1_48:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $324, %ecx # imm = 0x144
jmp .LBB1_64
.LBB1_50:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $325, %ecx # imm = 0x145
jmp .LBB1_64
.LBB1_52:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $326, %ecx # imm = 0x146
jmp .LBB1_64
.LBB1_54:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $327, %ecx # imm = 0x147
jmp .LBB1_64
.LBB1_56:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $328, %ecx # imm = 0x148
jmp .LBB1_64
.LBB1_58:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.10, %esi
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $329, %ecx # imm = 0x149
jmp .LBB1_64
.LBB1_62:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
jmp .LBB1_63
.LBB1_66:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
.LBB1_63:
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $343, %ecx # imm = 0x157
jmp .LBB1_64
.LBB1_68:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
jmp .LBB1_69
.LBB1_71:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
.LBB1_69:
movl $.L.str.5, %edx
movq %rbx, %rdi
movl $350, %ecx # imm = 0x15E
.LBB1_64:
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# __hip_module_ctor — run from .init_array at program startup.
# Lazily registers the embedded fat binary (once, guarded by
# __hip_gpubin_handle; NOTE(review): the check-then-set is not
# thread-safe, but constructors run single-threaded in practice), then
# registers the kernel and the eight module-level device variables with
# the HIP runtime, and finally arranges __hip_module_dtor via atexit.
# rbx (callee-saved, pushed on entry) caches the fat-binary handle
# across all the registration calls.
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB2_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache the handle
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	# __hipRegisterFunction(handle, hostFn, name, name, -1, 0, 0,0,0,0)
	subq	$32, %rsp                       # 4 zeroed stack args
	.cfi_adjust_cfa_offset 32
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, %esi
	movl	$.L__unnamed_1, %edx            # mangled kernel name string
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	addq	$32, %rsp
	.cfi_adjust_cfa_offset -32
	# Eight identical __hipRegisterVar stanzas follow:
	# (handle, hostVar, name, name, ext=0, size=8, const=0, global=0).
	# register d_Actors
	movl	$d_Actors, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movl	$8, %r9d                        # sizeof(pointer)
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# register d_Streets
	movl	$d_Streets, %esi
	movl	$.L__unnamed_3, %edx
	movl	$.L__unnamed_3, %ecx
	movl	$8, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# register d_Array_Street_arrays
	movl	$d_Array_Street_arrays, %esi
	movl	$.L__unnamed_4, %edx
	movl	$.L__unnamed_4, %ecx
	movl	$8, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# register d_Array_Street_offset
	movl	$d_Array_Street_offset, %esi
	movl	$.L__unnamed_5, %edx
	movl	$.L__unnamed_5, %ecx
	movl	$8, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# register d_Array_Street_size
	movl	$d_Array_Street_size, %esi
	movl	$.L__unnamed_6, %edx
	movl	$.L__unnamed_6, %ecx
	movl	$8, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# register d_input_actor_id
	movl	$d_input_actor_id, %esi
	movl	$.L__unnamed_7, %edx
	movl	$.L__unnamed_7, %ecx
	movl	$8, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# register d_jobs
	movl	$d_jobs, %esi
	movl	$.L__unnamed_8, %edx
	movl	$.L__unnamed_8, %ecx
	movl	$8, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# register d_randomn
	movl	$d_randomn, %esi
	movl	$.L__unnamed_9, %edx
	movl	$.L__unnamed_9, %ecx
	movl	$8, %r9d
	movq	%rbx, %rdi
	xorl	%r8d, %r8d
	pushq	$0
	.cfi_adjust_cfa_offset 8
	pushq	$0
	.cfi_adjust_cfa_offset 8
	callq	__hipRegisterVar
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
	# Schedule teardown; tail-call atexit(__hip_module_dtor).
	movl	$__hip_module_dtor, %edi
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __hip_module_dtor — atexit handler installed by __hip_module_ctor.
# Unregisters the fat binary exactly once: if the cached handle is
# non-null, passes it to __hipUnregisterFatBinary and nulls the cache.
# The pushq %rax only realigns the stack to 16 before the call.
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing registered? then no-op
	je	.LBB3_2
# %bb.1:
	pushq	%rax                            # align rsp for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type d_Actors,@object # @d_Actors
.local d_Actors
.comm d_Actors,8,8
.type d_Streets,@object # @d_Streets
.local d_Streets
.comm d_Streets,8,8
.type d_Array_Street_arrays,@object # @d_Array_Street_arrays
.local d_Array_Street_arrays
.comm d_Array_Street_arrays,8,8
.type d_Array_Street_offset,@object # @d_Array_Street_offset
.local d_Array_Street_offset
.comm d_Array_Street_offset,8,8
.type d_Array_Street_size,@object # @d_Array_Street_size
.local d_Array_Street_size
.comm d_Array_Street_size,8,8
.type d_input_actor_id,@object # @d_input_actor_id
.local d_input_actor_id
.comm d_input_actor_id,8,8
.type d_jobs,@object # @d_jobs
.local d_jobs
.comm d_jobs,8,8
.type d_randomn,@object # @d_randomn
.local d_randomn
.comm d_randomn,8,8
.type _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_,@object # @_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.section .rodata,"a",@progbits
.globl _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.p2align 3, 0x0
_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_:
.quad _Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.size _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/prg-titech/array2016-paper/master/benchmarks/benchmark_row_nocache.hip"
.size .L.str.5, 128
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "\n\n\nElapsed time millis: %f\n"
.size .L.str.9, 28
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "cudaSafeCall() failed at %s:%i : %s\n"
.size .L.str.10, 37
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "cudaCheckError() failed at %s:%i : %s\n"
.size .L.str.11, 39
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "cudaCheckError() with sync failed at %s:%i : %s\n"
.size .L.str.12, 49
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_"
.size .L__unnamed_1, 60
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "d_Actors"
.size .L__unnamed_2, 9
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "d_Streets"
.size .L__unnamed_3, 10
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "d_Array_Street_arrays"
.size .L__unnamed_4, 22
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "d_Array_Street_offset"
.size .L__unnamed_5, 22
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "d_Array_Street_size"
.size .L__unnamed_6, 20
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "d_input_actor_id"
.size .L__unnamed_7, 17
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "d_jobs"
.size .L__unnamed_8, 7
.type .L__unnamed_9,@object # @8
.L__unnamed_9:
.asciz "d_randomn"
.size .L__unnamed_9, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Setting up scenario..."
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Scenario set up."
.size .Lstr.1, 17
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Converting data to row format..."
.size .Lstr.2, 33
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Done converting data."
.size .Lstr.3, 22
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Copying data to GPU..."
.size .Lstr.4, 23
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Finished copying data."
.size .Lstr.5, 23
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "Launching kernel..."
.size .Lstr.6, 20
.type .Lstr.7,@object # @str.7
.Lstr.7:
.asciz "Kernel finished."
.size .Lstr.7, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_Actors
.addrsig_sym d_Streets
.addrsig_sym d_Array_Street_arrays
.addrsig_sym d_Array_Street_offset
.addrsig_sym d_Array_Street_size
.addrsig_sym d_input_actor_id
.addrsig_sym d_jobs
.addrsig_sym d_randomn
.addrsig_sym _Z6kerneliiP12struct_ActorP13struct_StreetPiS3_S3_S3_S3_S3_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#define NUMBER 100
__global__ void demo(int *arr){
int tx=threadIdx.x;
if(tx>NUMBER){
return;
}
arr[tx]+=tx;
__syncthreads();
}
// Host driver: allocates a NUMBER-int device buffer, launches demo() on
// one block of 512 threads, and frees the buffer. Returns 0 on success;
// on any CUDA error prints the error string and exits with -1.
int main(int argc, char **argv){
    int *arr;
    cudaError_t err;

    err = cudaMalloc((void**)&arr, NUMBER * sizeof(int));
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    dim3 dimGrid(1, 1);
    dim3 dimBlock(512, 1);      // 512 threads; demo() guards the extra ones
    demo<<<dimGrid, dimBlock>>>(arr);

    // Bug fix: the original never checked the launch itself, so a bad
    // configuration or launch failure went unnoticed until cudaFree.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    err = cudaFree(arr);
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return 0;
}
Function : _Z4demoPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GT.AND P0, PT, R5, 0x64, PT ; /* 0x000000640500780c */
/* 0x001fda0003f04270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0060*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*0070*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R5, R0, R5, RZ ; /* 0x0000000500057210 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*00a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#define NUMBER 100
// Kernel: adds each thread's index to the corresponding array element.
//
// arr: device buffer holding at least NUMBER ints.
//
// Bug fix 1: the original guard `tx > NUMBER` admitted tx == NUMBER
// (thread 100) and wrote arr[NUMBER], one past the end of the
// NUMBER-int allocation; reject tx >= NUMBER instead.
// Bug fix 2: the original early `return` made out-of-range threads skip
// the trailing __syncthreads() while the rest waited — a divergent
// barrier is undefined behavior. Guard only the store so every thread
// of the block reaches the barrier.
__global__ void demo(int *arr){
    int tx = threadIdx.x;
    if (tx < NUMBER) {          // only the first NUMBER threads touch arr
        arr[tx] += tx;
    }
    __syncthreads();            // all 512 launched threads reach this together
}
// Host driver: allocates a NUMBER-int device buffer, launches demo() on
// one block of 512 threads, and frees the buffer. Returns 0 on success;
// on any CUDA error prints the error string and exits with -1.
int main(int argc, char **argv){
    int *arr;
    cudaError_t err;

    err = cudaMalloc((void**)&arr, NUMBER * sizeof(int));
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    dim3 dimGrid(1, 1);
    dim3 dimBlock(512, 1);      // 512 threads; demo() guards the extra ones
    demo<<<dimGrid, dimBlock>>>(arr);

    // Bug fix: the original never checked the launch itself, so a bad
    // configuration or launch failure went unnoticed until cudaFree.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    err = cudaFree(arr);
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return 0;
}
.text
#APP
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
# atexit handler installed by __sti____cudaRegisterAll: unregisters the
# fatbinary recorded in _ZL20__cudaFatCubinHandle. SysV AMD64; clobbers rdi.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
	.cfi_startproc
	endbr64				# CET/IBT landing pad (invoked indirectly via atexit)
	subq	$8, %rsp		# keep rsp 16-byte aligned at the call below
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = saved module handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z4demoPiPi
.type _Z23__device_stub__Z4demoPiPi, @function
_Z23__device_stub__Z4demoPiPi:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4demoPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z23__device_stub__Z4demoPiPi, .-_Z23__device_stub__Z4demoPiPi
.globl _Z4demoPi
.type _Z4demoPi, @function
_Z4demoPi:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z4demoPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4demoPi, .-_Z4demoPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error: %s\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L17
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $512, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movq 8(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L19
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.L18:
movq 8(%rsp), %rdi
call _Z23__device_stub__Z4demoPiPi
jmp .L13
.L19:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z4demoPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z4demoPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#define NUMBER 100
// Kernel: add each thread's index to the corresponding array element.
//
// arr must hold at least NUMBER ints. The original guard `tx > NUMBER`
// let tx == NUMBER (100) through, writing one element past the end of a
// NUMBER-element allocation, and the early `return` made the trailing
// __syncthreads() divergent (undefined behavior when not every thread of
// the block reaches the barrier). Guarding the store instead fixes both.
__global__ void demo(int *arr){
    int tx = threadIdx.x;      // thread index within the block
    if (tx < NUMBER) {         // in-bounds threads only; no early return
        arr[tx] += tx;
    }
    __syncthreads();           // now reached by every thread in the block
}
int main(int argc , char **argv){
int * arr;
cudaError_t err;
err=cudaMalloc((void**)&arr,NUMBER*sizeof(int));
if( err != cudaSuccess)
{
printf("CUDA error: %s\n", cudaGetErrorString(err));
exit(-1);
}
dim3 dimGrid(1,1);
dim3 dimBlock(512,1);
demo<<<dimGrid,dimBlock>>>(arr);
err=cudaFree(arr);
if( err != cudaSuccess)
{
printf("CUDA error: %s\n", cudaGetErrorString(err));
exit(-1);
}
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define NUMBER 100
// Kernel: add each thread's index to the corresponding array element.
//
// arr must hold at least NUMBER ints. The original guard `tx > NUMBER`
// let tx == NUMBER (100) through, writing one element past the end of a
// NUMBER-element allocation, and the early `return` made the trailing
// __syncthreads() divergent (undefined behavior when not every thread of
// the block reaches the barrier). Guarding the store instead fixes both.
__global__ void demo(int *arr){
    int tx = threadIdx.x;      // thread index within the block
    if (tx < NUMBER) {         // in-bounds threads only; no early return
        arr[tx] += tx;
    }
    __syncthreads();           // now reached by every thread in the block
}
// Host driver: allocate NUMBER ints on the device, run demo() with one
// 512x1 block on a 1x1 grid, then free the buffer. Any HIP API failure is
// reported and aborts the process with exit code -1.
int main(int argc , char **argv){
    int *arr;
    hipError_t err = hipMalloc((void**)&arr, NUMBER * sizeof(int));
    if (err != hipSuccess) {
        printf("CUDA error: %s\n", hipGetErrorString(err));
        exit(-1);
    }

    dim3 dimGrid(1, 1);     // a single block...
    dim3 dimBlock(512, 1);  // ...of 512x1 threads
    demo<<<dimGrid, dimBlock>>>(arr);

    err = hipFree(arr);
    if (err != hipSuccess) {
        printf("CUDA error: %s\n", hipGetErrorString(err));
        exit(-1);
    }
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define NUMBER 100
// Kernel: add each thread's index to the corresponding array element.
//
// arr must hold at least NUMBER ints. The original guard `tx > NUMBER`
// let tx == NUMBER (100) through, writing one element past the end of a
// NUMBER-element allocation, and the early `return` made the trailing
// __syncthreads() divergent (undefined behavior when not every thread of
// the block reaches the barrier). Guarding the store instead fixes both.
__global__ void demo(int *arr){
    int tx = threadIdx.x;      // thread index within the block
    if (tx < NUMBER) {         // in-bounds threads only; no early return
        arr[tx] += tx;
    }
    __syncthreads();           // now reached by every thread in the block
}
int main(int argc , char **argv){
int * arr;
hipError_t err;
err=hipMalloc((void**)&arr,NUMBER*sizeof(int));
if( err != hipSuccess)
{
printf("CUDA error: %s\n", hipGetErrorString(err));
exit(-1);
}
dim3 dimGrid(1,1);
dim3 dimBlock(512,1);
demo<<<dimGrid,dimBlock>>>(arr);
err=hipFree(arr);
if( err != hipSuccess)
{
printf("CUDA error: %s\n", hipGetErrorString(err));
exit(-1);
}
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4demoPi
.globl _Z4demoPi
.p2align 8
.type _Z4demoPi,@function
; demo kernel, gfx1100 wave32.
; On entry: v0 = threadIdx.x, s[0:1] = kernarg segment base (holds arr).
_Z4demoPi:
	s_mov_b32 s2, exec_lo                  ; save exec mask
	v_cmpx_gt_u32_e32 0x65, v0             ; keep lanes with tid < 0x65 (101); tx > 100 exits
	s_cbranch_execz .LBB0_2                ; every lane out of range -> skip body
	s_load_b64 s[0:1], s[0:1], 0x0         ; load arr pointer from kernargs
	v_lshlrev_b32_e32 v1, 2, v0            ; byte offset = tid * 4
	s_waitcnt lgkmcnt(0)                   ; wait for the scalar load
	global_load_b32 v2, v1, s[0:1]         ; v2 = arr[tid]
	s_waitcnt vmcnt(0)                     ; wait for the vector load
	v_add_nc_u32_e32 v0, v2, v0            ; v0 = arr[tid] + tid
	global_store_b32 v1, v0, s[0:1]        ; arr[tid] = v0
	s_waitcnt_vscnt null, 0x0              ; drain stores before the barrier
	s_barrier                              ; __syncthreads()
	buffer_gl0_inv                         ; invalidate L0 so peer stores are visible
.LBB0_2:
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4demoPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4demoPi, .Lfunc_end0-_Z4demoPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4demoPi
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z4demoPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define NUMBER 100
// Kernel: add each thread's index to the corresponding array element.
//
// arr must hold at least NUMBER ints. The original guard `tx > NUMBER`
// let tx == NUMBER (100) through, writing one element past the end of a
// NUMBER-element allocation, and the early `return` made the trailing
// __syncthreads() divergent (undefined behavior when not every thread of
// the block reaches the barrier). Guarding the store instead fixes both.
__global__ void demo(int *arr){
    int tx = threadIdx.x;      // thread index within the block
    if (tx < NUMBER) {         // in-bounds threads only; no early return
        arr[tx] += tx;
    }
    __syncthreads();           // now reached by every thread in the block
}
int main(int argc , char **argv){
int * arr;
hipError_t err;
err=hipMalloc((void**)&arr,NUMBER*sizeof(int));
if( err != hipSuccess)
{
printf("CUDA error: %s\n", hipGetErrorString(err));
exit(-1);
}
dim3 dimGrid(1,1);
dim3 dimBlock(512,1);
demo<<<dimGrid,dimBlock>>>(arr);
err=hipFree(arr);
if( err != hipSuccess)
{
printf("CUDA error: %s\n", hipGetErrorString(err));
exit(-1);
}
return 0;
} | .text
.file "misc1_bak2.hip"
.globl _Z19__device_stub__demoPi # -- Begin function _Z19__device_stub__demoPi
.p2align 4, 0x90
.type _Z19__device_stub__demoPi,@function
_Z19__device_stub__demoPi: # @_Z19__device_stub__demoPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4demoPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z19__device_stub__demoPi, .Lfunc_end0-_Z19__device_stub__demoPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
jne .LBB1_5
# %bb.1:
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_3
# %bb.2:
movq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z4demoPi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_3:
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB1_5
# %bb.4:
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.LBB1_5:
.cfi_def_cfa_offset 96
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4demoPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# Destructor registered via atexit by __hip_module_ctor: unregisters the
# fat binary exactly once and clears the cached handle.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi   # rdi = cached module handle
	testq	%rdi, %rdi                        # never registered?
	je	.LBB3_2                           # then nothing to tear down
# %bb.1:
	pushq	%rax                              # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)     # clear handle so a rerun is a no-op
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
# -- End function
.type _Z4demoPi,@object # @_Z4demoPi
.section .rodata,"a",@progbits
.globl _Z4demoPi
.p2align 3, 0x0
_Z4demoPi:
.quad _Z19__device_stub__demoPi
.size _Z4demoPi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error: %s\n"
.size .L.str, 16
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4demoPi"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__demoPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4demoPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4demoPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GT.AND P0, PT, R5, 0x64, PT ; /* 0x000000640500780c */
/* 0x001fda0003f04270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0060*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*0070*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R5, R0, R5, RZ ; /* 0x0000000500057210 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*00a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4demoPi
.globl _Z4demoPi
.p2align 8
.type _Z4demoPi,@function
; demo kernel, gfx1100 wave32.
; On entry: v0 = threadIdx.x, s[0:1] = kernarg segment base (holds arr).
_Z4demoPi:
	s_mov_b32 s2, exec_lo                  ; save exec mask
	v_cmpx_gt_u32_e32 0x65, v0             ; keep lanes with tid < 0x65 (101); tx > 100 exits
	s_cbranch_execz .LBB0_2                ; every lane out of range -> skip body
	s_load_b64 s[0:1], s[0:1], 0x0         ; load arr pointer from kernargs
	v_lshlrev_b32_e32 v1, 2, v0            ; byte offset = tid * 4
	s_waitcnt lgkmcnt(0)                   ; wait for the scalar load
	global_load_b32 v2, v1, s[0:1]         ; v2 = arr[tid]
	s_waitcnt vmcnt(0)                     ; wait for the vector load
	v_add_nc_u32_e32 v0, v2, v0            ; v0 = arr[tid] + tid
	global_store_b32 v1, v0, s[0:1]        ; arr[tid] = v0
	s_waitcnt_vscnt null, 0x0              ; drain stores before the barrier
	s_barrier                              ; __syncthreads()
	buffer_gl0_inv                         ; invalidate L0 so peer stores are visible
.LBB0_2:
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4demoPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4demoPi, .Lfunc_end0-_Z4demoPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4demoPi
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z4demoPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00134e17_00000000-6_misc1_bak2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z4demoPiPi
.type _Z23__device_stub__Z4demoPiPi, @function
_Z23__device_stub__Z4demoPiPi:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4demoPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z23__device_stub__Z4demoPiPi, .-_Z23__device_stub__Z4demoPiPi
.globl _Z4demoPi
.type _Z4demoPi, @function
_Z4demoPi:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z4demoPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4demoPi, .-_Z4demoPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error: %s\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L17
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $512, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L13:
movq 8(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L19
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.L18:
movq 8(%rsp), %rdi
call _Z23__device_stub__Z4demoPiPi
jmp .L13
.L19:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z4demoPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z4demoPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "misc1_bak2.hip"
.globl _Z19__device_stub__demoPi # -- Begin function _Z19__device_stub__demoPi
.p2align 4, 0x90
.type _Z19__device_stub__demoPi,@function
_Z19__device_stub__demoPi: # @_Z19__device_stub__demoPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4demoPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z19__device_stub__demoPi, .Lfunc_end0-_Z19__device_stub__demoPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
jne .LBB1_5
# %bb.1:
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_3
# %bb.2:
movq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z4demoPi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_3:
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB1_5
# %bb.4:
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.LBB1_5:
.cfi_def_cfa_offset 96
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4demoPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4demoPi,@object # @_Z4demoPi
.section .rodata,"a",@progbits
.globl _Z4demoPi
.p2align 3, 0x0
_Z4demoPi:
.quad _Z19__device_stub__demoPi
.size _Z4demoPi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error: %s\n"
.size .L.str, 16
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4demoPi"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__demoPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4demoPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <fstream>
#include <cstdlib>
#include <cmath>
#include <stdio.h>
#include <vector>
#include <queue>
#define maxIter 3500
#define BLOCK_SIZE 32
// Learning rate policy
// Decaying SGD learning rate: alpha / (1 + beta * t^1.5), where t is the
// visit count of the entry being updated. The division mixes float and
// double (1.0 is double) before truncating to float on return; powf keeps
// the power itself in single precision.
__device__ float step_fn(int t){
    float alpha = 0.012, beta = 0.01;
    return alpha/(1.0+beta*powf(t,1.5));
}
// Kernel: one SGD step of the factorization A ~= W * H.
// Thread (i, j) covers entry (i, j) of dA (m x n, row-major) and updates
// row i of dW (m x k) and column j of dH (k x n), but only when the entry
// is an observed rating (dA != 0) AND item j is the one currently assigned
// to user i (dI[i] == j) -- so at most one j per row is active, which keeps
// the dW row update free of intra-launch races on dW.
// dT (m x n) counts visits per entry and drives the decaying step size;
// lambda is the L2 regularization weight.
__global__ void SGD(float* dA, float* dW, float* dH, int* dI, int* dT, int m, int n, int k, float lambda){
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // user (row) index
    int j = blockIdx.y * blockDim.y + threadIdx.y;   // item (column) index
    if(i<m && j<n){
        if(dA[i*n+j]!=0 && dI[i]==j){
            dT[i*n+j]++;                             // bump visit count first; step uses the new value
            float step = step_fn(dT[i*n+j]), At = 0.0;
            // Compute inner product of <dWi, dHj>: current prediction for A[i][j]
            for(int l=0; l<k; l++){
                At += dW[i*k+l]*dH[l*n+j];
            }
            // SGD update for dWi: gradient of squared error plus L2 term
            for(int l=0; l<k; l++){
                dW[i*k+l] -= step*((At-dA[i*n+j])*dH[l*n+j] + lambda*dW[i*k+l]);
            }
            // Recompute the prediction with the freshly updated dWi
            At = 0.0;
            for(int l=0; l<k; l++){
                At += dW[i*k+l]*dH[l*n+j];
            }
            // SGD update for dHj (column j of H)
            for(int l=0; l<k; l++){
                dH[l*n+j] -= step*((At-dA[i*n+j])*dW[i*k+l] + lambda*dH[l*n+j]);
            }
        }
    }
}
// Kernel: dAt = dW * dH, one output element per thread.
// dW is m x k, dH is k x n, dAt is m x n (all row-major).
__global__ void matrixMultiplication(float* dAt, float* dW, float* dH, int m, int n, int k){
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // output row
    int j = blockIdx.y * blockDim.y + threadIdx.y;   // output column
    if(i<m && j<n){
        float tmp = 0.0;
        // Dot product of row i of dW with column j of dH.
        for(int l=0; l<k; l++){
            tmp += dW[i*k+l] * dH[l*n+j];
        }
        dAt[i*n+j] = tmp;
    }
}
// Kernel: per-column training error, one thread per column j.
// Accumulates the squared error over observed ratings (dA > 0) of column j
// into dErrSum[j] and the count of such ratings into dNum[j]; both are
// later reduced by computeRMSE.
__global__ void computeColErrSum(float* dA, float* dAt, float* dErrSum, int* dNum, int m,int n){
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(j<n){
        float errSum = 0.0;
        int num = 0;
        for(int i=0; i<m; i++){
            if(dA[i*n+j]>0){   // only observed entries count toward the RMSE
                errSum += (dA[i*n+j]-dAt[i*n+j])*(dA[i*n+j]-dAt[i*n+j]);
                num++;
            }
        }
        dErrSum[j] = errSum;
        dNum[j] = num;
    }
}
// Kernel: final RMSE reduction over the per-column partials, printing the
// result every 100th iteration. NOTE(review): there is no thread-id guard,
// so every launched thread repeats the full sum and print -- presumably it
// is launched <<<1,1>>>; confirm at the call site.
__global__ void computeRMSE(float* dErrSum, int* dNum, int n, int ite){
    float errSum = 0.0;
    int num = 0;
    for(int j=0; j<n; j++){
        errSum += dErrSum[j];
        num += dNum[j];
    }
    // num == 0 would divide by zero; assumes at least one observed rating.
    float RMSE = sqrt(errSum/num);
    //printf("Traning RMSE: %f\n", RMSE);
    if(ite%100==0) printf("Traning RMSE: %f\n", RMSE);
}
// Load an m x n matrix of whitespace-separated floats from "data/data_<m>"
// into A (row-major). Silently does nothing if the file cannot be opened.
// NOTE(review): if the file holds fewer than m*n values, failed extractions
// leave buf at its last successful value, which is then copied into the
// remaining entries -- confirm the data files always contain m*n numbers.
void readData(float* A, int m, int n){
    char file_name[50];
    sprintf(file_name, "data/data_%d",m);
    std::ifstream is(file_name);
    if(is.is_open()){
        float buf;
        for(int i=0; i<m*n; i++){
            is >> buf;
            A[i] = buf;
        }
        is.close();
    }
}
// Fill the embeddings W (m x k) and H (k x n) with uniform random values
// in [0, 1/sqrt(k)]. Consumes exactly m*k + k*n rand() calls -- W first,
// in row-major order -- so the pseudo-random stream is reproducible.
void initEmbedding(float* W, float* H, int m, int n, int k){
    float low = 0.0, high = 1.0/sqrt(k);
    float span = high - low;                 // scale factor for rand()
    for(int idx = 0; idx < m*k; idx++){
        W[idx] = span * rand() / RAND_MAX + low;
    }
    for(int idx = 0; idx < k*n; idx++){
        H[idx] = span * rand() / RAND_MAX + low;
    }
}
// Distribute item ids 0..n-1 across the m per-user queues, choosing a
// uniformly random owner (rand() % m) for each item in ascending item order.
void initQueue(std::vector<std::queue<int> >& userQueue, int m, int n){
    for(int item = 0; item < n; ++item){
        int owner = rand() % m;
        userQueue[owner].push(item);
    }
}
// Zero the m x n visit-count matrix T (row-major, contiguous).
void initTimes(int* T, int m, int n){
    int total = m * n;
    for(int idx = 0; idx < total; idx++){
        T[idx] = 0;
    }
}
void assignItemToUser(std::vector<std::queue<int> >& userQueue, int* I){
for(int i=0; i<userQueue.size(); i++){
if(userQueue[i].empty()) I[i] = -1;
else{
I[i] = userQueue[i].front();
userQueue[i].pop();
}
}
}
void assignNextItem(std::vector<std::queue<int> >& userQueue, int* I){
for(int i=0; i<userQueue.size(); i++){
int ind = rand()%userQueue.size();
userQueue[ind].push(I[i]);
}
}
void printMatrix(float* A, int m, int n){
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
std::cout << A[i*n+j] << ' ';
}
std::cout << std::endl;
}
}
int main(int argc, char** argv) {
if(argc<3){
std::cout << "Usage: ./mc_sgd m n" << std::endl;
return 0;
}
srand(time(NULL));
int m = atoi(argv[1]), n = atoi(argv[2]), k = 100, ite = 0;
float lambda = 0.05;
float *A, *dA, *W, *dW, *H, *dH, *dAt, *dErrSum;
int *I, *dI, *dNum, *T, *dT;
std::vector<std::queue<int> > userQueue(m);
dim3 numBlocks((m+BLOCK_SIZE-1)/BLOCK_SIZE, (n+BLOCK_SIZE-1)/BLOCK_SIZE);
dim3 numThreads(BLOCK_SIZE, BLOCK_SIZE);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
A = (float*)malloc(sizeof(float)*m*n);
W = (float*)malloc(sizeof(float)*m*k);
H = (float*)malloc(sizeof(float)*k*n);
I = (int*)malloc(sizeof(int)*m);
T = (int*)malloc(sizeof(int)*m*n);
cudaMalloc(&dA, sizeof(float)*m*n);
cudaMalloc(&dW, sizeof(float)*m*k);
cudaMalloc(&dH, sizeof(float)*k*n);
cudaMalloc(&dI, sizeof(int)*m);
cudaMalloc(&dAt, sizeof(float)*m*n);
cudaMalloc(&dErrSum, sizeof(float)*n);
cudaMalloc(&dNum, sizeof(int)*n);
cudaMalloc(&dT, sizeof(int)*m*n);
readData(A,m,n);
initEmbedding(W, H, m, n, k);
initQueue(userQueue,m,n);
initTimes(T,m,n);
cudaMemcpy(dA, A, sizeof(float)*m*n, cudaMemcpyHostToDevice);
cudaMemcpy(dW, W, sizeof(float)*m*k, cudaMemcpyHostToDevice);
cudaMemcpy(dH, H, sizeof(float)*k*n, cudaMemcpyHostToDevice);
cudaMemcpy(dT, T, sizeof(int)*m*n, cudaMemcpyHostToDevice);
cudaEventRecord(start);
while(ite<maxIter){
assignItemToUser(userQueue,I);
cudaMemcpy(dI, I, sizeof(int)*m, cudaMemcpyHostToDevice);
SGD<<<numBlocks, numThreads>>>(dA,dW,dH,dI,dT,m,n,k,lambda);
assignNextItem(userQueue,I);
ite++;
cudaDeviceSynchronize();
/*
// Compute RMSE
matrixMultiplication<<<numBlocks, numThreads>>>(dAt,dW,dH,m,n,k);
computeColErrSum<<<(n+BLOCK_SIZE-1)/BLOCK_SIZE,BLOCK_SIZE>>>(dA,dAt,dErrSum,dNum,m,n);
computeRMSE<<<1,1>>>(dErrSum,dNum,n,ite);
cudaDeviceSynchronize();
*/
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "ms" << std::endl;
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <cmath>
#include <stdio.h>
#include <vector>
#include <queue>
#define maxIter 3500
#define BLOCK_SIZE 32
// Learning rate policy
__device__ float step_fn(int t){
float alpha = 0.012, beta = 0.01;
return alpha/(1.0+beta*powf(t,1.5));
}
__global__ void SGD(float* dA, float* dW, float* dH, int* dI, int* dT, int m, int n, int k, float lambda){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<m && j<n){
if(dA[i*n+j]!=0 && dI[i]==j){
dT[i*n+j]++;
float step = step_fn(dT[i*n+j]), At = 0.0;
// Compute inner product of <dWi, dHj>
for(int l=0; l<k; l++){
At += dW[i*k+l]*dH[l*n+j];
}
// SGD update for dWi
for(int l=0; l<k; l++){
dW[i*k+l] -= step*((At-dA[i*n+j])*dH[l*n+j] + lambda*dW[i*k+l]);
}
// Compute inner product of <dWi, dHj>
At = 0.0;
for(int l=0; l<k; l++){
At += dW[i*k+l]*dH[l*n+j];
}
// SGD update for dHj
for(int l=0; l<k; l++){
dH[l*n+j] -= step*((At-dA[i*n+j])*dW[i*k+l] + lambda*dH[l*n+j]);
}
}
}
}
__global__ void matrixMultiplication(float* dAt, float* dW, float* dH, int m, int n, int k){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<m && j<n){
float tmp = 0.0;
for(int l=0; l<k; l++){
tmp += dW[i*k+l] * dH[l*n+j];
}
dAt[i*n+j] = tmp;
}
}
__global__ void computeColErrSum(float* dA, float* dAt, float* dErrSum, int* dNum, int m,int n){
int j = blockIdx.x * blockDim.x + threadIdx.x;
if(j<n){
float errSum = 0.0;
int num = 0;
for(int i=0; i<m; i++){
if(dA[i*n+j]>0){
errSum += (dA[i*n+j]-dAt[i*n+j])*(dA[i*n+j]-dAt[i*n+j]);
num++;
}
}
dErrSum[j] = errSum;
dNum[j] = num;
}
}
__global__ void computeRMSE(float* dErrSum, int* dNum, int n, int ite){
float errSum = 0.0;
int num = 0;
for(int j=0; j<n; j++){
errSum += dErrSum[j];
num += dNum[j];
}
float RMSE = sqrt(errSum/num);
//printf("Traning RMSE: %f\n", RMSE);
if(ite%100==0) printf("Traning RMSE: %f\n", RMSE);
}
void readData(float* A, int m, int n){
char file_name[50];
sprintf(file_name, "data/data_%d",m);
std::ifstream is(file_name);
if(is.is_open()){
float buf;
for(int i=0; i<m*n; i++){
is >> buf;
A[i] = buf;
}
is.close();
}
}
void initEmbedding(float* W, float* H, int m, int n, int k){
float low = 0.0, high = 1.0/sqrt(k);
for(int i=0; i<m; i++){
for(int j=0; j<k; j++){
W[i*k+j] = (high - low) * rand() / RAND_MAX + low;
}
}
for(int i=0; i<k; i++){
for(int j=0; j<n; j++){
H[i*n+j] = (high - low) * rand() / RAND_MAX + low;
}
}
}
void initQueue(std::vector<std::queue<int> >& userQueue, int m, int n){
for(int j=0; j<n; j++){
int i = rand()%m;
userQueue[i].push(j);
}
}
void initTimes(int* T, int m, int n){
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
T[i*n+j] = 0;
}
}
}
void assignItemToUser(std::vector<std::queue<int> >& userQueue, int* I){
for(int i=0; i<userQueue.size(); i++){
if(userQueue[i].empty()) I[i] = -1;
else{
I[i] = userQueue[i].front();
userQueue[i].pop();
}
}
}
void assignNextItem(std::vector<std::queue<int> >& userQueue, int* I){
for(int i=0; i<userQueue.size(); i++){
int ind = rand()%userQueue.size();
userQueue[ind].push(I[i]);
}
}
void printMatrix(float* A, int m, int n){
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
std::cout << A[i*n+j] << ' ';
}
std::cout << std::endl;
}
}
int main(int argc, char** argv) {
if(argc<3){
std::cout << "Usage: ./mc_sgd m n" << std::endl;
return 0;
}
srand(time(NULL));
int m = atoi(argv[1]), n = atoi(argv[2]), k = 100, ite = 0;
float lambda = 0.05;
float *A, *dA, *W, *dW, *H, *dH, *dAt, *dErrSum;
int *I, *dI, *dNum, *T, *dT;
std::vector<std::queue<int> > userQueue(m);
dim3 numBlocks((m+BLOCK_SIZE-1)/BLOCK_SIZE, (n+BLOCK_SIZE-1)/BLOCK_SIZE);
dim3 numThreads(BLOCK_SIZE, BLOCK_SIZE);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
A = (float*)malloc(sizeof(float)*m*n);
W = (float*)malloc(sizeof(float)*m*k);
H = (float*)malloc(sizeof(float)*k*n);
I = (int*)malloc(sizeof(int)*m);
T = (int*)malloc(sizeof(int)*m*n);
hipMalloc(&dA, sizeof(float)*m*n);
hipMalloc(&dW, sizeof(float)*m*k);
hipMalloc(&dH, sizeof(float)*k*n);
hipMalloc(&dI, sizeof(int)*m);
hipMalloc(&dAt, sizeof(float)*m*n);
hipMalloc(&dErrSum, sizeof(float)*n);
hipMalloc(&dNum, sizeof(int)*n);
hipMalloc(&dT, sizeof(int)*m*n);
readData(A,m,n);
initEmbedding(W, H, m, n, k);
initQueue(userQueue,m,n);
initTimes(T,m,n);
hipMemcpy(dA, A, sizeof(float)*m*n, hipMemcpyHostToDevice);
hipMemcpy(dW, W, sizeof(float)*m*k, hipMemcpyHostToDevice);
hipMemcpy(dH, H, sizeof(float)*k*n, hipMemcpyHostToDevice);
hipMemcpy(dT, T, sizeof(int)*m*n, hipMemcpyHostToDevice);
hipEventRecord(start);
while(ite<maxIter){
assignItemToUser(userQueue,I);
hipMemcpy(dI, I, sizeof(int)*m, hipMemcpyHostToDevice);
SGD<<<numBlocks, numThreads>>>(dA,dW,dH,dI,dT,m,n,k,lambda);
assignNextItem(userQueue,I);
ite++;
hipDeviceSynchronize();
/*
// Compute RMSE
matrixMultiplication<<<numBlocks, numThreads>>>(dAt,dW,dH,m,n,k);
computeColErrSum<<<(n+BLOCK_SIZE-1)/BLOCK_SIZE,BLOCK_SIZE>>>(dA,dAt,dErrSum,dNum,m,n);
computeRMSE<<<1,1>>>(dErrSum,dNum,n,ite);
cudaDeviceSynchronize();
*/
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "ms" << std::endl;
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__ void calculate_sumterm_part(double2 * Up, double2 * Vpl, const double2 * A_t, const double* SR, const unsigned char* nonzero_midx1234s, const unsigned int N, const unsigned int M, const double SK_factor, const unsigned int NUM_NONZERO, const unsigned int NUM_MODES) {
unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
// Calculate the index
unsigned int Midx = full_thread_idx / N;
unsigned int Nidx = full_thread_idx % N;
unsigned int NM = N*M;
if (full_thread_idx >= NM) {
return;
}
// Compute the tensors
for (int i = 0; i < NUM_NONZERO; i++) {
unsigned int midx1 = nonzero_midx1234s[i*4]-1;
unsigned int midx2 = nonzero_midx1234s[1+i*4]-1;
unsigned int midx3 = nonzero_midx1234s[2+i*4]-1;
unsigned int midx4 = nonzero_midx1234s[3+i*4]-1;
double a = A_t[Nidx+Midx*N+midx2*NM].x;
double b = A_t[Nidx+Midx*N+midx2*NM].y;
double c = A_t[Nidx+Midx*N+midx3*NM].x;
double d = A_t[Nidx+Midx*N+midx3*NM].y;
double e = A_t[Nidx+Midx*N+midx4*NM].x;
double f = A_t[Nidx+Midx*N+midx4*NM].y;
Up[Nidx+Midx*N+midx1*NM].x = Up[Nidx+Midx*N+midx1*NM].x + SK_factor*SR[i]*(a*c*e-b*d*e+a*d*f+c*b*f);
Up[Nidx+Midx*N+midx1*NM].y = Up[Nidx+Midx*N+midx1*NM].y + SK_factor*SR[i]*(a*d*e+c*b*e-a*c*f+b*d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x + SR[i]*(c*e+d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y + SR[i]*(d*e-c*f);
}
} | code for sm_80
Function : _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x188] ; /* 0x0000620000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x198], PT ; /* 0x00006600ff007a0c */
/* 0x000fe20003f05270 */
/*0040*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.U32.OR P0, PT, R0, UR4, !P0 ; /* 0x0000000400007c0c */
/* 0x000fda000c706470 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff127624 */
/* 0x000fe200078e00ff */
/*00a0*/ MOV R19, c[0x0][0x184] ; /* 0x0000610000137a02 */
/* 0x000fe20000000f00 */
/*00b0*/ IMAD.MOV.U32 R26, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff1a7624 */
/* 0x000fe200078e00ff */
/*00c0*/ MOV R27, c[0x0][0x17c] ; /* 0x00005f00001b7a02 */
/* 0x000fe20000000f00 */
/*00d0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe200078e00ff */
/*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*00f0*/ LDG.E.U8 R3, [R18.64+0x1] ; /* 0x0000010612037981 */
/* 0x000ea8000c1e1100 */
/*0100*/ LDG.E.U8 R4, [R18.64+0x2] ; /* 0x0000020612047981 */
/* 0x001ee8000c1e1100 */
/*0110*/ LDG.E.U8 R6, [R18.64+0x3] ; /* 0x0000030612067981 */
/* 0x000f28000c1e1100 */
/*0120*/ LDG.E.U8 R16, [R18.64] ; /* 0x0000000612107981 */
/* 0x000f62000c1e1100 */
/*0130*/ IADD3 R3, R3, -0x1, RZ ; /* 0xffffffff03037810 */
/* 0x004fc40007ffe0ff */
/*0140*/ IADD3 R5, R4, -0x1, RZ ; /* 0xffffffff04057810 */
/* 0x008fc60007ffe0ff */
/*0150*/ IMAD R17, R3, UR4, RZ ; /* 0x0000000403117c24 */
/* 0x000fe2000f8e02ff */
/*0160*/ HFMA2.MMA R3, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff037435 */
/* 0x000fe200000001ff */
/*0170*/ IADD3 R7, R6, -0x1, RZ ; /* 0xffffffff06077810 */
/* 0x010fe20007ffe0ff */
/*0180*/ IMAD R4, R5, UR4, R0.reuse ; /* 0x0000000405047c24 */
/* 0x100fe4000f8e0200 */
/*0190*/ IMAD.IADD R28, R0, 0x1, R17 ; /* 0x00000001001c7824 */
/* 0x000fe400078e0211 */
/*01a0*/ IMAD R8, R7, UR4, R0 ; /* 0x0000000407087c24 */
/* 0x000fc8000f8e0200 */
/*01b0*/ IMAD.WIDE.U32 R4, R4, R3, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0003 */
/*01c0*/ IMAD.WIDE.U32 R28, R28, R3.reuse, c[0x0][0x170] ; /* 0x00005c001c1c7625 */
/* 0x080fe400078e0003 */
/*01d0*/ LDG.E.128 R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea4000c1e1d00 */
/*01e0*/ IMAD.WIDE.U32 R8, R8, R3, c[0x0][0x170] ; /* 0x00005c0008087625 */
/* 0x000fe400078e0003 */
/*01f0*/ LDG.E.128 R12, [R28.64] ; /* 0x000000061c0c7981 */
/* 0x000ea8000c1e1d00 */
/*0200*/ LDG.E.128 R8, [R8.64] ; /* 0x0000000608087981 */
/* 0x000ee2000c1e1d00 */
/*0210*/ DMUL R20, R14, R6 ; /* 0x000000060e147228 */
/* 0x004ec80000000000 */
/*0220*/ DMUL R22, R12, R4 ; /* 0x000000040c167228 */
/* 0x000fc80000000000 */
/*0230*/ DMUL R24, R8, R20 ; /* 0x0000001408187228 */
/* 0x008e080000000000 */
/*0240*/ DMUL R12, R12, R6 ; /* 0x000000060c0c7228 */
/* 0x000fc80000000000 */
/*0250*/ DFMA R24, R8, R22, -R24 ; /* 0x000000160818722b */
/* 0x001e080000000818 */
/*0260*/ DMUL R14, R14, R4 ; /* 0x000000040e0e7228 */
/* 0x000fc80000000000 */
/*0270*/ DFMA R24, R10, R12, R24 ; /* 0x0000000c0a18722b */
/* 0x001e0c0000000018 */
/*0280*/ DFMA R24, R10, R14, R24 ; /* 0x0000000e0a18722b */
/* 0x001fc80000000018 */
/*0290*/ DMUL R14, R8, R14 ; /* 0x0000000e080e7228 */
/* 0x000e0c0000000000 */
/*02a0*/ DFMA R14, R8, R12, R14 ; /* 0x0000000c080e722b */
/* 0x001a24000000000e */
/*02b0*/ IADD3 R13, R16, -0x1, RZ ; /* 0xffffffff100d7810 */
/* 0x020fc80007ffe0ff */
/*02c0*/ DFMA R22, R10, -R22, R14 ; /* 0x800000160a16722b */
/* 0x001e22000000000e */
/*02d0*/ IMAD R16, R13, UR4, R0 ; /* 0x000000040d107c24 */
/* 0x000fca000f8e0200 */
/*02e0*/ DFMA R20, R10, R20, R22 ; /* 0x000000140a14722b */
/* 0x0011e20000000016 */
/*02f0*/ IMAD.WIDE.U32 R28, R16, R3, c[0x0][0x160] ; /* 0x00005800101c7625 */
/* 0x000fc600078e0003 */
/*0300*/ LDG.E.64 R22, [R26.64] ; /* 0x000000061a167981 */
/* 0x001ea8000c1e1b00 */
/*0310*/ LDG.E.128 R12, [R28.64] ; /* 0x000000061c0c7981 */
/* 0x000ee2000c1e1d00 */
/*0320*/ DMUL R22, R22, c[0x0][0x190] ; /* 0x0000640016167a28 */
/* 0x004ecc0000000000 */
/*0330*/ DFMA R22, R22, R24, R12 ; /* 0x000000181616722b */
/* 0x008e0e000000000c */
/*0340*/ STG.E.64 [R28.64], R22 ; /* 0x000000161c007986 */
/* 0x001fe8000c101b06 */
/*0350*/ LDG.E.64 R12, [R26.64] ; /* 0x000000061a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0360*/ IMAD R16, R17, c[0x0][0x19c], R16 ; /* 0x0000670011107a24 */
/* 0x000fc800078e0210 */
/*0370*/ IMAD.WIDE.U32 R16, R16, R3, c[0x0][0x168] ; /* 0x00005a0010107625 */
/* 0x000fe200078e0003 */
/*0380*/ DMUL R12, R12, c[0x0][0x190] ; /* 0x000064000c0c7a28 */
/* 0x004e0c0000000000 */
/*0390*/ DFMA R20, R12, R20, R14 ; /* 0x000000140c14722b */
/* 0x001e0e000000000e */
/*03a0*/ STG.E.64 [R28.64+0x8], R20 ; /* 0x000008141c007986 */
/* 0x0011e8000c101b06 */
/*03b0*/ LDG.E.128 R12, [R16.64] ; /* 0x00000006100c7981 */
/* 0x000ea8000c1e1d00 */
/*03c0*/ LDG.E.64 R20, [R26.64] ; /* 0x000000061a147981 */
/* 0x001ea2000c1e1b00 */
/*03d0*/ DMUL R24, R6, R10 ; /* 0x0000000a06187228 */
/* 0x000e0c0000000000 */
/*03e0*/ DFMA R24, R4, R8, R24 ; /* 0x000000080418722b */
/* 0x001e8c0000000018 */
/*03f0*/ DFMA R12, R24, R20, R12 ; /* 0x00000014180c722b */
/* 0x004e0e000000000c */
/*0400*/ STG.E.64 [R16.64], R12 ; /* 0x0000000c10007986 */
/* 0x0011e8000c101b06 */
/*0410*/ LDG.E.64 R22, [R26.64] ; /* 0x000000061a167981 */
/* 0x0002a2000c1e1b00 */
/*0420*/ DMUL R4, R4, R10 ; /* 0x0000000a04047228 */
/* 0x000ee20000000000 */
/*0430*/ IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102027810 */
/* 0x000fc80007ffe0ff */
/*0440*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x198], PT ; /* 0x0000660002007a0c */
/* 0x000fe20003f06070 */
/*0450*/ DFMA R4, R6, R8, -R4 ; /* 0x000000080604722b */
/* 0x008ea20000000804 */
/*0460*/ IADD3 R18, P1, R18, 0x4, RZ ; /* 0x0000000412127810 */
/* 0x000fe40007f3e0ff */
/*0470*/ IADD3 R26, P2, R26, 0x8, RZ ; /* 0x000000081a1a7810 */
/* 0x002fe40007f5e0ff */
/*0480*/ IADD3.X R19, RZ, R19, RZ, P1, !PT ; /* 0x00000013ff137210 */
/* 0x000fe40000ffe4ff */
/*0490*/ IADD3.X R27, RZ, R27, RZ, P2, !PT ; /* 0x0000001bff1b7210 */
/* 0x000fe200017fe4ff */
/*04a0*/ DFMA R4, R4, R22, R14 ; /* 0x000000160404722b */
/* 0x004e4e000000000e */
/*04b0*/ STG.E.64 [R16.64+0x8], R4 ; /* 0x0000080410007986 */
/* 0x0021e2000c101b06 */
/*04c0*/ @!P0 BRA 0xf0 ; /* 0xfffffc2000008947 */
/* 0x000fea000383ffff */
/*04d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04e0*/ BRA 0x4e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void calculate_sumterm_part(double2 * Up, double2 * Vpl, const double2 * A_t, const double* SR, const unsigned char* nonzero_midx1234s, const unsigned int N, const unsigned int M, const double SK_factor, const unsigned int NUM_NONZERO, const unsigned int NUM_MODES) {
unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
// Calculate the index
unsigned int Midx = full_thread_idx / N;
unsigned int Nidx = full_thread_idx % N;
unsigned int NM = N*M;
if (full_thread_idx >= NM) {
return;
}
// Compute the tensors
for (int i = 0; i < NUM_NONZERO; i++) {
unsigned int midx1 = nonzero_midx1234s[i*4]-1;
unsigned int midx2 = nonzero_midx1234s[1+i*4]-1;
unsigned int midx3 = nonzero_midx1234s[2+i*4]-1;
unsigned int midx4 = nonzero_midx1234s[3+i*4]-1;
double a = A_t[Nidx+Midx*N+midx2*NM].x;
double b = A_t[Nidx+Midx*N+midx2*NM].y;
double c = A_t[Nidx+Midx*N+midx3*NM].x;
double d = A_t[Nidx+Midx*N+midx3*NM].y;
double e = A_t[Nidx+Midx*N+midx4*NM].x;
double f = A_t[Nidx+Midx*N+midx4*NM].y;
Up[Nidx+Midx*N+midx1*NM].x = Up[Nidx+Midx*N+midx1*NM].x + SK_factor*SR[i]*(a*c*e-b*d*e+a*d*f+c*b*f);
Up[Nidx+Midx*N+midx1*NM].y = Up[Nidx+Midx*N+midx1*NM].y + SK_factor*SR[i]*(a*d*e+c*b*e-a*c*f+b*d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x + SR[i]*(c*e+d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y + SR[i]*(d*e-c*f);
}
} | .file "tmpxft_00069542_00000000-6_calculate_sumterm_part_double.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj
.type _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj, @function
_Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj:
.LFB2051:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movl %r9d, 20(%rsp)
movsd %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 20(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 248(%rsp), %rax
movq %rax, 192(%rsp)
leaq 256(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj, .-_Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj
.globl _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj
.type _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj, @function
_Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj, .-_Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void calculate_sumterm_part(double2 * Up, double2 * Vpl, const double2 * A_t, const double* SR, const unsigned char* nonzero_midx1234s, const unsigned int N, const unsigned int M, const double SK_factor, const unsigned int NUM_NONZERO, const unsigned int NUM_MODES) {
unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
// Calculate the index
unsigned int Midx = full_thread_idx / N;
unsigned int Nidx = full_thread_idx % N;
unsigned int NM = N*M;
if (full_thread_idx >= NM) {
return;
}
// Compute the tensors
for (int i = 0; i < NUM_NONZERO; i++) {
unsigned int midx1 = nonzero_midx1234s[i*4]-1;
unsigned int midx2 = nonzero_midx1234s[1+i*4]-1;
unsigned int midx3 = nonzero_midx1234s[2+i*4]-1;
unsigned int midx4 = nonzero_midx1234s[3+i*4]-1;
double a = A_t[Nidx+Midx*N+midx2*NM].x;
double b = A_t[Nidx+Midx*N+midx2*NM].y;
double c = A_t[Nidx+Midx*N+midx3*NM].x;
double d = A_t[Nidx+Midx*N+midx3*NM].y;
double e = A_t[Nidx+Midx*N+midx4*NM].x;
double f = A_t[Nidx+Midx*N+midx4*NM].y;
Up[Nidx+Midx*N+midx1*NM].x = Up[Nidx+Midx*N+midx1*NM].x + SK_factor*SR[i]*(a*c*e-b*d*e+a*d*f+c*b*f);
Up[Nidx+Midx*N+midx1*NM].y = Up[Nidx+Midx*N+midx1*NM].y + SK_factor*SR[i]*(a*d*e+c*b*e-a*c*f+b*d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x + SR[i]*(c*e+d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y + SR[i]*(d*e-c*f);
}
} | #include <hip/hip_runtime.h>
__global__ void calculate_sumterm_part(double2 * Up, double2 * Vpl, const double2 * A_t, const double* SR, const unsigned char* nonzero_midx1234s, const unsigned int N, const unsigned int M, const double SK_factor, const unsigned int NUM_NONZERO, const unsigned int NUM_MODES) {
unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
// Calculate the index
unsigned int Midx = full_thread_idx / N;
unsigned int Nidx = full_thread_idx % N;
unsigned int NM = N*M;
if (full_thread_idx >= NM) {
return;
}
// Compute the tensors
for (int i = 0; i < NUM_NONZERO; i++) {
unsigned int midx1 = nonzero_midx1234s[i*4]-1;
unsigned int midx2 = nonzero_midx1234s[1+i*4]-1;
unsigned int midx3 = nonzero_midx1234s[2+i*4]-1;
unsigned int midx4 = nonzero_midx1234s[3+i*4]-1;
double a = A_t[Nidx+Midx*N+midx2*NM].x;
double b = A_t[Nidx+Midx*N+midx2*NM].y;
double c = A_t[Nidx+Midx*N+midx3*NM].x;
double d = A_t[Nidx+Midx*N+midx3*NM].y;
double e = A_t[Nidx+Midx*N+midx4*NM].x;
double f = A_t[Nidx+Midx*N+midx4*NM].y;
Up[Nidx+Midx*N+midx1*NM].x = Up[Nidx+Midx*N+midx1*NM].x + SK_factor*SR[i]*(a*c*e-b*d*e+a*d*f+c*b*f);
Up[Nidx+Midx*N+midx1*NM].y = Up[Nidx+Midx*N+midx1*NM].y + SK_factor*SR[i]*(a*d*e+c*b*e-a*c*f+b*d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x + SR[i]*(c*e+d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y + SR[i]*(d*e-c*f);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// One thread per (Nidx, Midx) element of the N*M grid. For each stored
// nonzero entry i, accumulates a complex triple product of three A_t
// components into Up, and a complex pair product into Vpl.
//
//   Up[idx1]        += SK_factor * SR[i] * (product of modes 2,3,4)
//   Vpl[idx1,idx2]  += SR[i]            * (product of modes 3,4)
//
// nonzero_midx1234s packs four 1-based mode indices per nonzero.
// Interface is unchanged from the original kernel; only redundant global
// loads and repeated index expressions are hoisted, so results are
// bit-for-bit identical (FP expression order preserved).
__global__ void calculate_sumterm_part(double2 * Up, double2 * Vpl, const double2 * A_t, const double* SR, const unsigned char* nonzero_midx1234s, const unsigned int N, const unsigned int M, const double SK_factor, const unsigned int NUM_NONZERO, const unsigned int NUM_MODES) {
    unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
    // Calculate the index
    unsigned int Midx = full_thread_idx / N;
    unsigned int Nidx = full_thread_idx % N;
    unsigned int NM = N*M;
    if (full_thread_idx >= NM) {
        return;
    }
    // Offset of this thread's element inside one (N*M)-sized mode slab;
    // identical for every access below, so compute it once.
    const unsigned int base = Nidx + Midx*N;
    // Compute the tensors
    for (int i = 0; i < NUM_NONZERO; i++) {
        // Mode indices are stored 1-based; convert to 0-based.
        unsigned int midx1 = nonzero_midx1234s[i*4]-1;
        unsigned int midx2 = nonzero_midx1234s[1+i*4]-1;
        unsigned int midx3 = nonzero_midx1234s[2+i*4]-1;
        unsigned int midx4 = nonzero_midx1234s[3+i*4]-1;
        // Hoist loads the original repeated: SR[i] was fetched from global
        // memory four times and each A_t element twice per iteration.
        const double sr = SR[i];
        const double2 A2 = A_t[base + midx2*NM];
        const double2 A3 = A_t[base + midx3*NM];
        const double2 A4 = A_t[base + midx4*NM];
        const double a = A2.x, b = A2.y;
        const double c = A3.x, d = A3.y;
        const double e = A4.x, f = A4.y;
        const unsigned int up_idx  = base + midx1*NM;
        const unsigned int vpl_idx = up_idx + midx2*NM*NUM_MODES;
        // Real/imaginary accumulations; term order kept exactly as in the
        // original source.
        Up[up_idx].x = Up[up_idx].x + SK_factor*sr*(a*c*e-b*d*e+a*d*f+c*b*f);
        Up[up_idx].y = Up[up_idx].y + SK_factor*sr*(a*d*e+c*b*e-a*c*f+b*d*f);
        Vpl[vpl_idx].x = Vpl[vpl_idx].x + sr*(c*e+d*f);
        Vpl[vpl_idx].y = Vpl[vpl_idx].y + sr*(d*e-c*f);
    }
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.globl _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.p2align 8
.type _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj,@function
_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj:
; gfx1100 (wave32) body of calculate_sumterm_part.
; Kernarg base in s[0:1]: 0x00 Up, 0x08 Vpl, 0x10 A_t, 0x18 SR,
; 0x20 nonzero_midx1234s, 0x28 N, 0x2c M, 0x30 SK_factor,
; 0x38 NUM_NONZERO, 0x3c NUM_MODES, 0x4c hidden group-size-x (low 16 bits).
	s_clause 0x2
	s_load_b32 s4, s[0:1], 0x4c
	s_load_b64 s[2:3], s[0:1], 0x28
	s_load_b32 s12, s[0:1], 0x38
	s_waitcnt lgkmcnt(0)
	s_and_b32 s4, s4, 0xffff                     ; blockDim.x (16-bit field)
	s_mul_i32 s13, s3, s2                        ; s13 = NM = N*M
	v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]  ; v1 = blockIdx.x*blockDim.x + tid.x
	s_cmp_lg_u32 s12, 0
	s_cselect_b32 s2, -1, 0
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
	v_cmp_gt_u32_e32 vcc_lo, s13, v1
; Disable lanes with id >= NM, and skip everything when NUM_NONZERO == 0
; (the i < NUM_NONZERO test is folded into the exec mask for a rotated loop).
	s_and_b32 s2, vcc_lo, s2
	s_and_saveexec_b32 s3, s2
	s_cbranch_execz .LBB0_3
	s_clause 0x3
	s_load_b64 s[16:17], s[0:1], 0x20            ; nonzero_midx1234s
	s_load_b256 s[4:11], s[0:1], 0x0             ; s[4:5]=Up s[6:7]=Vpl s[8:9]=A_t s[10:11]=SR
	s_load_b64 s[2:3], s[0:1], 0x30              ; SK_factor
	s_load_b32 s14, s[0:1], 0x3c                 ; NUM_MODES
	v_mov_b32_e32 v0, 0
	s_waitcnt lgkmcnt(0)
	s_add_u32 s0, s16, 1                         ; s[0:1] = nonzero_midx1234s + 1,
	s_addc_u32 s1, s17, 0                        ; so midx1 is fetched at offset:-1
.LBB0_2:
; Fetch the four packed 1-based mode indices for nonzero i, then -1 each.
	s_clause 0x3
	global_load_u8 v2, v0, s[0:1]
	global_load_u8 v14, v0, s[0:1] offset:-1
	global_load_u8 v3, v0, s[0:1] offset:1
	global_load_u8 v10, v0, s[0:1] offset:2
	s_add_i32 s12, s12, -1                       ; remaining trip count
	s_add_u32 s0, s0, 4                          ; next 4-byte index group
	s_addc_u32 s1, s1, 0
	s_waitcnt vmcnt(3)
	v_add_nc_u32_e32 v2, -1, v2
	s_waitcnt vmcnt(2)
	v_add_nc_u32_e32 v14, -1, v14
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mul_lo_u32 v30, v2, s13
	v_dual_mov_b32 v18, 0 :: v_dual_add_nc_u32 v17, v30, v1
	s_waitcnt vmcnt(1)
	v_add_nc_u32_e32 v4, -1, v3
	s_waitcnt vmcnt(0)
	v_add_nc_u32_e32 v12, -1, v10
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
	v_mad_u64_u32 v[2:3], null, v4, s13, v[1:2]
	v_mov_b32_e32 v3, v18
; A_t addresses: 16 bytes per double2, element index id + midx{2,3}*NM.
	v_lshlrev_b64 v[4:5], 4, v[17:18]
	v_lshlrev_b64 v[2:3], 4, v[2:3]
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v6, vcc_lo, s8, v2
	v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
	s_delay_alu instid0(VALU_DEP_4)
	v_add_co_u32 v2, vcc_lo, s8, v4
	v_add_co_ci_u32_e32 v3, vcc_lo, s9, v5, vcc_lo
; Load the two complex operands (a,b) -> v[2:5] and (c,d) -> v[6:9].
	s_clause 0x1
	global_load_b128 v[2:5], v[2:3], off
	global_load_b128 v[6:9], v[6:7], off
	s_waitcnt vmcnt(1)
	v_mad_u64_u32 v[10:11], null, v12, s13, v[1:2]
	v_mov_b32_e32 v11, v18
	v_mad_u64_u32 v[21:22], null, v14, s13, v[1:2] ; Up element index: id + midx1*NM
	v_mov_b32_e32 v22, v18
	s_waitcnt vmcnt(0)
	v_mul_f64 v[24:25], v[4:5], v[8:9]
	v_mul_f64 v[26:27], v[2:3], v[6:7]
	v_lshlrev_b64 v[10:11], 4, v[10:11]
	v_mul_f64 v[2:3], v[2:3], v[8:9]
	v_mul_f64 v[4:5], v[4:5], v[6:7]
	v_lshlrev_b64 v[14:15], 4, v[21:22]
	s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
	v_add_co_u32 v10, vcc_lo, s8, v10
	v_add_co_ci_u32_e32 v11, vcc_lo, s9, v11, vcc_lo
	v_add_co_u32 v22, vcc_lo, s4, v14
; Third operand (e,f) -> v[10:13]; SR[i] via s[10:11]; Up.x/.y read for RMW.
	global_load_b128 v[10:13], v[10:11], off
	global_load_b64 v[19:20], v0, s[10:11]
	v_add_co_ci_u32_e32 v23, vcc_lo, s5, v15, vcc_lo
	global_load_b128 v[14:17], v[22:23], off
	s_waitcnt vmcnt(2)
	v_mul_f64 v[28:29], v[24:25], v[10:11]
	s_waitcnt vmcnt(1)
	v_mul_f64 v[19:20], v[19:20], s[2:3]         ; SK_factor * SR[i]
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_fma_f64 v[28:29], v[26:27], v[10:11], -v[28:29]
	v_fma_f64 v[28:29], v[2:3], v[12:13], v[28:29]
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
	v_fma_f64 v[28:29], v[4:5], v[12:13], v[28:29]
	v_mul_f64 v[4:5], v[4:5], v[10:11]
	s_waitcnt vmcnt(0)
	v_fma_f64 v[14:15], v[19:20], v[28:29], v[14:15]
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
	v_fma_f64 v[2:3], v[2:3], v[10:11], v[4:5]
	global_store_b64 v[22:23], v[14:15], off     ; Up[...].x updated
; NOTE(review): SR[i] is re-fetched before each FMA group below — this
; mirrors the source, which reads SR[i] once per updated component.
	global_load_b64 v[14:15], v0, s[10:11]
	v_fma_f64 v[2:3], -v[26:27], v[12:13], v[2:3]
	v_fma_f64 v[2:3], v[24:25], v[12:13], v[2:3]
	s_waitcnt vmcnt(0)
	v_mul_f64 v[4:5], v[14:15], s[2:3]
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
	v_fma_f64 v[2:3], v[2:3], v[4:5], v[16:17]
	v_mad_u64_u32 v[4:5], null, v30, s14, v[21:22] ; Vpl index adds midx2*NM*NUM_MODES
	v_mov_b32_e32 v5, v18
	v_mul_f64 v[18:19], v[8:9], v[12:13]
	v_lshlrev_b64 v[4:5], 4, v[4:5]
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v14, vcc_lo, s6, v4
	v_add_co_ci_u32_e32 v15, vcc_lo, s7, v5, vcc_lo
	global_store_b64 v[22:23], v[2:3], off offset:8 ; Up[...].y updated
	global_load_b128 v[2:5], v[14:15], off       ; Vpl[...] .x/.y read for RMW
	global_load_b64 v[16:17], v0, s[10:11]
	v_fma_f64 v[18:19], v[6:7], v[10:11], v[18:19]
	v_mul_f64 v[6:7], v[6:7], v[12:13]
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
	v_fma_f64 v[6:7], v[8:9], v[10:11], -v[6:7]
	s_waitcnt vmcnt(0)
	v_fma_f64 v[2:3], v[18:19], v[16:17], v[2:3]
	global_store_b64 v[14:15], v[2:3], off       ; Vpl[...].x updated
	global_load_b64 v[2:3], v0, s[10:11]
	s_add_u32 s10, s10, 8                        ; SR++ (8 bytes/double)
	s_addc_u32 s11, s11, 0
	s_cmp_lg_u32 s12, 0
	s_waitcnt vmcnt(0)
	v_fma_f64 v[2:3], v[6:7], v[2:3], v[4:5]
	global_store_b64 v[14:15], v[2:3], off offset:8 ; Vpl[...].y updated
	s_cbranch_scc1 .LBB0_2                       ; loop while nonzeros remain
.LBB0_3:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 31
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, .Lfunc_end0-_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 8
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: by_value
- .offset: 60
.size: 4
.value_kind: by_value
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 31
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void calculate_sumterm_part(double2 * Up, double2 * Vpl, const double2 * A_t, const double* SR, const unsigned char* nonzero_midx1234s, const unsigned int N, const unsigned int M, const double SK_factor, const unsigned int NUM_NONZERO, const unsigned int NUM_MODES) {
unsigned int full_thread_idx = threadIdx.x + blockIdx.x*blockDim.x;
// Calculate the index
unsigned int Midx = full_thread_idx / N;
unsigned int Nidx = full_thread_idx % N;
unsigned int NM = N*M;
if (full_thread_idx >= NM) {
return;
}
// Compute the tensors
for (int i = 0; i < NUM_NONZERO; i++) {
unsigned int midx1 = nonzero_midx1234s[i*4]-1;
unsigned int midx2 = nonzero_midx1234s[1+i*4]-1;
unsigned int midx3 = nonzero_midx1234s[2+i*4]-1;
unsigned int midx4 = nonzero_midx1234s[3+i*4]-1;
double a = A_t[Nidx+Midx*N+midx2*NM].x;
double b = A_t[Nidx+Midx*N+midx2*NM].y;
double c = A_t[Nidx+Midx*N+midx3*NM].x;
double d = A_t[Nidx+Midx*N+midx3*NM].y;
double e = A_t[Nidx+Midx*N+midx4*NM].x;
double f = A_t[Nidx+Midx*N+midx4*NM].y;
Up[Nidx+Midx*N+midx1*NM].x = Up[Nidx+Midx*N+midx1*NM].x + SK_factor*SR[i]*(a*c*e-b*d*e+a*d*f+c*b*f);
Up[Nidx+Midx*N+midx1*NM].y = Up[Nidx+Midx*N+midx1*NM].y + SK_factor*SR[i]*(a*d*e+c*b*e-a*c*f+b*d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].x + SR[i]*(c*e+d*f);
Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y = Vpl[Nidx+Midx*N+midx1*NM+midx2*NM*NUM_MODES].y + SR[i]*(d*e-c*f);
}
} | .text
.file "calculate_sumterm_part_double.hip"
.globl _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj # -- Begin function _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.p2align 4, 0x90
.type _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj,@function
_Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj: # @_Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
# Host-side launch stub (SysV AMD64). Spills the kernel arguments to the
# stack, builds the void** argument table hipLaunchKernel expects, pops the
# launch configuration stored by the <<<...>>> expansion, and forwards it.
# Register args: rdi/rsi/rdx/rcx/r8 = 5 pointers, r9d = N, xmm0 = SK_factor;
# M, NUM_NONZERO, NUM_MODES arrive on the caller's stack (208/216/224(%rsp)).
	.cfi_startproc
# %bb.0:
	subq	$200, %rsp
	.cfi_def_cfa_offset 208
	# Spill register-passed arguments so their addresses can be taken.
	movq	%rdi, 104(%rsp)
	movq	%rsi, 96(%rsp)
	movq	%rdx, 88(%rsp)
	movq	%rcx, 80(%rsp)
	movq	%r8, 72(%rsp)
	movl	%r9d, 12(%rsp)
	movsd	%xmm0, 64(%rsp)
	# Argument-pointer table at 112(%rsp): one slot per kernel parameter,
	# in declaration order (10 entries).
	leaq	104(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	96(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 144(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 152(%rsp)
	leaq	208(%rsp), %rax
	movq	%rax, 160(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 168(%rsp)
	leaq	216(%rsp), %rax
	movq	%rax, 176(%rsp)
	leaq	224(%rsp), %rax
	movq	%rax, 184(%rsp)
	# Retrieve grid dim (48), block dim (32), shared-mem size (24) and
	# stream (16) recorded by __hipPushCallConfiguration.
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	112(%rsp), %r9
	movl	$_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, %edi
	# sharedMem and stream become the 7th/8th args, passed on the stack.
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$216, %rsp
	.cfi_adjust_cfa_offset -216
	retq
.Lfunc_end0:
	.size	_Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, .Lfunc_end0-_Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
	.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
# Module initializer (run from .init_array): lazily registers the embedded
# fat binary exactly once (handle cached in __hip_gpubin_handle), registers
# the kernel symbol with the HIP runtime, then arranges teardown via atexit.
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB1_2                         # already registered -> skip
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	# Zero the four stack-passed pointer arguments of __hipRegisterFunction.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, %esi
	movl	$.L__unnamed_1, %edx            # device-side kernel name
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d                       # no thread limit
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL: atexit(__hip_module_dtor)
	.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
# atexit handler: unregisters the fat binary if it was registered and
# clears the cached handle so the teardown is idempotent.
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_2                         # nothing registered -> return
# %bb.1:
	pushq	%rax                            # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
# -- End function
.type _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj,@object # @_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.section .rodata,"a",@progbits
.globl _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.p2align 3, 0x0
_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj:
.quad _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.size _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj"
.size .L__unnamed_1, 71
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x188] ; /* 0x0000620000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x198], PT ; /* 0x00006600ff007a0c */
/* 0x000fe20003f05270 */
/*0040*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.U32.OR P0, PT, R0, UR4, !P0 ; /* 0x0000000400007c0c */
/* 0x000fda000c706470 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff127624 */
/* 0x000fe200078e00ff */
/*00a0*/ MOV R19, c[0x0][0x184] ; /* 0x0000610000137a02 */
/* 0x000fe20000000f00 */
/*00b0*/ IMAD.MOV.U32 R26, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff1a7624 */
/* 0x000fe200078e00ff */
/*00c0*/ MOV R27, c[0x0][0x17c] ; /* 0x00005f00001b7a02 */
/* 0x000fe20000000f00 */
/*00d0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe200078e00ff */
/*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*00f0*/ LDG.E.U8 R3, [R18.64+0x1] ; /* 0x0000010612037981 */
/* 0x000ea8000c1e1100 */
/*0100*/ LDG.E.U8 R4, [R18.64+0x2] ; /* 0x0000020612047981 */
/* 0x001ee8000c1e1100 */
/*0110*/ LDG.E.U8 R6, [R18.64+0x3] ; /* 0x0000030612067981 */
/* 0x000f28000c1e1100 */
/*0120*/ LDG.E.U8 R16, [R18.64] ; /* 0x0000000612107981 */
/* 0x000f62000c1e1100 */
/*0130*/ IADD3 R3, R3, -0x1, RZ ; /* 0xffffffff03037810 */
/* 0x004fc40007ffe0ff */
/*0140*/ IADD3 R5, R4, -0x1, RZ ; /* 0xffffffff04057810 */
/* 0x008fc60007ffe0ff */
/*0150*/ IMAD R17, R3, UR4, RZ ; /* 0x0000000403117c24 */
/* 0x000fe2000f8e02ff */
/*0160*/ HFMA2.MMA R3, -RZ, RZ, 0, 9.5367431640625e-07 ; /* 0x00000010ff037435 */
/* 0x000fe200000001ff */
/*0170*/ IADD3 R7, R6, -0x1, RZ ; /* 0xffffffff06077810 */
/* 0x010fe20007ffe0ff */
/*0180*/ IMAD R4, R5, UR4, R0.reuse ; /* 0x0000000405047c24 */
/* 0x100fe4000f8e0200 */
/*0190*/ IMAD.IADD R28, R0, 0x1, R17 ; /* 0x00000001001c7824 */
/* 0x000fe400078e0211 */
/*01a0*/ IMAD R8, R7, UR4, R0 ; /* 0x0000000407087c24 */
/* 0x000fc8000f8e0200 */
/*01b0*/ IMAD.WIDE.U32 R4, R4, R3, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0003 */
/*01c0*/ IMAD.WIDE.U32 R28, R28, R3.reuse, c[0x0][0x170] ; /* 0x00005c001c1c7625 */
/* 0x080fe400078e0003 */
/*01d0*/ LDG.E.128 R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea4000c1e1d00 */
/*01e0*/ IMAD.WIDE.U32 R8, R8, R3, c[0x0][0x170] ; /* 0x00005c0008087625 */
/* 0x000fe400078e0003 */
/*01f0*/ LDG.E.128 R12, [R28.64] ; /* 0x000000061c0c7981 */
/* 0x000ea8000c1e1d00 */
/*0200*/ LDG.E.128 R8, [R8.64] ; /* 0x0000000608087981 */
/* 0x000ee2000c1e1d00 */
/*0210*/ DMUL R20, R14, R6 ; /* 0x000000060e147228 */
/* 0x004ec80000000000 */
/*0220*/ DMUL R22, R12, R4 ; /* 0x000000040c167228 */
/* 0x000fc80000000000 */
/*0230*/ DMUL R24, R8, R20 ; /* 0x0000001408187228 */
/* 0x008e080000000000 */
/*0240*/ DMUL R12, R12, R6 ; /* 0x000000060c0c7228 */
/* 0x000fc80000000000 */
/*0250*/ DFMA R24, R8, R22, -R24 ; /* 0x000000160818722b */
/* 0x001e080000000818 */
/*0260*/ DMUL R14, R14, R4 ; /* 0x000000040e0e7228 */
/* 0x000fc80000000000 */
/*0270*/ DFMA R24, R10, R12, R24 ; /* 0x0000000c0a18722b */
/* 0x001e0c0000000018 */
/*0280*/ DFMA R24, R10, R14, R24 ; /* 0x0000000e0a18722b */
/* 0x001fc80000000018 */
/*0290*/ DMUL R14, R8, R14 ; /* 0x0000000e080e7228 */
/* 0x000e0c0000000000 */
/*02a0*/ DFMA R14, R8, R12, R14 ; /* 0x0000000c080e722b */
/* 0x001a24000000000e */
/*02b0*/ IADD3 R13, R16, -0x1, RZ ; /* 0xffffffff100d7810 */
/* 0x020fc80007ffe0ff */
/*02c0*/ DFMA R22, R10, -R22, R14 ; /* 0x800000160a16722b */
/* 0x001e22000000000e */
/*02d0*/ IMAD R16, R13, UR4, R0 ; /* 0x000000040d107c24 */
/* 0x000fca000f8e0200 */
/*02e0*/ DFMA R20, R10, R20, R22 ; /* 0x000000140a14722b */
/* 0x0011e20000000016 */
/*02f0*/ IMAD.WIDE.U32 R28, R16, R3, c[0x0][0x160] ; /* 0x00005800101c7625 */
/* 0x000fc600078e0003 */
/*0300*/ LDG.E.64 R22, [R26.64] ; /* 0x000000061a167981 */
/* 0x001ea8000c1e1b00 */
/*0310*/ LDG.E.128 R12, [R28.64] ; /* 0x000000061c0c7981 */
/* 0x000ee2000c1e1d00 */
/*0320*/ DMUL R22, R22, c[0x0][0x190] ; /* 0x0000640016167a28 */
/* 0x004ecc0000000000 */
/*0330*/ DFMA R22, R22, R24, R12 ; /* 0x000000181616722b */
/* 0x008e0e000000000c */
/*0340*/ STG.E.64 [R28.64], R22 ; /* 0x000000161c007986 */
/* 0x001fe8000c101b06 */
/*0350*/ LDG.E.64 R12, [R26.64] ; /* 0x000000061a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0360*/ IMAD R16, R17, c[0x0][0x19c], R16 ; /* 0x0000670011107a24 */
/* 0x000fc800078e0210 */
/*0370*/ IMAD.WIDE.U32 R16, R16, R3, c[0x0][0x168] ; /* 0x00005a0010107625 */
/* 0x000fe200078e0003 */
/*0380*/ DMUL R12, R12, c[0x0][0x190] ; /* 0x000064000c0c7a28 */
/* 0x004e0c0000000000 */
/*0390*/ DFMA R20, R12, R20, R14 ; /* 0x000000140c14722b */
/* 0x001e0e000000000e */
/*03a0*/ STG.E.64 [R28.64+0x8], R20 ; /* 0x000008141c007986 */
/* 0x0011e8000c101b06 */
/*03b0*/ LDG.E.128 R12, [R16.64] ; /* 0x00000006100c7981 */
/* 0x000ea8000c1e1d00 */
/*03c0*/ LDG.E.64 R20, [R26.64] ; /* 0x000000061a147981 */
/* 0x001ea2000c1e1b00 */
/*03d0*/ DMUL R24, R6, R10 ; /* 0x0000000a06187228 */
/* 0x000e0c0000000000 */
/*03e0*/ DFMA R24, R4, R8, R24 ; /* 0x000000080418722b */
/* 0x001e8c0000000018 */
/*03f0*/ DFMA R12, R24, R20, R12 ; /* 0x00000014180c722b */
/* 0x004e0e000000000c */
/*0400*/ STG.E.64 [R16.64], R12 ; /* 0x0000000c10007986 */
/* 0x0011e8000c101b06 */
/*0410*/ LDG.E.64 R22, [R26.64] ; /* 0x000000061a167981 */
/* 0x0002a2000c1e1b00 */
/*0420*/ DMUL R4, R4, R10 ; /* 0x0000000a04047228 */
/* 0x000ee20000000000 */
/*0430*/ IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102027810 */
/* 0x000fc80007ffe0ff */
/*0440*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x198], PT ; /* 0x0000660002007a0c */
/* 0x000fe20003f06070 */
/*0450*/ DFMA R4, R6, R8, -R4 ; /* 0x000000080604722b */
/* 0x008ea20000000804 */
/*0460*/ IADD3 R18, P1, R18, 0x4, RZ ; /* 0x0000000412127810 */
/* 0x000fe40007f3e0ff */
/*0470*/ IADD3 R26, P2, R26, 0x8, RZ ; /* 0x000000081a1a7810 */
/* 0x002fe40007f5e0ff */
/*0480*/ IADD3.X R19, RZ, R19, RZ, P1, !PT ; /* 0x00000013ff137210 */
/* 0x000fe40000ffe4ff */
/*0490*/ IADD3.X R27, RZ, R27, RZ, P2, !PT ; /* 0x0000001bff1b7210 */
/* 0x000fe200017fe4ff */
/*04a0*/ DFMA R4, R4, R22, R14 ; /* 0x000000160404722b */
/* 0x004e4e000000000e */
/*04b0*/ STG.E.64 [R16.64+0x8], R4 ; /* 0x0000080410007986 */
/* 0x0021e2000c101b06 */
/*04c0*/ @!P0 BRA 0xf0 ; /* 0xfffffc2000008947 */
/* 0x000fea000383ffff */
/*04d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04e0*/ BRA 0x4e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.globl _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.p2align 8
.type _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj,@function
_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj:
; gfx1100 (wave32) body of calculate_sumterm_part (second, identical copy
; of this listing in the file).
; Kernarg base in s[0:1]: 0x00 Up, 0x08 Vpl, 0x10 A_t, 0x18 SR,
; 0x20 nonzero_midx1234s, 0x28 N, 0x2c M, 0x30 SK_factor,
; 0x38 NUM_NONZERO, 0x3c NUM_MODES, 0x4c hidden group-size-x (low 16 bits).
	s_clause 0x2
	s_load_b32 s4, s[0:1], 0x4c
	s_load_b64 s[2:3], s[0:1], 0x28
	s_load_b32 s12, s[0:1], 0x38
	s_waitcnt lgkmcnt(0)
	s_and_b32 s4, s4, 0xffff                     ; blockDim.x (16-bit field)
	s_mul_i32 s13, s3, s2                        ; s13 = NM = N*M
	v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]  ; v1 = blockIdx.x*blockDim.x + tid.x
	s_cmp_lg_u32 s12, 0
	s_cselect_b32 s2, -1, 0
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
	v_cmp_gt_u32_e32 vcc_lo, s13, v1
; Disable lanes with id >= NM, and skip everything when NUM_NONZERO == 0
; (the i < NUM_NONZERO test is folded into the exec mask for a rotated loop).
	s_and_b32 s2, vcc_lo, s2
	s_and_saveexec_b32 s3, s2
	s_cbranch_execz .LBB0_3
	s_clause 0x3
	s_load_b64 s[16:17], s[0:1], 0x20            ; nonzero_midx1234s
	s_load_b256 s[4:11], s[0:1], 0x0             ; s[4:5]=Up s[6:7]=Vpl s[8:9]=A_t s[10:11]=SR
	s_load_b64 s[2:3], s[0:1], 0x30              ; SK_factor
	s_load_b32 s14, s[0:1], 0x3c                 ; NUM_MODES
	v_mov_b32_e32 v0, 0
	s_waitcnt lgkmcnt(0)
	s_add_u32 s0, s16, 1                         ; s[0:1] = nonzero_midx1234s + 1,
	s_addc_u32 s1, s17, 0                        ; so midx1 is fetched at offset:-1
.LBB0_2:
; Fetch the four packed 1-based mode indices for nonzero i, then -1 each.
	s_clause 0x3
	global_load_u8 v2, v0, s[0:1]
	global_load_u8 v14, v0, s[0:1] offset:-1
	global_load_u8 v3, v0, s[0:1] offset:1
	global_load_u8 v10, v0, s[0:1] offset:2
	s_add_i32 s12, s12, -1                       ; remaining trip count
	s_add_u32 s0, s0, 4                          ; next 4-byte index group
	s_addc_u32 s1, s1, 0
	s_waitcnt vmcnt(3)
	v_add_nc_u32_e32 v2, -1, v2
	s_waitcnt vmcnt(2)
	v_add_nc_u32_e32 v14, -1, v14
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_mul_lo_u32 v30, v2, s13
	v_dual_mov_b32 v18, 0 :: v_dual_add_nc_u32 v17, v30, v1
	s_waitcnt vmcnt(1)
	v_add_nc_u32_e32 v4, -1, v3
	s_waitcnt vmcnt(0)
	v_add_nc_u32_e32 v12, -1, v10
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
	v_mad_u64_u32 v[2:3], null, v4, s13, v[1:2]
	v_mov_b32_e32 v3, v18
; A_t addresses: 16 bytes per double2, element index id + midx{2,3}*NM.
	v_lshlrev_b64 v[4:5], 4, v[17:18]
	v_lshlrev_b64 v[2:3], 4, v[2:3]
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v6, vcc_lo, s8, v2
	v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
	s_delay_alu instid0(VALU_DEP_4)
	v_add_co_u32 v2, vcc_lo, s8, v4
	v_add_co_ci_u32_e32 v3, vcc_lo, s9, v5, vcc_lo
; Load the two complex operands (a,b) -> v[2:5] and (c,d) -> v[6:9].
	s_clause 0x1
	global_load_b128 v[2:5], v[2:3], off
	global_load_b128 v[6:9], v[6:7], off
	s_waitcnt vmcnt(1)
	v_mad_u64_u32 v[10:11], null, v12, s13, v[1:2]
	v_mov_b32_e32 v11, v18
	v_mad_u64_u32 v[21:22], null, v14, s13, v[1:2] ; Up element index: id + midx1*NM
	v_mov_b32_e32 v22, v18
	s_waitcnt vmcnt(0)
	v_mul_f64 v[24:25], v[4:5], v[8:9]
	v_mul_f64 v[26:27], v[2:3], v[6:7]
	v_lshlrev_b64 v[10:11], 4, v[10:11]
	v_mul_f64 v[2:3], v[2:3], v[8:9]
	v_mul_f64 v[4:5], v[4:5], v[6:7]
	v_lshlrev_b64 v[14:15], 4, v[21:22]
	s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
	v_add_co_u32 v10, vcc_lo, s8, v10
	v_add_co_ci_u32_e32 v11, vcc_lo, s9, v11, vcc_lo
	v_add_co_u32 v22, vcc_lo, s4, v14
; Third operand (e,f) -> v[10:13]; SR[i] via s[10:11]; Up.x/.y read for RMW.
	global_load_b128 v[10:13], v[10:11], off
	global_load_b64 v[19:20], v0, s[10:11]
	v_add_co_ci_u32_e32 v23, vcc_lo, s5, v15, vcc_lo
	global_load_b128 v[14:17], v[22:23], off
	s_waitcnt vmcnt(2)
	v_mul_f64 v[28:29], v[24:25], v[10:11]
	s_waitcnt vmcnt(1)
	v_mul_f64 v[19:20], v[19:20], s[2:3]         ; SK_factor * SR[i]
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_fma_f64 v[28:29], v[26:27], v[10:11], -v[28:29]
	v_fma_f64 v[28:29], v[2:3], v[12:13], v[28:29]
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
	v_fma_f64 v[28:29], v[4:5], v[12:13], v[28:29]
	v_mul_f64 v[4:5], v[4:5], v[10:11]
	s_waitcnt vmcnt(0)
	v_fma_f64 v[14:15], v[19:20], v[28:29], v[14:15]
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
	v_fma_f64 v[2:3], v[2:3], v[10:11], v[4:5]
	global_store_b64 v[22:23], v[14:15], off     ; Up[...].x updated
; NOTE(review): SR[i] is re-fetched before each FMA group below — this
; mirrors the source, which reads SR[i] once per updated component.
	global_load_b64 v[14:15], v0, s[10:11]
	v_fma_f64 v[2:3], -v[26:27], v[12:13], v[2:3]
	v_fma_f64 v[2:3], v[24:25], v[12:13], v[2:3]
	s_waitcnt vmcnt(0)
	v_mul_f64 v[4:5], v[14:15], s[2:3]
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
	v_fma_f64 v[2:3], v[2:3], v[4:5], v[16:17]
	v_mad_u64_u32 v[4:5], null, v30, s14, v[21:22] ; Vpl index adds midx2*NM*NUM_MODES
	v_mov_b32_e32 v5, v18
	v_mul_f64 v[18:19], v[8:9], v[12:13]
	v_lshlrev_b64 v[4:5], 4, v[4:5]
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v14, vcc_lo, s6, v4
	v_add_co_ci_u32_e32 v15, vcc_lo, s7, v5, vcc_lo
	global_store_b64 v[22:23], v[2:3], off offset:8 ; Up[...].y updated
	global_load_b128 v[2:5], v[14:15], off       ; Vpl[...] .x/.y read for RMW
	global_load_b64 v[16:17], v0, s[10:11]
	v_fma_f64 v[18:19], v[6:7], v[10:11], v[18:19]
	v_mul_f64 v[6:7], v[6:7], v[12:13]
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
	v_fma_f64 v[6:7], v[8:9], v[10:11], -v[6:7]
	s_waitcnt vmcnt(0)
	v_fma_f64 v[2:3], v[18:19], v[16:17], v[2:3]
	global_store_b64 v[14:15], v[2:3], off       ; Vpl[...].x updated
	global_load_b64 v[2:3], v0, s[10:11]
	s_add_u32 s10, s10, 8                        ; SR++ (8 bytes/double)
	s_addc_u32 s11, s11, 0
	s_cmp_lg_u32 s12, 0
	s_waitcnt vmcnt(0)
	v_fma_f64 v[2:3], v[6:7], v[2:3], v[4:5]
	global_store_b64 v[14:15], v[2:3], off offset:8 ; Vpl[...].y updated
	s_cbranch_scc1 .LBB0_2                       ; loop while nonzeros remain
.LBB0_3:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 31
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, .Lfunc_end0-_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 8
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: by_value
- .offset: 60
.size: 4
.value_kind: by_value
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 31
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00069542_00000000-6_calculate_sumterm_part_double.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj
.type _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj, @function
_Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj:
.LFB2051:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movl %r9d, 20(%rsp)
movsd %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 20(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 248(%rsp), %rax
movq %rax, 192(%rsp)
leaq 256(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj, .-_Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj
.globl _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj
.type _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj, @function
_Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _Z67__device_stub__Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjjP7double2S0_PKS_PKdPKhjjdjj
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj, .-_Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z22calculate_sumterm_partP7double2S0_PKS_PKdPKhjjdjj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "calculate_sumterm_part_double.hip"
.globl _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj # -- Begin function _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.p2align 4, 0x90
.type _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj,@function
_Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj: # @_Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.cfi_startproc
# %bb.0:
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
movsd %xmm0, 64(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
leaq 216(%rsp), %rax
movq %rax, 176(%rsp)
leaq 224(%rsp), %rax
movq %rax, 184(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $216, %rsp
.cfi_adjust_cfa_offset -216
retq
.Lfunc_end0:
.size _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, .Lfunc_end0-_Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj,@object # @_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.section .rodata,"a",@progbits
.globl _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.p2align 3, 0x0
_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj:
.quad _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.size _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj"
.size .L__unnamed_1, 71
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z37__device_stub__calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z22calculate_sumterm_partP15HIP_vector_typeIdLj2EES1_PKS0_PKdPKhjjdjj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "stdio.h"
__global__ void add(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
#define N 512
int main(void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**) &dev_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
//random_ints(a,N);
//random_ints(b,N);
for (int i= 0; i<N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
cudaMemcpy(dev_a, a, size , cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
add<<< 1 , N >>> (dev_a,dev_b,dev_c);
//copy device result back to host copy of c
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
for(int i =0; i<N; i++){
printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
}
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0007 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "stdio.h"
__global__ void add(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
#define N 512
int main(void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**) &dev_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
//random_ints(a,N);
//random_ints(b,N);
for (int i= 0; i<N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
cudaMemcpy(dev_a, a, size , cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
add<<< 1 , N >>> (dev_a,dev_b,dev_c);
//copy device result back to host copy of c
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
for(int i =0; i<N; i++){
printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
}
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | .file "tmpxft_0017172b_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "The value of the %d plus %d is : %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
movl $2048, %edi
call malloc@PLT
movq %rax, %r12
movl $2048, %edi
call malloc@PLT
movq %rax, %rbp
movl $2048, %edi
call malloc@PLT
movq %rax, %r13
movl $0, %eax
.L12:
movl %eax, (%r12,%rax,4)
leal (%rax,%rax), %edx
movl %edx, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $512, %rax
jne .L12
movl $1, %ecx
movl $2048, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $2048, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $512, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $2048, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC0(%rip), %r14
.L14:
movl 0(%rbp,%rbx), %ecx
movl (%r12,%rbx), %edx
movl 0(%r13,%rbx), %r8d
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $2048, %rbx
jne .L14
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "stdio.h"
__global__ void add(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
#define N 512
int main(void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**) &dev_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
//random_ints(a,N);
//random_ints(b,N);
for (int i= 0; i<N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
cudaMemcpy(dev_a, a, size , cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
add<<< 1 , N >>> (dev_a,dev_b,dev_c);
//copy device result back to host copy of c
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
for(int i =0; i<N; i++){
printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
}
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | #include <hip/hip_runtime.h>
#include "stdio.h"
__global__ void add(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
#define N 512
int main(void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b, size);
hipMalloc((void**) &dev_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
//random_ints(a,N);
//random_ints(b,N);
for (int i= 0; i<N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
hipMemcpy(dev_a, a, size , hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
add<<< 1 , N >>> (dev_a,dev_b,dev_c);
//copy device result back to host copy of c
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
for(int i =0; i<N; i++){
printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
}
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "stdio.h"
__global__ void add(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
#define N 512
int main(void ){
int *a,*b,*c; // host copies of a, b and c
int *dev_a,*dev_b, *dev_c; // device copies of a, b and c
int size = N * sizeof(int); // we need space for an integer
//allocate device copies of a, b , c
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b, size);
hipMalloc((void**) &dev_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
//random_ints(a,N);
//random_ints(b,N);
for (int i= 0; i<N ; i++){
a[i]=i;
b[i]=i*2;
}
//copy inputs to device (GPU)
hipMemcpy(dev_a, a, size , hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
// launch add() kernel on GPU, passing parameters
add<<< 1 , N >>> (dev_a,dev_b,dev_c);
//copy device result back to host copy of c
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
for(int i =0; i<N; i++){
printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
}
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "stdio.h"
__global__ void add(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
#define N 512
int main(void ){
    int *a, *b, *c;                 // host copies of a, b and c
    int *dev_a, *dev_b, *dev_c;     // device copies of a, b and c
    int size = N * sizeof(int);     // bytes for N integers

    // Allocate device copies of a, b, c.  The original ignored these
    // return codes; a failed hipMalloc would leave garbage pointers that
    // the kernel launch and hipMemcpy would then dereference.
    if (hipMalloc((void**) &dev_a, size) != hipSuccess ||
        hipMalloc((void**) &dev_b, size) != hipSuccess ||
        hipMalloc((void**) &dev_c, size) != hipSuccess) {
        fprintf(stderr, "hipMalloc failed\n");
        return 1;
    }
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    if (!a || !b || !c) {           // host allocation can fail too
        fprintf(stderr, "malloc failed\n");
        return 1;
    }
    // Deterministic inputs: a[i] = i, b[i] = 2*i.
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }
    // Copy inputs to device (GPU).
    hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
    // Launch add() on the GPU: one block of N threads, one per element.
    add<<< 1 , N >>> (dev_a, dev_b, dev_c);
    // Copy device result back to host copy of c.
    hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
    }
    free(a);
    free(b);
    free(c);
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
} | .text
.file "main.hip"
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
# Host-side launch stub for HIP kernel _Z3addPiS_S_ (add(int*,int*,int*)).
# SysV AMD64: rdi/rsi/rdx carry the three device pointers.  Spills them,
# builds an argv-style table of pointers to the spilled args, pops the
# launch configuration pushed by the <<<...>>> expansion, then forwards
# everything to hipLaunchKernel.
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp # local frame; keeps rsp 16-aligned at the calls below
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp) # spill arg0
movq %rsi, 64(%rsp) # spill arg1
movq %rdx, 56(%rsp) # spill arg2
leaq 72(%rsp), %rax
movq %rax, 80(%rsp) # args[0] = &arg0
leaq 64(%rsp), %rax
movq %rax, 88(%rsp) # args[1] = &arg1
leaq 56(%rsp), %rax
movq %rax, 96(%rsp) # args[2] = &arg2
leaq 40(%rsp), %rdi # out: grid dim
leaq 24(%rsp), %rsi # out: block dim
leaq 16(%rsp), %rdx # out: shared-mem bytes
leaq 8(%rsp), %rcx # out: stream
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi # grid dim x,y
movl 48(%rsp), %edx # grid dim z
movq 24(%rsp), %rcx # block dim x,y
movl 32(%rsp), %r8d # block dim z
leaq 80(%rsp), %r9 # kernel-arg pointer table
movl $_Z3addPiS_S_, %edi # host handle identifying the kernel
pushq 8(%rsp) # stack arg 6: stream
.cfi_adjust_cfa_offset 8
pushq 24(%rsp) # stack arg 5: shared-mem bytes (offset shifted by prior push)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp # 104-byte frame + the two 8-byte pushed args
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
leaq 8(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
movq %rsp, %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %rbx
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %r14
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %r15
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %ecx, (%rbx,%rcx,4)
movl %eax, (%r14,%rcx,4)
incq %rcx
addl $2, %eax
cmpq $512, %rcx # imm = 0x200
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $2048, %edx # imm = 0x800
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl (%r14,%r12,4), %edx
movl (%r15,%r12,4), %ecx
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $512, %r12 # imm = 0x200
jne .LBB1_5
# %bb.6:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (referenced from .init_array below): registers the
# embedded fat binary once, caches the handle in __hip_gpubin_handle,
# registers the kernel symbol under its mangled name, and queues
# __hip_module_dtor with atexit.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip) # already registered?
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip) # cache the module handle
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp) # zero the trailing pointer args
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi # host-side handle for the kernel
movl $.L__unnamed_1, %edx # device-side name "_Z3addPiS_S_"
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered via atexit in __hip_module_ctor):
# unregisters the fat binary if it was ever registered and clears the
# cached handle so a second run is a no-op.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi # nothing to do if never registered
je .LBB3_2
# %bb.1:
pushq %rax # realigns rsp to 16 for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "The value of the %d plus %d is : %d\n"
.size .L.str, 37
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0007 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
; gfx1100 kernel add(int*,int*,int*): c[tid.x] = a[tid.x] + b[tid.x].
; Kernarg layout (s[0:1] = kernarg base): +0x00 a, +0x08 b, +0x10 c.
; v0 holds workitem id x on entry (.amdhsa_system_vgpr_workitem_id 0).
_Z3addPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0 ; s[4:5] = a, s[6:7] = b
v_lshlrev_b32_e32 v0, 2, v0 ; byte offset = tid.x * 4
s_load_b64 s[0:1], s[0:1], 0x10 ; s[0:1] = c
s_waitcnt lgkmcnt(0) ; wait for kernarg loads
s_clause 0x1
global_load_b32 v1, v0, s[4:5] ; v1 = a[tid]
global_load_b32 v2, v0, s[6:7] ; v2 = b[tid]
s_waitcnt vmcnt(0) ; wait for both loads
v_add_nc_u32_e32 v1, v2, v1 ; v1 = a[tid] + b[tid]
global_store_b32 v0, v1, s[0:1] ; c[tid] = v1
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0017172b_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# atexit hook (queued by _ZL24__sti____cudaRegisterAllv): unregisters the
# fat binary handle cached at registration time.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
# Host-callable entry for the kernel symbol: the three pointer args are
# still in rdi/rsi/rdx, so it simply forwards to the launch stub.
_Z3addPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp # realign rsp to 16 for the call
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "The value of the %d plus %d is : %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
movl $2048, %edi
call malloc@PLT
movq %rax, %r12
movl $2048, %edi
call malloc@PLT
movq %rax, %rbp
movl $2048, %edi
call malloc@PLT
movq %rax, %r13
movl $0, %eax
.L12:
movl %eax, (%r12,%rax,4)
leal (%rax,%rax), %edx
movl %edx, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $512, %rax
jne .L12
movl $1, %ecx
movl $2048, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $2048, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $512, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $2048, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC0(%rip), %r14
.L14:
movl 0(%rbp,%rbx), %ecx
movl (%r12,%rbx), %edx
movl 0(%r13,%rbx), %r8d
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $2048, %rbx
jne .L14
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
# Static initializer (referenced from .init_array below): registers the
# embedded fatbin, caches the handle, registers kernel _Z3addPiS_S_ under
# its mangled name, finishes registration, and queues the unregister hook.
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # cache module handle
pushq $0 # trailing optional dim/size pointer args = NULL
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d # thread limit: unlimited
leaq .LC1(%rip), %rdx # device-side name "_Z3addPiS_S_"
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi # host stub address used as the handle
call __cudaRegisterFunction@PLT
addq $32, %rsp # drop the four pushed NULLs
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT # unregister at program exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "main.hip"
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
leaq 8(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
movq %rsp, %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %rbx
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %r14
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %r15
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %ecx, (%rbx,%rcx,4)
movl %eax, (%r14,%rcx,4)
incq %rcx
addl $2, %eax
cmpq $512, %rcx # imm = 0x200
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 511(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $2048, %edx # imm = 0x800
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl (%r14,%r12,4), %edx
movl (%r15,%r12,4), %ecx
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $512, %r12 # imm = 0x200
jne .LBB1_5
# %bb.6:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "The value of the %d plus %d is : %d\n"
.size .L.str, 37
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
} | .file "tmpxft_00062273_00000000-6_kernel_push2_atomic.cudafe1.cpp"
.text
#APP
#NO_APP
#-----------------------------------------------------------------------
# static void __cudaUnregisterBinaryUtil(void)
# ABI:   System V AMD64 (Linux, GAS/AT&T syntax)
# Registered via atexit() by _ZL24__sti____cudaRegisterAllv below;
# unregisters this translation unit's fat binary at process exit.
# In:    none   Out:   none   Clobbers: caller-saved regs (per callee)
#-----------------------------------------------------------------------
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
# 8-byte adjustment: entry rsp%16 == 8, so this realigns rsp to 16
# before the call, as the SysV ABI requires.
subq $8, %rsp
.cfi_def_cfa_offset 16
# arg0 = saved fatbin handle (RIP-relative load of the file-local global)
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# Device stub for kernel_push2_atomic (13 int* + 6 int parameters).
# ABI: System V AMD64.  Spills all 19 arguments to local stack slots,
# builds the void* argument-pointer array cudaLaunchKernel expects,
# pops the <<<...>>> launch configuration, and launches the kernel.
# Stack frame (344 bytes; entry rsp%16==8, so rsp is 16-aligned after
# the sub):  8..104  spilled args, 112 sharedMem, 120 stream,
# 128..148 grid/block dim3 defaults, 176..320 19-entry arg-ptr array,
# 328 stack-protector canary.
#-----------------------------------------------------------------------
.globl _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.type _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, @function
_Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $344, %rsp
.cfi_def_cfa_offset 352
# Spill the six register arguments (args 1-6) into local slots.
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
# Copy incoming stack arguments 7-13 (at 352..400 above the frame)
# down into local slots 56..8.
movq 352(%rsp), %rax
movq %rax, 56(%rsp)
movq 360(%rsp), %rax
movq %rax, 48(%rsp)
movq 368(%rsp), %rax
movq %rax, 40(%rsp)
movq 376(%rsp), %rax
movq %rax, 32(%rsp)
movq 384(%rsp), %rax
movq %rax, 24(%rsp)
movq 392(%rsp), %rax
movq %rax, 16(%rsp)
movq 400(%rsp), %rax
movq %rax, 8(%rsp)
# Install the stack-protector canary (glibc keeps it at %fs:40).
movq %fs:40, %rax
movq %rax, 328(%rsp)
xorl %eax, %eax
# Build the 19-entry void* argument array at 176..320(%rsp):
# entries 0-12 point at the spilled slots above ...
leaq 104(%rsp), %rax
movq %rax, 176(%rsp)
leaq 96(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rax
movq %rax, 192(%rsp)
leaq 80(%rsp), %rax
movq %rax, 200(%rsp)
leaq 72(%rsp), %rax
movq %rax, 208(%rsp)
leaq 64(%rsp), %rax
movq %rax, 216(%rsp)
leaq 56(%rsp), %rax
movq %rax, 224(%rsp)
leaq 48(%rsp), %rax
movq %rax, 232(%rsp)
leaq 40(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rax
movq %rax, 248(%rsp)
leaq 24(%rsp), %rax
movq %rax, 256(%rsp)
leaq 16(%rsp), %rax
movq %rax, 264(%rsp)
leaq 8(%rsp), %rax
movq %rax, 272(%rsp)
# ... entries 13-18 point at caller-owned stack args 14-19 in place.
leaq 408(%rsp), %rax
movq %rax, 280(%rsp)
leaq 416(%rsp), %rax
movq %rax, 288(%rsp)
leaq 424(%rsp), %rax
movq %rax, 296(%rsp)
leaq 432(%rsp), %rax
movq %rax, 304(%rsp)
leaq 440(%rsp), %rax
movq %rax, 312(%rsp)
leaq 448(%rsp), %rax
movq %rax, 320(%rsp)
# Default grid dim3 (128..136) and block dim3 (140..148) to {1,1,1};
# __cudaPopCallConfiguration overwrites them with the <<<...>>> values.
movl $1, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
# __cudaPopCallConfiguration(&grid=128, &block=140, &shmem=112, &stream=120)
leaq 120(%rsp), %rcx
leaq 112(%rsp), %rdx
leaq 140(%rsp), %rsi
leaq 128(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
# Verify the canary before returning; mismatch aborts via stack_chk_fail.
movq 328(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $344, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
# Pass sharedMem and stream on the stack.  Both pushes read 120(%rsp):
# the first push moves rsp by 8, so the second "120(%rsp)" is the old
# 112(%rsp) (sharedMem) — stream first, then sharedMem.
pushq 120(%rsp)
.cfi_def_cfa_offset 360
pushq 120(%rsp)
.cfi_def_cfa_offset 368
# cudaLaunchKernel(func, gridDim{rsi,edx}, blockDim{rcx,r8d}, args=r9, ...)
# (all local offsets below are +16 to account for the two pushes).
leaq 192(%rsp), %r9
movq 156(%rsp), %rcx
movl 164(%rsp), %r8d
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
leaq _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 352
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, .-_Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
#-----------------------------------------------------------------------
# Host-side entry point for kernel_push2_atomic<<<...>>>.
# Forwards all 19 arguments to the device stub: the six register args
# pass through untouched in rdi..r9; the thirteen stack args are
# re-pushed below.  Each push moves rsp down by 8 while the next source
# argument sits 8 bytes higher, so the constant offset 120(%rsp) walks
# the caller's stack-argument list in reverse order.
# Total adjustment 16 + 13*8 = 120 keeps rsp 16-aligned at the call.
#-----------------------------------------------------------------------
.globl _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.type _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, @function
_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
# Six trailing int args: load as 32-bit (zero-extends) then push.
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 72
# Seven pointer args: push the full 64-bit values directly.
pushq 120(%rsp)
.cfi_def_cfa_offset 80
pushq 120(%rsp)
.cfi_def_cfa_offset 88
pushq 120(%rsp)
.cfi_def_cfa_offset 96
pushq 120(%rsp)
.cfi_def_cfa_offset 104
pushq 120(%rsp)
.cfi_def_cfa_offset 112
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq 120(%rsp)
.cfi_def_cfa_offset 128
call _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, .-_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
# Mergeable read-only string: the mangled device-function name passed to
# __cudaRegisterFunction below.
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii"
.text
#-----------------------------------------------------------------------
# Static initializer (referenced from .init_array below): registers the
# embedded fat binary and the kernel_push2_atomic device function with
# the CUDA runtime at program startup, then registers the unregister
# helper with atexit().
#-----------------------------------------------------------------------
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# handle = __cudaRegisterFatBinary(&__fatDeviceText); cache it globally.
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# Four NULL stack arguments for __cudaRegisterFunction (args 7-10).
# Four pushes keep rsp 16-aligned at the call.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
# __cudaRegisterFunction(handle, hostFun, deviceFun, deviceName,
#                        thread_limit=-1, 0, NULL x4)
# deviceFun and deviceName are both .LC0 here.
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
# Schedule fatbin teardown at exit.
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Run the CUDA registration initializer at program startup.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
# 24-byte fat-binary wrapper record (section name suggests NVIDIA's
# fatbin segment): {magic 1180844977 = 0x466243b1, version 1,
# pointer to fatbinData, reserved 0}.
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
# File-local 8-byte slot holding the handle returned by
# __cudaRegisterFatBinary.
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
# Non-executable stack marker.
.section .note.GNU-stack,"",@progbits
# GNU property note advertising CET/IBT+SHSTK support (0xc0000002 /
# feature bits 0x3), matching the endbr64 landing pads above.
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// Push step of a push-relabel-style graph-cut solver (per the names:
// push reserve/excess, relabel mask, height function).  One thread per
// graph node `thid`.  Each thread stages the height function for its
// node (plus a one-cell halo) in shared memory, then pushes excess flow
// to the sink and to the left/up/right/down neighbours wherever the
// edge weight (residual capacity) is positive, excess is positive, and
// height(u) == height(neighbour) + 1; otherwise it decrements the
// neighbour's pull counter.  A relabel-mask update follows, then the
// whole push pass repeats once.
// Assumes blockDim == (32, 8) — see the hard-coded threadIdx.x == 31 /
// threadIdx.y == 7 halo loads and the 34-column tile — TODO confirm at
// the launch site.
__global__ void kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
// Local (in-block) and global node coordinates; thid is the flat index.
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
// Shared height tile, 34 cells per row (32 threads + left/right halo);
// 356 >= (8 + 2 halo rows) * 34 = 340 cells.
__shared__ int height_fn[356];
// temp_mult: this thread's cell inside the haloed tile.
// NOTE(review): temp_mult1 is computed but never used in this kernel.
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
// Boundary threads of the block also fetch the one-cell halo
// (ternary-with-assignment used as a conditional statement).
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
// First push pass: only active (mask == 1), interior, in-range nodes.
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
// Push to sink (non-atomic weight update; sink edge is private to thid).
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
// Push left: move min(capacity, excess) and credit the reverse edge.
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
// Push up (row stride width1).
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
// Push right.
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
// Push down.
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
// Relabel-mask update: 2 = saturated/done (no excess or all edges
// exhausted), 1 = still active (an admissible edge exists), 0 = idle.
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
// Second push pass: identical structure to the first (sink, left, up,
// right, down), re-run against the freshly updated relabel mask.
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
// NOTE(review): the text after "}" below ("| #include ...") is a
// file-concatenation artifact in this dump, not valid C++.
} | #include <hip/hip_runtime.h>
#include "includes.h"
// Push step of a push-relabel-style graph-cut solver (HIP port; per the
// names: push reserve/excess, relabel mask, height function).  One
// thread per graph node `thid`.  The height function for the block's
// nodes plus a one-cell halo is staged in shared memory; each thread
// then pushes excess flow to the sink and to its left/up/right/down
// neighbours wherever the edge weight (residual capacity) is positive,
// excess is positive, and height(u) == height(neighbour) + 1; otherwise
// it decrements the neighbour's pull counter.  A relabel-mask update
// follows, and the push pass runs a second time.
//
// Assumes blockDim == (32, 8) — see the hard-coded threadIdx.x == 31 /
// threadIdx.y == 7 halo loads and the 34-column tile — TODO confirm at
// the launch site.
//
// Fix vs. previous revision: removed the dead local `temp_mult1`
// (computed but never read); no behavioral change.
__global__ void kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
// Local (in-block) and global node coordinates; thid is the flat index.
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
// Shared height tile, 34 cells per row (32 threads + left/right halo);
// 356 >= (8 + 2 halo rows) * 34 = 340 cells.
__shared__ int height_fn[356];
// This thread's cell inside the haloed tile.
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
// Boundary threads of the block also fetch the one-cell halo
// (ternary-with-assignment used as a conditional statement).
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
// First push pass: only active (mask == 1), interior, in-range nodes.
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
// Push to sink (non-atomic weight update; sink edge is private to thid).
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
// Push left: move min(capacity, excess) and credit the reverse edge.
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
// Push up (row stride width1).
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
// Push right.
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
// Push down.
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
// Relabel-mask update: 2 = saturated/done (no excess or all edges
// exhausted), 1 = still active (an admissible edge exists), 0 = idle.
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
// Second push pass: identical structure to the first (sink, left, up,
// right, down), re-run against the freshly updated relabel mask.
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
} | .text
; ---------------------------------------------------------------------------
; AMDGPU gfx1100 (RDNA3, wave32) kernel: kernel_push2_atomic
; Compiler-generated push-relabel (graph-cut) push kernel using atomics.
;
; s[0:1] = kernarg segment pointer.  v0 packs the workitem ids
; (bits 0..9 = tid.x, bits 10..19 = tid.y).  An LDS tile caches the
; height function with a 34-int row pitch (34*4 = 136 bytes per row),
; matching the height_fn[temp_mult +/- 34] accesses in the source;
; offset:4 below the base index leaves room for a one-column halo.
;
; Inferred kernarg layout (from the push/pull pairing below -- matches
; the visible CUDA source's argument order; confirm against the header):
;   0x00: g_left_weight  (s[12:13])   0x08: g_right_weight (s[14:15])
;   0x10: g_down_weight  (s[16:17])   0x18: g_up_weight    (s[18:19])
;   0x20: g_sink_weight  (s[4:5])     0x28: g_push_reser   (s[6:7])
;   0x30: g_pull_left    (s[8:9])     0x38: g_pull_right   (s[10:11])
;   0x40: g_pull_down    (s[20:21])   0x48: g_pull_up      (s[22:23])
;   0x50: g_relabel_mask (s[26:27])   0x58: height array
;   scalars at 0x6c/0x74/0x78/0x7c; s28 = row pitch (width1)
; atomicSub is emitted as global_atomic_add of a negated value / -1.
; ---------------------------------------------------------------------------
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.globl _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.p2align 8
.type _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii,@function
_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii:
; Load the packed workgroup dimensions (0x8c) and the row pitch (0x78).
s_clause 0x1
s_load_b32 s2, s[0:1], 0x8c
s_load_b32 s28, s[0:1], 0x78
; Unpack tid.x -> v3 and tid.y -> v2 from the packed workitem id in v0.
v_and_b32_e32 v3, 0x3ff, v0
v_bfe_u32 v2, v0, 10, 10
s_and_b32 s3, s14, 0xffffff
s_and_b32 s4, s15, 0xffffff
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
s_lshr_b32 s2, s2, 16
; Global coordinates: x = wg.x*blockDim.x + tid.x (v6),
;                     y = wg.y*blockDim.y + tid.y (v4).
v_mad_u64_u32 v[6:7], null, s3, s5, v[3:4]
v_mad_u64_u32 v[4:5], null, s4, s2, v[2:3]
s_load_b64 s[4:5], s[0:1], 0x58
; LDS tile index: v13 = tid.x + 34*tid.y + 34 (34-int pitch, one halo row).
v_mul_u32_u24_e32 v5, 34, v2
s_add_i32 s2, s28, -1
s_delay_alu instid0(VALU_DEP_3) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e64 s2, s2, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; Linear node index: thid (v0) = y * pitch + x; v1 = sign extension.
v_mad_u32_u24 v0, v4, s28, v6
v_add3_u32 v13, v3, v5, 34
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b32_e32 v5, 2, v13
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[7:8], 2, v[0:1]
s_waitcnt lgkmcnt(0)
; v[7:8] = &height[thid] (base pointer loaded from kernarg 0x58).
v_add_co_u32 v7, vcc_lo, s4, v7
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, 31, v3
; Each thread stages its own height value into the LDS tile.
global_load_b32 v9, v[7:8], off
s_and_b32 s3, vcc_lo, s2
s_waitcnt vmcnt(0)
ds_store_b32 v5, v9 offset:4
; Lanes with tid.x == 31 on interior columns also fetch the element one
; to the right (right halo column).
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB0_2
global_load_b32 v9, v[7:8], off offset:4
s_waitcnt vmcnt(0)
ds_store_b32 v5, v9 offset:8
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
; Lanes with tid.x == 0 and x > 0 fetch the left halo element.
v_cmp_eq_u32_e64 s2, 0, v3
v_cmp_lt_i32_e32 vcc_lo, 0, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s2, vcc_lo
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB0_4
global_load_b32 v3, v[7:8], off offset:-4
v_lshlrev_b32_e32 v7, 2, v13
s_waitcnt vmcnt(0)
ds_store_b32 v7, v3
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s2
s_load_b32 s2, s[0:1], 0x7c
s_waitcnt lgkmcnt(0)
; Lanes in the last tile row (tid.y == 7) with y below the last image row
; fetch the element one pitch ahead into the bottom halo row
; (offset:140 = one 34-int LDS row plus the base offset).
s_add_i32 s3, s2, -1
v_cmp_eq_u32_e64 s2, 7, v2
v_cmp_gt_i32_e64 s3, s3, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, s3
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
v_add_nc_u32_e32 v7, s28, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v8, 31, v7
v_lshlrev_b64 v[7:8], 2, v[7:8]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v7, s2, s4, v7
v_add_co_ci_u32_e64 v8, s2, s5, v8, s2
global_load_b32 v3, v[7:8], off
v_lshlrev_b32_e32 v7, 2, v13
s_waitcnt vmcnt(0)
ds_store_b32 v7, v3 offset:140
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s3
; Lanes with tid.y == 0 and y > 0 fetch the element one pitch behind into
; the top halo row (0xffffff7c = -132 byte displacement from v13*4,
; i.e. one LDS row above).
v_cmp_eq_u32_e64 s3, 0, v2
v_cmp_lt_i32_e64 s2, 0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s3, s2
s_and_saveexec_b32 s6, s3
s_cbranch_execz .LBB0_8
v_subrev_nc_u32_e32 v2, s28, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v2, s3, s4, v2
v_add_co_ci_u32_e64 v3, s3, s5, v3, s3
global_load_b32 v2, v[2:3], off
v_lshl_add_u32 v3, v13, 2, 0xffffff7c
s_waitcnt vmcnt(0)
ds_store_b32 v3, v2
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s6
; Bulk-load the remaining kernel arguments (see the register map in the
; header comment above the kernel entry).
s_clause 0x5
s_load_b256 s[4:11], s[0:1], 0x20
s_load_b32 s3, s[0:1], 0x74
s_load_b64 s[24:25], s[0:1], 0x6c
s_load_b256 s[12:19], s[0:1], 0x0
s_load_b128 s[20:23], s[0:1], 0x40
s_load_b64 s[26:27], s[0:1], 0x50
v_lshlrev_b64 v[7:8], 2, v[0:1]
; v14 = LDS byte address of this thread's own height entry (v13*4 + 4).
v_add_nc_u32_e32 v14, 4, v5
s_waitcnt lgkmcnt(0)
; __syncthreads(): workgroup barrier plus L0 vector-cache invalidate.
s_barrier
buffer_gl0_inv
; v[2:3] = &reservoir[thid] (s[6:7]); reused throughout as the
; atomicSub/atomicAdd target for the thread's excess flow.
v_add_co_u32 v2, s0, s6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s0, s7, v8, s0
; Guard 1: thid < graph_size (scalar from kernarg 0x74).
v_cmp_gt_i32_e64 s0, s3, v0
s_and_saveexec_b32 s3, s0
s_cbranch_execz .LBB0_39
; Guard 2: relabel mask at s[26:27][thid] must equal 1 (active node).
v_add_co_u32 v9, s1, s26, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s1, s27, v8, s1
global_load_b32 v5, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e64 s1, 1, v5
s_and_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_39
; Guard 3: 0 < x < width-1 (interior column).
s_add_i32 s1, s24, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s1, s1, v6
s_and_b32 s1, s1, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_39
; Guard 4: 0 < y < rows-1 (interior row).
s_add_i32 s1, s25, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s1, s1, v4
s_and_b32 s1, s1, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_39
; --- Phase-1 sink push ---
; v5 = min(sink_weight[thid], reservoir[thid]); push only if > 0 and the
; LDS height equals 1; on success store the reduced sink weight and
; atomically subtract the pushed amount from the reservoir.
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_mov_b32 s29, 0
s_mov_b32 s30, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s1, s4, v9
v_add_co_ci_u32_e64 v12, s1, s5, v10, s1
global_load_b32 v5, v[2:3], off
global_load_b32 v15, v[11:12], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v5, v15, v5
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e32 0, v5
s_cbranch_execz .LBB0_15
ds_load_b32 v16, v14
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e64 s1, 1, v16
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_15
v_sub_nc_u32_e32 v15, v15, v5
v_sub_nc_u32_e32 v5, 0, v5
global_store_b32 v[11:12], v15, off
global_atomic_add_u32 v[2:3], v5, off
.LBB0_15:
s_or_b32 exec_lo, exec_lo, s30
; --- Phase-1 left push ---
; v5 = min(left_weight[thid] (s[12:13]), reservoir).  Push succeeds when
; height[thid] == height[left] + 1 (LDS compare below); otherwise the
; else-branch atomically decrements the pull counter (s[8:9]) at thid-1.
v_add_co_u32 v9, s1, s12, v9
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s1, s13, v10, s1
s_mov_b32 s31, exec_lo
global_load_b32 v5, v[2:3], off
global_load_b32 v11, v[9:10], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v5, v11, v5
s_delay_alu instid0(VALU_DEP_1)
; s30 accumulates "no push" lanes (amount < 1 or height test fails).
v_cmp_gt_i32_e64 s30, 1, v5
v_cmpx_lt_i32_e32 0, v5
s_cbranch_execz .LBB0_17
v_lshlrev_b32_e32 v11, 2, v13
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s30, s30, exec_lo
s_mov_b32 s29, exec_lo
; Compare own height (v14) against left neighbor's height + 1.
ds_load_b32 v11, v11
ds_load_b32 v12, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v11, 1, v11
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s1, v12, v11
s_and_b32 s1, s1, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s30, s30, s1
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s31
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s1, s30
s_xor_b32 s30, exec_lo, s1
s_cbranch_execz .LBB0_19
; Else branch: atomicSub(&pull_left[thid-1], 1) via add of -1 (offset:-4).
v_lshlrev_b64 v[11:12], 2, v[0:1]
v_mov_b32_e32 v15, -1
s_and_not1_b32 s29, s29, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s1, s8, v11
v_add_co_ci_u32_e64 v12, s1, s9, v12, s1
global_atomic_add_u32 v[11:12], v15, off offset:-4
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s30
s_and_saveexec_b32 s30, s29
s_cbranch_execz .LBB0_21
; Push branch: move v5 units of flow leftwards --
; left_weight[thid] -= v5, right_weight[thid-1] += v5,
; reservoir[thid] -= v5, reservoir[thid-1] += v5 (all atomics).
v_lshlrev_b64 v[11:12], 2, v[0:1]
v_sub_nc_u32_e32 v17, 0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v15, s1, v11, -4
v_add_co_ci_u32_e64 v16, s1, -1, v12, s1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s1, s14, v15
v_add_co_ci_u32_e64 v12, s1, s15, v16, s1
v_add_co_u32 v15, s1, s6, v15
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v16, s1, s7, v16, s1
global_atomic_add_u32 v[9:10], v17, off
global_atomic_add_u32 v[11:12], v5, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v17, off
global_atomic_add_u32 v[15:16], v5, off
.LBB0_21:
s_or_b32 exec_lo, exec_lo, s30
; --- Phase-1 up push ---
; v5 = min(up_weight[thid] (s[18:19]), reservoir).  Height test uses the
; LDS row above (0xffffff7c = -132 bytes from v13*4).  Else branch:
; atomicSub(&pull_up[thid-pitch], 1) (s[22:23]).  Push branch mirrors the
; left push, pairing with down_weight[thid-pitch] (s[16:17]).
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_mov_b32 s29, 0
s_mov_b32 s31, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v9, s1, s18, v9
v_add_co_ci_u32_e64 v10, s1, s19, v10, s1
global_load_b32 v5, v[2:3], off
global_load_b32 v11, v[9:10], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v5, v11, v5
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e64 s30, 1, v5
v_cmpx_lt_i32_e32 0, v5
s_cbranch_execz .LBB0_23
v_lshl_add_u32 v11, v13, 2, 0xffffff7c
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s30, s30, exec_lo
s_mov_b32 s29, exec_lo
ds_load_b32 v11, v11
ds_load_b32 v12, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v11, 1, v11
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s1, v12, v11
s_and_b32 s1, s1, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s30, s30, s1
.LBB0_23:
s_or_b32 exec_lo, exec_lo, s31
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s1, s30
s_xor_b32 s30, exec_lo, s1
s_cbranch_execz .LBB0_25
v_subrev_nc_u32_e32 v11, s28, v0
v_mov_b32_e32 v15, -1
s_and_not1_b32 s29, s29, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s1, s22, v11
v_add_co_ci_u32_e64 v12, s1, s23, v12, s1
global_atomic_add_u32 v[11:12], v15, off
.LBB0_25:
s_or_b32 exec_lo, exec_lo, s30
s_and_saveexec_b32 s30, s29
s_cbranch_execz .LBB0_27
; Push upwards: up_weight[thid] -= v5, down_weight[thid-pitch] += v5,
; reservoir[thid] -= v5, reservoir[thid-pitch] += v5.
v_subrev_nc_u32_e32 v11, s28, v0
v_sub_nc_u32_e32 v17, 0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v15, s1, s16, v11
v_add_co_ci_u32_e64 v16, s1, s17, v12, s1
v_add_co_u32 v11, s1, s6, v11
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s1, s7, v12, s1
global_atomic_add_u32 v[9:10], v17, off
global_atomic_add_u32 v[15:16], v5, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v17, off
global_atomic_add_u32 v[11:12], v5, off
.LBB0_27:
s_or_b32 exec_lo, exec_lo, s30
; --- Phase-1 right push ---
; v5 = min(right_weight[thid] (s[14:15]), reservoir).  Height test reads
; the LDS element one to the right (offset:8 past v13*4).  Else branch:
; atomicSub(&pull_right[thid+1], 1) (s[10:11], offset:4).
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_mov_b32 s29, 0
s_mov_b32 s31, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v9, s1, s14, v9
v_add_co_ci_u32_e64 v10, s1, s15, v10, s1
global_load_b32 v5, v[2:3], off
global_load_b32 v11, v[9:10], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v5, v11, v5
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e64 s30, 1, v5
v_cmpx_lt_i32_e32 0, v5
s_cbranch_execz .LBB0_29
v_lshlrev_b32_e32 v11, 2, v13
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s30, s30, exec_lo
s_mov_b32 s29, exec_lo
ds_load_b32 v11, v11 offset:8
ds_load_b32 v12, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v11, 1, v11
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s1, v12, v11
s_and_b32 s1, s1, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s30, s30, s1
.LBB0_29:
s_or_b32 exec_lo, exec_lo, s31
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s1, s30
s_xor_b32 s30, exec_lo, s1
s_cbranch_execz .LBB0_31
v_lshlrev_b64 v[11:12], 2, v[0:1]
v_mov_b32_e32 v15, -1
s_and_not1_b32 s29, s29, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s1, s10, v11
v_add_co_ci_u32_e64 v12, s1, s11, v12, s1
global_atomic_add_u32 v[11:12], v15, off offset:4
.LBB0_31:
s_or_b32 exec_lo, exec_lo, s30
s_and_saveexec_b32 s30, s29
s_cbranch_execz .LBB0_33
; Push rightwards: right_weight[thid] -= v5, left_weight[thid+1] += v5,
; reservoir[thid] -= v5, reservoir[thid+1] += v5.
v_lshlrev_b64 v[11:12], 2, v[0:1]
v_sub_nc_u32_e32 v17, 0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v15, s1, v11, 4
v_add_co_ci_u32_e64 v16, s1, 0, v12, s1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s1, s12, v15
v_add_co_ci_u32_e64 v12, s1, s13, v16, s1
v_add_co_u32 v15, s1, s6, v15
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v16, s1, s7, v16, s1
global_atomic_add_u32 v[9:10], v17, off
global_atomic_add_u32 v[11:12], v5, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v17, off
global_atomic_add_u32 v[15:16], v5, off
.LBB0_33:
s_or_b32 exec_lo, exec_lo, s30
; --- Phase-1 down push ---
; v5 = min(down_weight[thid] (s[16:17]), reservoir).  Height test reads
; the LDS row below (offset:140).  Else branch:
; atomicSub(&pull_down[thid+pitch], 1) (s[20:21]).
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_mov_b32 s29, 0
s_mov_b32 s31, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v9, s1, s16, v9
v_add_co_ci_u32_e64 v10, s1, s17, v10, s1
global_load_b32 v5, v[2:3], off
global_load_b32 v11, v[9:10], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v5, v11, v5
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e64 s30, 1, v5
v_cmpx_lt_i32_e32 0, v5
s_cbranch_execz .LBB0_35
v_lshlrev_b32_e32 v11, 2, v13
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s30, s30, exec_lo
s_mov_b32 s29, exec_lo
ds_load_b32 v11, v11 offset:140
ds_load_b32 v12, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v11, 1, v11
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s1, v12, v11
s_and_b32 s1, s1, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s30, s30, s1
.LBB0_35:
s_or_b32 exec_lo, exec_lo, s31
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s1, s30
s_xor_b32 s30, exec_lo, s1
s_cbranch_execz .LBB0_37
v_add_nc_u32_e32 v11, s28, v0
v_mov_b32_e32 v15, -1
s_and_not1_b32 s29, s29, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s1, s20, v11
v_add_co_ci_u32_e64 v12, s1, s21, v12, s1
global_atomic_add_u32 v[11:12], v15, off
.LBB0_37:
s_or_b32 exec_lo, exec_lo, s30
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s29
s_cbranch_execz .LBB0_39
; Push downwards: down_weight[thid] -= v5, up_weight[thid+pitch] += v5,
; reservoir[thid] -= v5, reservoir[thid+pitch] += v5.
v_add_nc_u32_e32 v11, s28, v0
v_sub_nc_u32_e32 v17, 0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v15, s1, s18, v11
v_add_co_ci_u32_e64 v16, s1, s19, v12, s1
v_add_co_u32 v11, s1, s6, v11
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s1, s7, v12, s1
global_atomic_add_u32 v[9:10], v17, off
global_atomic_add_u32 v[15:16], v5, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v17, off
global_atomic_add_u32 v[11:12], v5, off
.LBB0_39:
s_or_b32 exec_lo, exec_lo, s3
; Second __syncthreads(): drain outstanding stores, barrier, invalidate L0.
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
; --- Relabel-mask computation ---
; Reload the reservoir (v11) and left_weight (via v[7:8] = s[12:13]+thid).
; Default mask value v15 = 2 ("saturated/inactive").
global_load_b32 v11, v[2:3], off
v_add_co_u32 v7, s1, s12, v7
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v8, s1, s13, v8, s1
v_mov_b32_e32 v15, 2
s_mov_b32 s29, exec_lo
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e32 0, v11
s_cbranch_execz .LBB0_69
; Reservoir > 0: test whether every residual edge is exhausted
; (left == 0 && right == 0 && down == 0 && up == 0 && sink == 0);
; s3 collects the lanes for which the conjunction holds.
global_load_b32 v12, v[7:8], off
s_mov_b32 s3, -1
s_mov_b32 s30, exec_lo
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 0, v12
s_cbranch_execz .LBB0_48
; left == 0: check right_weight (s[14:15]).
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_mov_b32 s31, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v15, s1, s14, v9
v_add_co_ci_u32_e64 v16, s1, s15, v10, s1
global_load_b32 v5, v[15:16], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e64 s1, 0, v5
v_cmpx_eq_u32_e32 0, v5
s_cbranch_execz .LBB0_47
; right == 0: check down_weight (s[16:17]).
v_add_co_u32 v9, s3, s16, v9
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s3, s17, v10, s3
s_mov_b32 s35, -1
s_mov_b32 s34, exec_lo
global_load_b32 v5, v[9:10], off
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 0, v5
s_cbranch_execz .LBB0_46
; down == 0: check up_weight (s[18:19]).
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_mov_b32 s33, 2
s_mov_b32 s36, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v15, s3, s18, v9
v_add_co_ci_u32_e64 v16, s3, s19, v10, s3
global_load_b32 v5, v[15:16], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e64 s35, 0, v5
v_cmpx_eq_u32_e32 0, v5
s_cbranch_execz .LBB0_45
; up == 0: finally check sink_weight (s[4:5]).
v_add_co_u32 v9, s3, s4, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s3, s5, v10, s3
s_and_not1_b32 s35, s35, exec_lo
global_load_b32 v5, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e64 s3, 0, v5
s_and_b32 s3, s3, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s35, s35, s3
.LBB0_45:
s_or_b32 exec_lo, exec_lo, s36
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
s_or_not1_b32 s35, s35, exec_lo
.LBB0_46:
s_or_b32 exec_lo, exec_lo, s34
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s1, s1, exec_lo
s_and_b32 s3, s35, exec_lo
s_or_b32 s1, s1, s3
.LBB0_47:
s_or_b32 exec_lo, exec_lo, s31
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
s_or_not1_b32 s3, s1, exec_lo
.LBB0_48:
s_or_b32 exec_lo, exec_lo, s30
; Lanes NOT fully saturated fall through here with s3 set; they decide
; between mask 1 (an admissible push direction exists) and mask 0.
; v15 carries the final mask value out of this cascade.
v_mov_b32_e32 v15, s33
s_and_saveexec_b32 s30, s3
s_cbranch_execz .LBB0_68
; Left direction admissible? height == height[left]+1 and left_weight > 0.
v_lshlrev_b32_e32 v9, 2, v13
v_cmp_gt_i32_e64 s3, 1, v12
ds_load_b32 v10, v9
ds_load_b32 v5, v14
s_waitcnt lgkmcnt(1)
v_dual_mov_b32 v15, 1 :: v_dual_add_nc_u32 v10, 1, v10
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s1, v5, v10
s_or_b32 s1, s3, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s1
s_cbranch_execz .LBB0_67
; Right direction: LDS offset:8 is the right neighbor's height.
ds_load_b32 v9, v9 offset:8
s_mov_b32 s31, exec_lo
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v9, 1, v9
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ne_u32_e64 s33, v5, v9
v_cmpx_eq_u32_e64 v5, v9
s_cbranch_execz .LBB0_52
; Height matches; admissible only if right_weight (s[14:15]) > 0.
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_and_not1_b32 s33, s33, exec_lo
s_mov_b32 s34, 1
v_add_co_u32 v9, s1, s14, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s1, s15, v10, s1
global_load_b32 v9, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e64 s1, 1, v9
s_and_b32 s1, s1, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s33, s33, s1
.LBB0_52:
s_or_b32 exec_lo, exec_lo, s31
v_mov_b32_e32 v15, s34
s_and_saveexec_b32 s31, s33
s_cbranch_execz .LBB0_66
; Down direction: LDS offset:140 is the neighbor one row below.
v_lshlrev_b32_e32 v9, 2, v13
s_mov_b32 s33, 1
s_mov_b32 s35, exec_lo
ds_load_b32 v9, v9 offset:140
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v9, 1, v9
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ne_u32_e64 s34, v5, v9
v_cmpx_eq_u32_e64 v5, v9
s_cbranch_execz .LBB0_55
; Height matches; admissible only if down_weight (s[16:17]) > 0.
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
s_and_not1_b32 s34, s34, exec_lo
v_add_co_u32 v9, s1, s16, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s1, s17, v10, s1
global_load_b32 v9, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e64 s1, 1, v9
s_and_b32 s1, s1, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s34, s34, s1
.LBB0_55:
s_or_b32 exec_lo, exec_lo, s35
v_mov_b32_e32 v15, s33
s_and_saveexec_b32 s33, s34
s_cbranch_execz .LBB0_65
; Up direction: LDS row above (-132 bytes from v13*4).
v_lshl_add_u32 v9, v13, 2, 0xffffff7c
s_mov_b32 s34, 1
s_mov_b32 s36, exec_lo
ds_load_b32 v9, v9
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v9, 1, v9
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ne_u32_e64 s35, v5, v9
v_cmpx_eq_u32_e64 v5, v9
s_cbranch_execz .LBB0_58
; Height matches; admissible only if up_weight (s[18:19]) > 0.
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
s_and_not1_b32 s35, s35, exec_lo
v_add_co_u32 v9, s1, s18, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s1, s19, v10, s1
global_load_b32 v9, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e64 s1, 1, v9
s_and_b32 s1, s1, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s35, s35, s1
.LBB0_58:
s_or_b32 exec_lo, exec_lo, s36
v_mov_b32_e32 v15, s34
s_and_saveexec_b32 s34, s35
s_cbranch_execz .LBB0_64
; Sink direction: admissible when height == 1 and sink_weight (s[4:5]) > 0.
v_cmp_ne_u32_e64 s35, 1, v5
s_mov_b32 s36, 1
s_mov_b32 s37, exec_lo
v_cmpx_eq_u32_e32 1, v5
s_cbranch_execz .LBB0_61
v_lshlrev_b64 v[9:10], 2, v[0:1]
s_and_not1_b32 s35, s35, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v9, s1, s4, v9
v_add_co_ci_u32_e64 v10, s1, s5, v10, s1
global_load_b32 v5, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e64 s1, 1, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s1, s1, exec_lo
s_or_b32 s35, s35, s1
.LBB0_61:
s_or_b32 exec_lo, exec_lo, s37
v_mov_b32_e32 v15, s36
; No admissible direction at all -> mask 0 for the remaining lanes.
s_and_saveexec_b32 s1, s35
v_mov_b32_e32 v15, 0
s_or_b32 exec_lo, exec_lo, s1
.LBB0_64:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s34
.LBB0_65:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s33
.LBB0_66:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s31
.LBB0_67:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s3
.LBB0_68:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s30
.LBB0_69:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_or_b32 exec_lo, exec_lo, s29
; Publish the computed mask (v15: 2 / 1 / 0) to the mask array s[26:27],
; then third __syncthreads().
v_lshlrev_b64 v[9:10], 2, v[0:1]
v_add_co_u32 v16, s1, s26, v9
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v17, s1, s27, v10, s1
global_store_b32 v[16:17], v15, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
; --- Second push phase --- (same guard cascade as phase 1:
; thid < graph_size, mask == 1, interior x, interior y).
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_100
v_add_co_u32 v9, s0, s26, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s0, s27, v10, s0
global_load_b32 v5, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e64 s0, 1, v5
s_and_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_100
s_add_i32 s0, s24, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s0, s0, v6
s_and_b32 s0, s0, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_100
s_add_i32 s0, s25, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s0, v4
s_and_b32 s0, vcc_lo, s2
s_and_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_100
; Phase-2 sink push: identical shape to phase 1 (store reduced sink
; weight, atomicSub the reservoir) gated on LDS height == 1.
v_lshlrev_b64 v[4:5], 2, v[0:1]
s_mov_b32 s0, 0
s_mov_b32 s1, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
global_load_b32 v9, v[4:5], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v6, v11, v9
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e32 0, v6
s_cbranch_execz .LBB0_76
ds_load_b32 v10, v14
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 1, v10
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_76
v_sub_nc_u32_e32 v9, v9, v6
v_sub_nc_u32_e32 v6, 0, v6
global_store_b32 v[4:5], v9, off
global_atomic_add_u32 v[2:3], v6, off
.LBB0_76:
s_or_b32 exec_lo, exec_lo, s1
; Phase-2 left push: min(left_weight, reservoir); on height match push
; left (left -= v4, right[thid-1] += v4, reservoir transfer), else
; atomicSub(&pull_left[thid-1], 1).
global_load_b32 v4, v[2:3], off
global_load_b32 v5, v[7:8], off
s_mov_b32 s2, exec_lo
s_waitcnt vmcnt(0)
v_min_i32_e32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e64 s1, 1, v4
v_cmpx_lt_i32_e32 0, v4
s_cbranch_execz .LBB0_78
v_lshlrev_b32_e32 v5, 2, v13
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s1, s1, exec_lo
s_mov_b32 s0, exec_lo
ds_load_b32 v5, v5
ds_load_b32 v6, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v5, 1, v5
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ne_u32_e32 vcc_lo, v6, v5
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s1, s1, s3
.LBB0_78:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s1
s_xor_b32 s1, exec_lo, s2
s_cbranch_execz .LBB0_80
v_lshlrev_b64 v[5:6], 2, v[0:1]
v_mov_b32_e32 v9, -1
s_and_not1_b32 s0, s0, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v5, vcc_lo, s8, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s9, v6, vcc_lo
global_atomic_add_u32 v[5:6], v9, off offset:-4
.LBB0_80:
s_or_b32 exec_lo, exec_lo, s1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_82
v_lshlrev_b64 v[5:6], 2, v[0:1]
v_sub_nc_u32_e32 v11, 0, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v9, vcc_lo, v5, -4
v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s14, v9
v_add_co_ci_u32_e32 v6, vcc_lo, s15, v10, vcc_lo
v_add_co_u32 v9, vcc_lo, s6, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s7, v10, vcc_lo
global_atomic_add_u32 v[7:8], v11, off
global_atomic_add_u32 v[5:6], v4, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v11, off
global_atomic_add_u32 v[9:10], v4, off
.LBB0_82:
s_or_b32 exec_lo, exec_lo, s1
; Phase-2 up push: min(up_weight (s[18:19]), reservoir); height test vs.
; the LDS row above; on success pair with down_weight[thid-pitch]
; (s[16:17]); else atomicSub(&pull_up[thid-pitch], 1) (s[22:23]).
v_lshlrev_b64 v[4:5], 2, v[0:1]
s_mov_b32 s0, 0
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_load_b32 v6, v[2:3], off
global_load_b32 v7, v[4:5], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v6, v7, v6
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e64 s1, 1, v6
v_cmpx_lt_i32_e32 0, v6
s_cbranch_execz .LBB0_84
v_lshl_add_u32 v7, v13, 2, 0xffffff7c
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s1, s1, exec_lo
s_mov_b32 s0, exec_lo
ds_load_b32 v7, v7
ds_load_b32 v8, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v7, 1, v7
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ne_u32_e32 vcc_lo, v8, v7
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s1, s1, s3
.LBB0_84:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s1
s_xor_b32 s1, exec_lo, s2
s_cbranch_execz .LBB0_86
v_subrev_nc_u32_e32 v7, s28, v0
v_mov_b32_e32 v9, -1
s_and_not1_b32 s0, s0, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v8, 31, v7
v_lshlrev_b64 v[7:8], 2, v[7:8]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, s22, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s23, v8, vcc_lo
global_atomic_add_u32 v[7:8], v9, off
.LBB0_86:
s_or_b32 exec_lo, exec_lo, s1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_88
v_subrev_nc_u32_e32 v7, s28, v0
v_sub_nc_u32_e32 v11, 0, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v8, 31, v7
v_lshlrev_b64 v[7:8], 2, v[7:8]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, s16, v7
v_add_co_ci_u32_e32 v10, vcc_lo, s17, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
global_atomic_add_u32 v[4:5], v11, off
global_atomic_add_u32 v[9:10], v6, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v11, off
global_atomic_add_u32 v[7:8], v6, off
.LBB0_88:
s_or_b32 exec_lo, exec_lo, s1
; Phase-2 right push: min(right_weight (s[14:15]), reservoir); height
; test vs. the LDS element to the right (offset:8); on success pair with
; left_weight[thid+1] (s[12:13]); else atomicSub(&pull_right[thid+1], 1).
v_lshlrev_b64 v[4:5], 2, v[0:1]
s_mov_b32 s0, 0
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s14, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s15, v5, vcc_lo
global_load_b32 v6, v[2:3], off
global_load_b32 v7, v[4:5], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v6, v7, v6
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e64 s1, 1, v6
v_cmpx_lt_i32_e32 0, v6
s_cbranch_execz .LBB0_90
v_lshlrev_b32_e32 v7, 2, v13
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s1, s1, exec_lo
s_mov_b32 s0, exec_lo
ds_load_b32 v7, v7 offset:8
ds_load_b32 v8, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v7, 1, v7
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ne_u32_e32 vcc_lo, v8, v7
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s1, s1, s3
.LBB0_90:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s1
s_xor_b32 s1, exec_lo, s2
s_cbranch_execz .LBB0_92
v_lshlrev_b64 v[7:8], 2, v[0:1]
v_mov_b32_e32 v9, -1
s_and_not1_b32 s0, s0, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s10, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s11, v8, vcc_lo
global_atomic_add_u32 v[7:8], v9, off offset:4
.LBB0_92:
s_or_b32 exec_lo, exec_lo, s1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_94
v_lshlrev_b64 v[7:8], 2, v[0:1]
v_sub_nc_u32_e32 v11, 0, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v9, vcc_lo, v7, 4
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, s12, v9
v_add_co_ci_u32_e32 v8, vcc_lo, s13, v10, vcc_lo
v_add_co_u32 v9, vcc_lo, s6, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s7, v10, vcc_lo
global_atomic_add_u32 v[4:5], v11, off
global_atomic_add_u32 v[7:8], v6, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v11, off
global_atomic_add_u32 v[9:10], v6, off
.LBB0_94:
s_or_b32 exec_lo, exec_lo, s1
; Phase-2 down push: min(down_weight (s[16:17]), reservoir); height test
; vs. the LDS row below (offset:140); on success pair with
; up_weight[thid+pitch] (s[18:19]); else atomicSub(&pull_down[thid+pitch], 1).
v_lshlrev_b64 v[4:5], 2, v[0:1]
s_mov_b32 s0, 0
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s16, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s17, v5, vcc_lo
global_load_b32 v1, v[2:3], off
global_load_b32 v6, v[4:5], off
s_waitcnt vmcnt(0)
v_min_i32_e32 v1, v6, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e64 s1, 1, v1
v_cmpx_lt_i32_e32 0, v1
s_cbranch_execz .LBB0_96
v_lshlrev_b32_e32 v6, 2, v13
s_delay_alu instid0(VALU_DEP_3)
s_and_not1_b32 s1, s1, exec_lo
s_mov_b32 s0, exec_lo
ds_load_b32 v6, v6 offset:140
ds_load_b32 v7, v14
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v6, 1, v6
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ne_u32_e32 vcc_lo, v7, v6
s_and_b32 s3, vcc_lo, exec_lo
s_or_b32 s1, s1, s3
.LBB0_96:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s1
s_xor_b32 s1, exec_lo, s2
s_cbranch_execz .LBB0_98
v_add_nc_u32_e32 v6, s28, v0
v_mov_b32_e32 v8, -1
s_and_not1_b32 s0, s0, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v7, 31, v6
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v6, vcc_lo, s20, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s21, v7, vcc_lo
global_atomic_add_u32 v[6:7], v8, off
.LBB0_98:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_100
v_add_nc_u32_e32 v6, s28, v0
v_sub_nc_u32_e32 v0, 0, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v7, 31, v6
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s18, v6
v_add_co_ci_u32_e32 v9, vcc_lo, s19, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_atomic_add_u32 v[4:5], v0, off
global_atomic_add_u32 v[8:9], v1, off
s_clause 0x1
global_atomic_add_u32 v[2:3], v0, off
global_atomic_add_u32 v[6:7], v1, off
.LBB0_100:
; Kernel exit: release VGPRs early, then end program.
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.amdhsa_group_segment_fixed_size 1424
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 384
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 18
.amdhsa_next_free_sgpr 38
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, .Lfunc_end0-_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 80
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 88
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 96
.size: 8
.value_kind: global_buffer
- .offset: 104
.size: 4
.value_kind: by_value
- .offset: 108
.size: 4
.value_kind: by_value
- .offset: 112
.size: 4
.value_kind: by_value
- .offset: 116
.size: 4
.value_kind: by_value
- .offset: 120
.size: 4
.value_kind: by_value
- .offset: 124
.size: 4
.value_kind: by_value
- .offset: 128
.size: 4
.value_kind: hidden_block_count_x
- .offset: 132
.size: 4
.value_kind: hidden_block_count_y
- .offset: 136
.size: 4
.value_kind: hidden_block_count_z
- .offset: 140
.size: 2
.value_kind: hidden_group_size_x
- .offset: 142
.size: 2
.value_kind: hidden_group_size_y
- .offset: 144
.size: 2
.value_kind: hidden_group_size_z
- .offset: 146
.size: 2
.value_kind: hidden_remainder_x
- .offset: 148
.size: 2
.value_kind: hidden_remainder_y
- .offset: 150
.size: 2
.value_kind: hidden_remainder_z
- .offset: 168
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 176
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 184
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 192
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1424
.kernarg_segment_align: 8
.kernarg_segment_size: 384
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.private_segment_fixed_size: 0
.sgpr_count: 40
.sgpr_spill_count: 0
.symbol: _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 18
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
int x1 = threadIdx.x ;
int y1 = threadIdx.y ;
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1+1 , 34 ) + x1 + 1, temp_mult1 = __umul24(y1,32) + x1 ;
height_fn[temp_mult] = g_graph_height[thid] ;
(threadIdx.x == 31 && x < width1 - 1 ) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0 ) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1 ) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0 ) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0 ;
flow_push = g_push_reser[thid] ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
__syncthreads() ;
min_flow_pushed = g_left_weight[thid] ;
flow_push = g_push_reser[thid] ;
if(flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2 ;
else
{
( flow_push > 0 && ( ( (height_fn[temp_mult] == height_fn[temp_mult-1] + 1 ) && g_left_weight[thid] > 0 ) ||( (height_fn[temp_mult] == height_fn[temp_mult+1]+1 ) && g_right_weight[thid] > 0) || ( ( height_fn[temp_mult] == height_fn[temp_mult+34]+1 ) && g_down_weight[thid] > 0) || ( (height_fn[temp_mult] == height_fn[temp_mult-34]+1 ) && g_up_weight[thid] > 0 ) || ( height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0 ) ) ) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0 ;
}
__syncthreads() ;
if( thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width-1 && x > 0 && y < rows-1 && y > 0 )
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid] ;
min_flow_pushed = flow_push ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
g_sink_weight[thid] = temp_weight ;
atomicSub(&g_push_reser[thid] , min_flow_pushed);
}
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_left_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_left_weight[thid] , min_flow_pushed);
atomicAdd(&g_right_weight[thid-1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-1], min_flow_pushed);
}else atomicSub(&g_pull_left[thid-1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_up_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_up_weight[thid] , min_flow_pushed);
atomicAdd(&g_down_weight[thid-width1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid-width1], min_flow_pushed);
} else atomicSub(&g_pull_up[thid - width1] , 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_right_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1 )
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_right_weight[thid] , min_flow_pushed);
atomicAdd(&g_left_weight[thid+1],min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+1], min_flow_pushed);
}else atomicSub( &g_pull_right[thid + 1], 1) ;
flow_push = g_push_reser[thid] ;
min_flow_pushed = flow_push ;
temp_weight = g_down_weight[thid] ;
if(temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1 )
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0 ;
temp_weight = temp_weight - min_flow_pushed ;
atomicSub(&g_down_weight[thid] , min_flow_pushed);
atomicAdd(&g_up_weight[thid+width1], min_flow_pushed);
atomicSub(&g_push_reser[thid] , min_flow_pushed);
atomicAdd(&g_push_reser[thid+width1], min_flow_pushed);
}else atomicSub( &g_pull_down[thid+width1], 1) ;
}
} | .text
.file "kernel_push2_atomic.hip"
.globl _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii # -- Begin function _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.p2align 4, 0x90
.type _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii,@function
_Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii: # @_Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 256(%rsp), %rax
movq %rax, 144(%rsp)
leaq 264(%rsp), %rax
movq %rax, 152(%rsp)
leaq 272(%rsp), %rax
movq %rax, 160(%rsp)
leaq 280(%rsp), %rax
movq %rax, 168(%rsp)
leaq 288(%rsp), %rax
movq %rax, 176(%rsp)
leaq 296(%rsp), %rax
movq %rax, 184(%rsp)
leaq 304(%rsp), %rax
movq %rax, 192(%rsp)
leaq 312(%rsp), %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 344(%rsp), %rax
movq %rax, 232(%rsp)
leaq 352(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end0:
.size _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, .Lfunc_end0-_Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii,@object # @_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.section .rodata,"a",@progbits
.globl _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.p2align 3, 0x0
_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii:
.quad _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.size _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii"
.size .L__unnamed_1, 56
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00062273_00000000-6_kernel_push2_atomic.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.type _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, @function
_Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $344, %rsp
.cfi_def_cfa_offset 352
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
movq 352(%rsp), %rax
movq %rax, 56(%rsp)
movq 360(%rsp), %rax
movq %rax, 48(%rsp)
movq 368(%rsp), %rax
movq %rax, 40(%rsp)
movq 376(%rsp), %rax
movq %rax, 32(%rsp)
movq 384(%rsp), %rax
movq %rax, 24(%rsp)
movq 392(%rsp), %rax
movq %rax, 16(%rsp)
movq 400(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 328(%rsp)
xorl %eax, %eax
leaq 104(%rsp), %rax
movq %rax, 176(%rsp)
leaq 96(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rax
movq %rax, 192(%rsp)
leaq 80(%rsp), %rax
movq %rax, 200(%rsp)
leaq 72(%rsp), %rax
movq %rax, 208(%rsp)
leaq 64(%rsp), %rax
movq %rax, 216(%rsp)
leaq 56(%rsp), %rax
movq %rax, 224(%rsp)
leaq 48(%rsp), %rax
movq %rax, 232(%rsp)
leaq 40(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rax
movq %rax, 248(%rsp)
leaq 24(%rsp), %rax
movq %rax, 256(%rsp)
leaq 16(%rsp), %rax
movq %rax, 264(%rsp)
leaq 8(%rsp), %rax
movq %rax, 272(%rsp)
leaq 408(%rsp), %rax
movq %rax, 280(%rsp)
leaq 416(%rsp), %rax
movq %rax, 288(%rsp)
leaq 424(%rsp), %rax
movq %rax, 296(%rsp)
leaq 432(%rsp), %rax
movq %rax, 304(%rsp)
leaq 440(%rsp), %rax
movq %rax, 312(%rsp)
leaq 448(%rsp), %rax
movq %rax, 320(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
leaq 120(%rsp), %rcx
leaq 112(%rsp), %rdx
leaq 140(%rsp), %rsi
leaq 128(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 328(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $344, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 120(%rsp)
.cfi_def_cfa_offset 360
pushq 120(%rsp)
.cfi_def_cfa_offset 368
leaq 192(%rsp), %r9
movq 156(%rsp), %rcx
movl 164(%rsp), %r8d
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
leaq _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 352
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, .-_Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.globl _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.type _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, @function
_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 72
pushq 120(%rsp)
.cfi_def_cfa_offset 80
pushq 120(%rsp)
.cfi_def_cfa_offset 88
pushq 120(%rsp)
.cfi_def_cfa_offset 96
pushq 120(%rsp)
.cfi_def_cfa_offset 104
pushq 120(%rsp)
.cfi_def_cfa_offset 112
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq 120(%rsp)
.cfi_def_cfa_offset 128
call _Z69__device_stub__Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiiiPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, .-_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel_push2_atomic.hip"
.globl _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii # -- Begin function _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.p2align 4, 0x90
.type _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii,@function
_Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii: # @_Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 256(%rsp), %rax
movq %rax, 144(%rsp)
leaq 264(%rsp), %rax
movq %rax, 152(%rsp)
leaq 272(%rsp), %rax
movq %rax, 160(%rsp)
leaq 280(%rsp), %rax
movq %rax, 168(%rsp)
leaq 288(%rsp), %rax
movq %rax, 176(%rsp)
leaq 296(%rsp), %rax
movq %rax, 184(%rsp)
leaq 304(%rsp), %rax
movq %rax, 192(%rsp)
leaq 312(%rsp), %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 344(%rsp), %rax
movq %rax, 232(%rsp)
leaq 352(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end0:
.size _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, .Lfunc_end0-_Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii,@object # @_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.section .rodata,"a",@progbits
.globl _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.p2align 3, 0x0
_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii:
.quad _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.size _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii"
.size .L__unnamed_1, 56
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19kernel_push2_atomicPiS_S_S_S_S_S_S_S_S_S_S_S_iiiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cstdio>
using namespace std;
#include <cuda_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
void RandomInit(float* data, int n)
{
for (int i=0; i<n; i++)
{
data[i] = rand() / (float)RAND_MAX;
}
}
void RandomInit(unsigned* data, int n)
{
for (int i=0; i<n; i++)
{
data[i] = rand() % n;
}
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
__global__ void l1_stride_cons(const float* A, float* C, int stride)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i*stride] = A[i*stride];
}
__global__ void l1_stride(const float* A, float* C, int stride)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[((i/stride)*32)+(i%stride)] = A[((i/stride)*32)+(i%stride)];
}
// Host code
void VectorAddition(int N, int threadsPerBlock, int stride)
{
cout<<"Vector Addition for input size "<<N<<" :\n";
// Variables
float* h_A;
float* h_C;
float* d_A;
float* d_C;
float total_time=0;
size_t size = N * sizeof(float) * 32;
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_C = (float*)malloc(size);
// Initialize input vectors
RandomInit(h_A, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaThreadSynchronize());
// Invoke kernel
cout<<"Invoke Kernel\n";
//int threads = 128;
int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock);
for (int i = 0; i < 1; i++) {
l1_stride_cons<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, stride);
getLastCudaError("kernel launch failure");
checkCudaErrors(cudaThreadSynchronize());
}
float dSeconds = total_time/((float)TIMES * 1000);
float dNumOps = N;
float gflops = 1.0e-9 * dNumOps/dSeconds;
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_C)
free(h_C);
cudaDeviceReset();
if(i == N)
cout<<"SUCCSESS"<<endl;
else
cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
// Program entry point.
// Usage: <prog> N threadsPerBlock stride
// Returns 0 on success, 1 when required arguments are missing.
int main(int argc, char *argv[])
{
    if (argc < 4)
    {
        printf("Insufficient number of arguments!\n");  // fixed "Unsuffcient" typo
        return 1;  // signal usage error to the shell (previously returned 0)
    }
    VectorAddition(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
    return 0;
}
Function : _Z9l1_stridePKfPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IABS R7, c[0x0][0x170] ; /* 0x00005c0000077a13 */
/* 0x000fe20000000000 */
/*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ I2F.RP R4, R7 ; /* 0x0000000700047306 */
/* 0x000e620000209400 */
/*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e2e0000002100 */
/*0060*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x002e620000001000 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fe200078e0205 */
/*0080*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x002fc80007ffe0ff */
/*0090*/ IABS R4, R0 ; /* 0x0000000000047213 */
/* 0x000fe40000000000 */
/*00a0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00b0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*00c0*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a03 */
/*00d0*/ IMAD R5, R6, R7, RZ ; /* 0x0000000706057224 */
/* 0x000fca00078e02ff */
/*00e0*/ IMAD.HI.U32 R3, R3, R5, R2 ; /* 0x0000000503037227 */
/* 0x000fcc00078e0002 */
/*00f0*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0100*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0110*/ IMAD R2, R7, R5, R4 ; /* 0x0000000507027224 */
/* 0x000fca00078e0204 */
/*0120*/ ISETP.GT.U32.AND P2, PT, R7, R2, PT ; /* 0x000000020700720c */
/* 0x000fda0003f44070 */
/*0130*/ @!P2 IADD3 R2, R2, -R7.reuse, RZ ; /* 0x800000070202a210 */
/* 0x080fe40007ffe0ff */
/*0140*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */
/* 0x000fe40007ffe0ff */
/*0150*/ ISETP.GE.U32.AND P0, PT, R2, R7, PT ; /* 0x000000070200720c */
/* 0x000fe40003f06070 */
/*0160*/ LOP3.LUT R2, R0, c[0x0][0x170], RZ, 0x3c, !PT ; /* 0x00005c0000027a12 */
/* 0x000fe400078e3cff */
/*0170*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x170], PT ; /* 0x00005c00ff007a0c */
/* 0x000fe40003f45270 */
/*0180*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fce0003f26270 */
/*0190*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fcc0007ffe0ff */
/*01a0*/ @!P1 IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff039224 */
/* 0x000fe200078e0a03 */
/*01b0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x170], RZ, 0x33, !PT ; /* 0x00005c00ff03aa12 */
/* 0x000fc800078e33ff */
/*01c0*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fca0007ffe1ff */
/*01d0*/ IMAD R0, R5, c[0x0][0x170], R0 ; /* 0x00005c0005007a24 */
/* 0x000fe400078e0200 */
/*01e0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc600078e00ff */
/*01f0*/ LEA R0, R3, R0, 0x5 ; /* 0x0000000003007211 */
/* 0x000fca00078e28ff */
/*0200*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*0210*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0220*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*0230*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0240*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0250*/ BRA 0x250; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z14l1_stride_consPKfPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD R0, R0, c[0x0][0x170], RZ ; /* 0x00005c0000007a24 */
/* 0x000fc800078e02ff */
/*0070*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*0080*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0090*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*00a0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cstdio>
using namespace std;
#include <cuda_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fill data[0..n) with pseudo-random floats in [0, 1].
// Writes nothing when n <= 0.
void RandomInit(float* data, int n)
{
    float* end = data + (n > 0 ? n : 0);
    for (float* p = data; p != end; ++p)
    {
        *p = rand() / (float)RAND_MAX;
    }
}
// Fill data[0..n) with pseudo-random unsigned values in [0, n).
// Writes nothing when n <= 0.
void RandomInit(unsigned* data, int n)
{
    int count = n;
    for (int idx = 0; idx < count; ++idx)
    {
        unsigned sample = (unsigned)(rand() % n);
        data[idx] = sample;
    }
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Report a failed CUDA runtime call and abort the process.
// Invoked via the checkCudaErrors(err) macro, which supplies __FILE__/__LINE__.
//   err  - status returned by a CUDA runtime API call
//   file - source file of the failing call site
//   line - source line of the failing call site
// Exits the process with status -1 on any error; returns normally otherwise.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
    if(cudaSuccess != err)
    {
        // cudaGetErrorString yields a human-readable description of err.
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Check cudaGetLastError() (e.g. after a kernel launch) and abort on failure.
// Invoked via the getLastCudaError(msg) macro, which supplies __FILE__/__LINE__.
//   errorMessage - caller-provided context string for the report
//   file / line  - call-site location from the macro
// Exits with status -1 if a CUDA error is pending; otherwise returns normally.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
    // Fetch the most recent pending CUDA runtime error.
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
        file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
// Kernel: each thread copies one element from A to C at a constant element
// stride: index = (global thread id) * stride.
__global__ void l1_stride_cons(const float* A, float* C, int stride)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    C[i*stride] = A[i*stride];
}
// Kernel: copy one element per thread using a row-remapped index:
// (i / stride) * 32 + (i % stride), i.e. the first `stride` slots of
// consecutive 32-element rows.
__global__ void l1_stride(const float* A, float* C, int stride)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = tid / stride;
    const int col = tid % stride;
    const int element = row * 32 + col;
    C[element] = A[element];
}
// Host code
// Host driver for the l1_stride_cons microbenchmark: allocates N*32 floats
// on host and device, uploads random input, launches the kernel once,
// prints timing/throughput, verifies the copy, and releases all resources.
//   N               - number of logical elements (buffers hold N*32 floats)
//   threadsPerBlock - CUDA block size for the launch
//   stride          - element stride forwarded to the kernel
void VectorAddition(int N, int threadsPerBlock, int stride)
{
    cout<<"Vector Addition for input size "<<N<<" :\n";
    // Variables
    float* h_A;
    float* h_C;
    float* d_A;
    float* d_C;
    // FIXME(review): total_time is never updated, so dSeconds below is 0 and
    // the gflops division yields inf -- timing code appears to be missing.
    float total_time=0;
    size_t size = N * sizeof(float) * 32;
    // Allocate input vectors h_A and h_B in host memory
    h_A = (float*)malloc(size);
    h_C = (float*)malloc(size);
    // Initialize input vectors
    RandomInit(h_A, N);
    // Allocate vectors in device memory
    checkCudaErrors( cudaMalloc((void**)&d_A, size) );
    checkCudaErrors( cudaMalloc((void**)&d_C, size) );
    // Copy vectors from host memory to device memory
    checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
    // NOTE: cudaThreadSynchronize is deprecated (cudaDeviceSynchronize).
    checkCudaErrors(cudaThreadSynchronize());
    // Invoke kernel
    cout<<"Invoke Kernel\n";
    //int threads = 128;
    int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock);
    for (int i = 0; i < 1; i++) {
        l1_stride_cons<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, stride);
        getLastCudaError("kernel launch failure");
        checkCudaErrors(cudaThreadSynchronize());
    }
    float dSeconds = total_time/((float)TIMES * 1000);
    float dNumOps = N;
    float gflops = 1.0e-9 * dNumOps/dSeconds;
    cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
    // Verify result.
    // NOTE(review): compares only elements [0, N); the kernel writes at
    // i*stride, so this check is meaningful only when stride == 1 -- confirm.
    int i;
    for (i = 0; i < N; ++i) {
        float sum = h_A[i];
        if (fabs(h_C[i] - sum) > 1e-5)
            break;
    }
    // Free device memory
    if (d_A)
        cudaFree(d_A);
    if (d_C)
        cudaFree(d_C);
    // Free host memory
    if (h_A)
        free(h_A);
    if (h_C)
        free(h_C);
    cudaDeviceReset();
    if(i == N)
        cout<<"SUCCSESS"<<endl;
    else
        cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
// Entry point: expects N, threadsPerBlock and stride on the command line.
int main(int argc, char *argv[])
{
    if (argc >= 4)
    {
        const int n = atoi(argv[1]);
        const int threads = atoi(argv[2]);
        const int stride = atoi(argv[3]);
        VectorAddition(n, threads, stride);
    }
    else
    {
        printf("Unsuffcient number of arguments!\n");
    }
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3677:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3677:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10RandomInitPfi
.type _Z10RandomInitPfi, @function
_Z10RandomInitPfi:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE3669:
.size _Z10RandomInitPfi, .-_Z10RandomInitPfi
.globl _Z10RandomInitPji
.type _Z10RandomInitPji, @function
_Z10RandomInitPji:
.LFB3670:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L16
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %esi, %ebp
movq %rdi, %rbx
movslq %esi, %rax
leaq (%rdi,%rax,4), %r12
.L13:
call rand@PLT
cltd
idivl %ebp
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L13
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE3670:
.size _Z10RandomInitPji, .-_Z10RandomInitPji
.globl _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
.type _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi, @function
_Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi:
.LFB3699:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14l1_stride_consPKfPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3699:
.size _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi, .-_Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
.globl _Z14l1_stride_consPKfPfi
.type _Z14l1_stride_consPKfPfi, @function
_Z14l1_stride_consPKfPfi:
.LFB3700:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _Z14l1_stride_consPKfPfi, .-_Z14l1_stride_consPKfPfi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Vector Addition for input size "
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string " :\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "/home/ubuntu/Datasets/stackv2/train-structured/shen203/GPU_Microbenchmark/master/coalescer/l1_stride.cu"
.align 8
.LC4:
.string "%s(%i) : CUDA Runtime API error %d: %s.\n"
.section .rodata.str1.1
.LC5:
.string "Invoke Kernel\n"
.LC6:
.string "kernel launch failure"
.section .rodata.str1.8
.align 8
.LC7:
.string "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.section .rodata.str1.1
.LC10:
.string "Time = "
.LC11:
.string "msec"
.LC12:
.string "gflops = "
.LC15:
.string "SUCCSESS"
.LC16:
.string "FAILED"
.text
.globl _Z14VectorAdditioniii
.type _Z14VectorAdditioniii, @function
_Z14VectorAdditioniii:
.LFB3673:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movl %edi, %r13d
movl %esi, %r15d
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $31, %edx
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r13d, %esi
movq %rbx, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $3, %edx
leaq .LC2(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movslq %r13d, %r12
movq %r12, %r14
salq $7, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbx
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbp
movl %r13d, %esi
movq %rbx, %rdi
call _Z10RandomInitPfi
leaq 16(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L70
leaq 24(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L71
movl $1, %ecx
movq %r14, %rdx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L72
call cudaThreadSynchronize@PLT
testl %eax, %eax
jne .L73
movl $14, %edx
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r15d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leal -1(%r13,%r15), %eax
cltd
idivl %r15d
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L32:
call cudaGetLastError@PLT
movl %eax, %r15d
testl %eax, %eax
jne .L75
call cudaThreadSynchronize@PLT
movl %eax, %r15d
testl %eax, %eax
jne .L76
pxor %xmm0, %xmm0
cvtsi2ssl %r13d, %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC8(%rip), %xmm0
pxor %xmm1, %xmm1
divsd %xmm1, %xmm0
pxor %xmm3, %xmm3
cvtsd2ss %xmm0, %xmm3
movss %xmm3, (%rsp)
movl $7, %edx
leaq .LC10(%rip), %rsi
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
movq %r15, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %r15
movq %rax, 8(%rsp)
movl $4, %edx
leaq .LC11(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq (%r15), %rax
movq -24(%rax), %rax
movq 240(%r15,%rax), %r15
testq %r15, %r15
je .L77
cmpb $0, 56(%r15)
je .L37
movzbl 67(%r15), %esi
.L38:
movsbl %sil, %esi
movq 8(%rsp), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movq %rax, %r15
movl $9, %edx
leaq .LC12(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
cvtss2sd (%rsp), %xmm0
movq %r15, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rcx
movq %rax, (%rsp)
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rcx,%rax), %r15
testq %r15, %r15
je .L78
cmpb $0, 56(%r15)
je .L41
movzbl 67(%r15), %esi
.L42:
movsbl %sil, %esi
movq (%rsp), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $2, %ecx
movq %r14, %rdx
movq 24(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L43
movl $0, %eax
testl %r13d, %r13d
jle .L67
movss .LC13(%rip), %xmm2
movsd .LC14(%rip), %xmm1
jmp .L44
.L70:
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $98, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L71:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $99, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L72:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $103, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L73:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $105, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L74:
.cfi_restore_state
movl (%rsp), %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
jmp .L32
.L75:
movl %eax, %edi
call cudaGetErrorString@PLT
pushq %rax
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %r15
.cfi_def_cfa_offset 144
leaq .LC6(%rip), %r9
movl $114, %r8d
leaq .LC3(%rip), %rcx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L76:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %r15d, %r9d
movl $115, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L77:
.cfi_restore_state
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L79
call _ZSt16__throw_bad_castv@PLT
.L79:
call __stack_chk_fail@PLT
.L37:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L38
.L78:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L80
call _ZSt16__throw_bad_castv@PLT
.L80:
call __stack_chk_fail@PLT
.L41:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L42
.L67:
movl $0, %r12d
jmp .L45
.L43:
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %r14d, %r9d
movl $125, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L64:
.cfi_restore_state
movq %rdx, %rax
.L44:
movss 0(%rbp,%rax,4), %xmm0
subss (%rbx,%rax,4), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
ja .L81
leaq 1(%rax), %rdx
cmpq %rdx, %r12
jne .L64
leal 1(%rax), %r12d
jmp .L45
.L81:
movl %eax, %r12d
.L45:
movq 16(%rsp), %rdi
testq %rdi, %rdi
je .L48
call cudaFree@PLT
.L48:
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .L49
call cudaFree@PLT
.L49:
testq %rbx, %rbx
je .L50
movq %rbx, %rdi
call free@PLT
.L50:
testq %rbp, %rbp
je .L51
movq %rbp, %rdi
call free@PLT
.L51:
call cudaDeviceReset@PLT
cmpl %r13d, %r12d
je .L82
movl $6, %edx
leaq .LC16(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L83
cmpb $0, 56(%rbx)
je .L60
movzbl 67(%rbx), %esi
.L61:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L27:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L84
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L82:
.cfi_restore_state
movl $8, %edx
leaq .LC15(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L85
cmpb $0, 56(%rbx)
je .L55
movzbl 67(%rbx), %esi
.L56:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
jmp .L27
.L85:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L86
call _ZSt16__throw_bad_castv@PLT
.L86:
call __stack_chk_fail@PLT
.L55:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L56
.L83:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L87
call _ZSt16__throw_bad_castv@PLT
.L87:
call __stack_chk_fail@PLT
.L60:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L61
.L84:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3673:
.size _Z14VectorAdditioniii, .-_Z14VectorAdditioniii
.section .rodata.str1.8
.align 8
.LC17:
.string "Unsuffcient number of arguments!\n"
.text
.globl main
.type main, @function
main:
.LFB3674:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
cmpl $3, %edi
jg .L89
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L90:
movl $0, %eax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L89:
.cfi_restore_state
movq %rsi, %rbx
movq 24(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %r12d, %edx
movl %ebp, %esi
movl %eax, %edi
call _Z14VectorAdditioniii
jmp .L90
.cfi_endproc
.LFE3674:
.size main, .-main
.globl _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi
.type _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi, @function
_Z32__device_stub__Z9l1_stridePKfPfiPKfPfi:
.LFB3701:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L96
.L92:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L97
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L96:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9l1_stridePKfPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L92
.L97:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3701:
.size _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi, .-_Z32__device_stub__Z9l1_stridePKfPfiPKfPfi
.globl _Z9l1_stridePKfPfi
.type _Z9l1_stridePKfPfi, @function
_Z9l1_stridePKfPfi:
.LFB3702:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3702:
.size _Z9l1_stridePKfPfi, .-_Z9l1_stridePKfPfi
.section .rodata.str1.1
.LC18:
.string "_Z9l1_stridePKfPfi"
.LC19:
.string "_Z14l1_stride_consPKfPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3704:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z9l1_stridePKfPfi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z14l1_stride_consPKfPfi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3704:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC8:
.long -400107883
.long 1041313291
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC13:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC14:
.long -1998362383
.long 1055193269
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cstdio>
using namespace std;
#include <cuda_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fill data[0..n) with pseudo-random floats in the range [0, 1].
//   data - destination buffer with capacity for at least n floats
//   n    - number of elements to write; n <= 0 writes nothing
// Uses rand(), so the sequence depends on the current srand() seed.
void RandomInit(float* data, int n)
{
    for (int i=0; i<n; i++)
    {
        data[i] = rand() / (float)RAND_MAX;
    }
}
// Fill data[0..n) with pseudo-random unsigned values in [0, n).
// Writes nothing when n <= 0.
void RandomInit(unsigned* data, int n)
{
    unsigned* cursor = data;
    for (int remaining = n; remaining > 0; --remaining)
    {
        *cursor++ = rand() % n;
    }
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort with a diagnostic when a CUDA runtime call has failed.
// Reached through the checkCudaErrors() macro, which supplies file/line.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
    if (err == cudaSuccess)
        return;  // fast path: nothing to report
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
    exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
// Kernel: each thread copies one element from A to C at a constant element
// stride: index = (global thread id) * stride.
__global__ void l1_stride_cons(const float* A, float* C, int stride)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    C[i*stride] = A[i*stride];
}
// Kernel: copy using a remapped index (i / stride) * 32 + (i % stride),
// touching the first `stride` slots of consecutive 32-element rows.
// NOTE(review): integer division/modulo assume stride > 0 -- confirm caller.
__global__ void l1_stride(const float* A, float* C, int stride)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    C[((i/stride)*32)+(i%stride)] = A[((i/stride)*32)+(i%stride)];
}
// Host code
// Host driver for the l1_stride_cons microbenchmark: allocates N*32 floats
// on host and device, uploads random input, launches the kernel once,
// prints timing/throughput, verifies the copy, and releases all resources.
//   N               - number of logical elements (buffers hold N*32 floats)
//   threadsPerBlock - CUDA block size for the launch
//   stride          - element stride forwarded to the kernel
void VectorAddition(int N, int threadsPerBlock, int stride)
{
    cout<<"Vector Addition for input size "<<N<<" :\n";
    // Variables
    float* h_A;
    float* h_C;
    float* d_A;
    float* d_C;
    // FIXME(review): total_time is never updated, so dSeconds below is 0 and
    // the gflops division yields inf -- timing code appears to be missing.
    float total_time=0;
    size_t size = N * sizeof(float) * 32;
    // Allocate input vectors h_A and h_B in host memory
    h_A = (float*)malloc(size);
    h_C = (float*)malloc(size);
    // Initialize input vectors
    RandomInit(h_A, N);
    // Allocate vectors in device memory
    checkCudaErrors( cudaMalloc((void**)&d_A, size) );
    checkCudaErrors( cudaMalloc((void**)&d_C, size) );
    // Copy vectors from host memory to device memory
    checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
    // NOTE: cudaThreadSynchronize is deprecated (cudaDeviceSynchronize).
    checkCudaErrors(cudaThreadSynchronize());
    // Invoke kernel
    cout<<"Invoke Kernel\n";
    //int threads = 128;
    int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock);
    for (int i = 0; i < 1; i++) {
        l1_stride_cons<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, stride);
        getLastCudaError("kernel launch failure");
        checkCudaErrors(cudaThreadSynchronize());
    }
    float dSeconds = total_time/((float)TIMES * 1000);
    float dNumOps = N;
    float gflops = 1.0e-9 * dNumOps/dSeconds;
    cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
    // Verify result.
    // NOTE(review): compares only elements [0, N); the kernel writes at
    // i*stride, so this check is meaningful only when stride == 1 -- confirm.
    int i;
    for (i = 0; i < N; ++i) {
        float sum = h_A[i];
        if (fabs(h_C[i] - sum) > 1e-5)
            break;
    }
    // Free device memory
    if (d_A)
        cudaFree(d_A);
    if (d_C)
        cudaFree(d_C);
    // Free host memory
    if (h_A)
        free(h_A);
    if (h_C)
        free(h_C);
    cudaDeviceReset();
    if(i == N)
        cout<<"SUCCSESS"<<endl;
    else
        cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
int main(int argc,char *argv[])
{
if(argc < 4)
printf("Unsuffcient number of arguments!\n");
else
{
VectorAddition(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
}
} | #include <iostream>
#include <cstdio>
using namespace std;
#include <hip/hip_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Populate data[0..n) with pseudo-random values in [0, 1].
// Uses the C library rand(); seed with srand() beforehand to vary the sequence.
void RandomInit(float* data, int n)
{
    int i = 0;
    while (i < n)
    {
        data[i] = rand() / (float)RAND_MAX;
        ++i;
    }
}
// Populate data[0..n) with pseudo-random unsigned values in [0, n).
// With n <= 0 the loop body never executes (so the modulo is never evaluated).
void RandomInit(unsigned* data, int n)
{
    unsigned* const end = data + (n > 0 ? n : 0);
    for (unsigned* p = data; p != end; ++p)
    {
        *p = rand() % n;
    }
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Terminate the process with a diagnostic when a HIP runtime call fails.
// err: return code from the call; file/line: call site (filled in by the
// checkCudaErrors wrapper macro).
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
    if (err == hipSuccess)
        return;
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
            file, line, (int)err, hipGetErrorString(err));
    exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
// Device code: every thread copies a single float, with thread i touching
// element i*stride of both arrays (strided global-memory access pattern).
__global__ void l1_stride_cons(const float* A, float* C, int stride)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int idx = tid * stride;
    C[idx] = A[idx];
}
// Device code: threads are grouped by `stride`; each group of lanes accesses
// a window starting 32 floats after the previous group's window.
__global__ void l1_stride(const float* A, float* C, int stride)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int idx = (tid / stride) * 32 + (tid % stride);
    C[idx] = A[idx];
}
// Host code
// Host driver: allocates N*32 floats on host and device, fills the input with
// random data, launches l1_stride_cons once, copies the result back and
// verifies it against the input.
// N: logical element count; threadsPerBlock: launch block size;
// stride: element stride forwarded to the kernel.
void VectorAddition(int N, int threadsPerBlock, int stride)
{
cout<<"Vector Addition for input size "<<N<<" :\n";
// Variables
float* h_A;
float* h_C;
float* d_A;
float* d_C;
float total_time=0; // NOTE(review): never accumulated, so the timing report below always reads 0
size_t size = N * sizeof(float) * 32; // 32x over-allocation leaves headroom for strided accesses
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size); // NOTE(review): malloc results are not NULL-checked
h_C = (float*)malloc(size);
// Initialize input vectors
RandomInit(h_A, N); // only the first N floats are initialized
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors(hipDeviceSynchronize());
// Invoke kernel
cout<<"Invoke Kernel\n";
//int threads = 128;
int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock); // ceil(N / threadsPerBlock)
for (int i = 0; i < 1; i++) { // single launch; loop kept for easy repetition
l1_stride_cons<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, stride);
getLastCudaError("kernel launch failure");
checkCudaErrors(hipDeviceSynchronize());
}
float dSeconds = total_time/((float)TIMES * 1000); // always 0 — see total_time above
float dNumOps = N;
float gflops = 1.0e-9 * dNumOps/dSeconds; // NOTE(review): divides by zero -> inf while dSeconds == 0
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
// NOTE(review): the kernel writes C[i*stride] but the loop above compares
// element i, which only matches when stride == 1 — confirm intent.
// Free device memory
if (d_A)
hipFree(d_A);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_C)
free(h_C);
hipDeviceReset();
if(i == N)
cout<<"SUCCSESS"<<endl; // (sic) typo preserved — output strings are runtime behavior
else
cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
// Entry point. Expects three command-line arguments:
//   argv[1] = N (element count), argv[2] = threadsPerBlock, argv[3] = stride.
// Returns nonzero when the arguments are missing so scripts can detect misuse.
int main(int argc,char *argv[])
{
    if (argc < 4)
    {
        // Fixed message typo ("Unsuffcient") and added a usage hint.
        printf("Insufficient number of arguments!\n");
        printf("Usage: %s <N> <threadsPerBlock> <stride>\n", argv[0]);
        return 1; // previously fell through returning 0 even on bad input
    }
    VectorAddition(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <cstdio>
using namespace std;
#include <hip/hip_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Populate data[0..n) with pseudo-random values in [0, 1].
// Uses the C library rand(); seed with srand() beforehand to vary the sequence.
void RandomInit(float* data, int n)
{
    int i = 0;
    while (i < n)
    {
        data[i] = rand() / (float)RAND_MAX;
        ++i;
    }
}
// Populate data[0..n) with pseudo-random unsigned values in [0, n).
// With n <= 0 the loop body never executes (so the modulo is never evaluated).
void RandomInit(unsigned* data, int n)
{
    unsigned* const end = data + (n > 0 ? n : 0);
    for (unsigned* p = data; p != end; ++p)
    {
        *p = rand() % n;
    }
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Terminate the process with a diagnostic when a HIP runtime call fails.
// err: return code from the call; file/line: call site (filled in by the
// checkCudaErrors wrapper macro).
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
    if (err == hipSuccess)
        return;
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
            file, line, (int)err, hipGetErrorString(err));
    exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
// Device code: every thread copies a single float, with thread i touching
// element i*stride of both arrays (strided global-memory access pattern).
__global__ void l1_stride_cons(const float* A, float* C, int stride)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int idx = tid * stride;
    C[idx] = A[idx];
}
// Device code: threads are grouped by `stride`; each group of lanes accesses
// a window starting 32 floats after the previous group's window.
__global__ void l1_stride(const float* A, float* C, int stride)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int idx = (tid / stride) * 32 + (tid % stride);
    C[idx] = A[idx];
}
// Host code
// Host driver: allocates N*32 floats on host and device, fills the input with
// random data, launches l1_stride_cons once, copies the result back and
// verifies it against the input.
// N: logical element count; threadsPerBlock: launch block size;
// stride: element stride forwarded to the kernel.
void VectorAddition(int N, int threadsPerBlock, int stride)
{
cout<<"Vector Addition for input size "<<N<<" :\n";
// Variables
float* h_A;
float* h_C;
float* d_A;
float* d_C;
float total_time=0; // NOTE(review): never accumulated, so the timing report below always reads 0
size_t size = N * sizeof(float) * 32; // 32x over-allocation leaves headroom for strided accesses
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size); // NOTE(review): malloc results are not NULL-checked
h_C = (float*)malloc(size);
// Initialize input vectors
RandomInit(h_A, N); // only the first N floats are initialized
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors(hipDeviceSynchronize());
// Invoke kernel
cout<<"Invoke Kernel\n";
//int threads = 128;
int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock); // ceil(N / threadsPerBlock)
for (int i = 0; i < 1; i++) { // single launch; loop kept for easy repetition
l1_stride_cons<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, stride);
getLastCudaError("kernel launch failure");
checkCudaErrors(hipDeviceSynchronize());
}
float dSeconds = total_time/((float)TIMES * 1000); // always 0 — see total_time above
float dNumOps = N;
float gflops = 1.0e-9 * dNumOps/dSeconds; // NOTE(review): divides by zero -> inf while dSeconds == 0
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
// NOTE(review): the kernel writes C[i*stride] but the loop above compares
// element i, which only matches when stride == 1 — confirm intent.
// Free device memory
if (d_A)
hipFree(d_A);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_C)
free(h_C);
hipDeviceReset();
if(i == N)
cout<<"SUCCSESS"<<endl; // (sic) typo preserved — output strings are runtime behavior
else
cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
int main(int argc,char *argv[])
{
if(argc < 4)
printf("Unsuffcient number of arguments!\n");
else
{
VectorAddition(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14l1_stride_consPKfPfi
.globl _Z14l1_stride_consPKfPfi
.p2align 8
.type _Z14l1_stride_consPKfPfi,@function
; gfx1100 code for l1_stride_cons(const float* A, float* C, int stride):
; each lane copies A[i*stride] to C[i*stride], i = blockDim.x*blockIdx.x+tid.
; Kernarg layout (see amdgpu_metadata in this file): A @0x0, C @0x8,
; stride @0x10, hidden_group_size_x @0x24. v0 = workitem id, s15 = workgroup id.
_Z14l1_stride_consPKfPfi:
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x24             ; s2 = hidden_group_size_x (blockDim.x)
	s_load_b32 s4, s[0:1], 0x10             ; s4 = stride argument
	s_waitcnt lgkmcnt(0)
	s_and_b32 s2, s2, 0xffff                ; group size is a 16-bit field
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] ; v1 = wg_id.x*blockDim.x + tid.x
	s_load_b128 s[0:3], s[0:1], 0x0         ; s[0:1] = A, s[2:3] = C
	v_mul_lo_u32 v0, v1, s4                 ; v0 = i * stride
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_ashrrev_i32_e32 v1, 31, v0            ; sign-extend the index to 64 bits
	v_lshlrev_b64 v[0:1], 2, v[0:1]         ; byte offset = index * sizeof(float)
	s_waitcnt lgkmcnt(0)
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v2, vcc_lo, s0, v0         ; v[2:3] = &A[i*stride]
	v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
	v_add_co_u32 v0, vcc_lo, s2, v0         ; v[0:1] = &C[i*stride]
	v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
	global_load_b32 v2, v[2:3], off
	s_waitcnt vmcnt(0)
	global_store_b32 v[0:1], v2, off
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14l1_stride_consPKfPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14l1_stride_consPKfPfi, .Lfunc_end0-_Z14l1_stride_consPKfPfi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z9l1_stridePKfPfi
.globl _Z9l1_stridePKfPfi
.p2align 8
.type _Z9l1_stridePKfPfi,@function
; gfx1100 code for l1_stride(const float* A, float* C, int stride):
; each lane computes i = blockDim.x*workgroup_id + tid, then copies
; A[(i/stride)*32 + i%stride] to the same offset in C.
; Kernarg layout (amdgpu_metadata): A @0x0, C @0x8, stride @0x10,
; hidden_group_size_x @0x24.
_Z9l1_stridePKfPfi:
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x10
	s_load_b32 s3, s[0:1], 0x24
	s_waitcnt lgkmcnt(0)
; Signed division setup: s4 = sign(stride), s5 = |stride|.
	s_ashr_i32 s4, s2, 31
	s_and_b32 s3, s3, 0xffff
	s_add_i32 s5, s2, s4
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
	s_xor_b32 s5, s5, s4
; Compiler-generated unsigned divide-by-reciprocal (no hardware int div):
; v3 = fixed-point reciprocal of |stride|, refined below.
	v_cvt_f32_u32_e32 v1, s5
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
	v_rcp_iflag_f32_e32 v1, v1
	s_waitcnt_depctr 0xfff
	v_mul_f32_e32 v1, 0x4f7ffffe, v1
	v_cvt_u32_f32_e32 v3, v1
	v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
	s_sub_i32 s3, 0, s5
	s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
	v_mul_lo_u32 v0, s3, v3
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_ashrrev_i32_e32 v2, 31, v1
	v_mul_hi_u32 v0, v3, v0
	s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_add_nc_u32_e32 v4, v1, v2
	v_xor_b32_e32 v4, v4, v2
	v_xor_b32_e32 v2, s4, v2
	s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_add_nc_u32_e32 v0, v3, v0
	v_mul_hi_u32 v0, v4, v0
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; v0 = candidate quotient |i|/|stride|; v3 = candidate remainder, with up to
; two correction steps (add 1 to quotient / subtract |stride| from remainder).
	v_mul_lo_u32 v3, v0, s5
	v_sub_nc_u32_e32 v3, v4, v3
	v_add_nc_u32_e32 v4, 1, v0
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
	v_subrev_nc_u32_e32 v5, s5, v3
	v_cmp_le_u32_e32 vcc_lo, s5, v3
	v_dual_cndmask_b32 v3, v3, v5 :: v_dual_cndmask_b32 v0, v0, v4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_cmp_le_u32_e32 vcc_lo, s5, v3
	v_add_nc_u32_e32 v4, 1, v0
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_cndmask_b32_e32 v0, v0, v4, vcc_lo
; Restore the sign of the quotient, then rebuild i%stride as i - q*stride.
	v_xor_b32_e32 v0, v0, v2
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_sub_nc_u32_e32 v0, v0, v2
	v_mul_lo_u32 v2, v0, s2
	s_load_b128 s[0:3], s[0:1], 0x0
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_sub_nc_u32_e32 v1, v1, v2
	v_lshl_add_u32 v0, v0, 5, v1            ; index = (i/stride)*32 + i%stride
; Scale to a byte offset and form 64-bit addresses into A and C, then copy.
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_ashrrev_i32_e32 v1, 31, v0
	v_lshlrev_b64 v[0:1], 2, v[0:1]
	s_waitcnt lgkmcnt(0)
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v2, vcc_lo, s0, v0
	v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
	v_add_co_u32 v0, vcc_lo, s2, v0
	v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
	global_load_b32 v2, v[2:3], off
	s_waitcnt vmcnt(0)
	global_store_b32 v[0:1], v2, off
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9l1_stridePKfPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z9l1_stridePKfPfi, .Lfunc_end1-_Z9l1_stridePKfPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14l1_stride_consPKfPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14l1_stride_consPKfPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9l1_stridePKfPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9l1_stridePKfPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <cstdio>
using namespace std;
#include <hip/hip_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Populate data[0..n) with pseudo-random values in [0, 1].
// Uses the C library rand(); seed with srand() beforehand to vary the sequence.
void RandomInit(float* data, int n)
{
    int i = 0;
    while (i < n)
    {
        data[i] = rand() / (float)RAND_MAX;
        ++i;
    }
}
// Populate data[0..n) with pseudo-random unsigned values in [0, n).
// With n <= 0 the loop body never executes (so the modulo is never evaluated).
void RandomInit(unsigned* data, int n)
{
    unsigned* const end = data + (n > 0 ? n : 0);
    for (unsigned* p = data; p != end; ++p)
    {
        *p = rand() % n;
    }
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Terminate the process with a diagnostic when a HIP runtime call fails.
// err: return code from the call; file/line: call site (filled in by the
// checkCudaErrors wrapper macro).
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
    if (err == hipSuccess)
        return;
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
            file, line, (int)err, hipGetErrorString(err));
    exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
// Device code: every thread copies a single float, with thread i touching
// element i*stride of both arrays (strided global-memory access pattern).
__global__ void l1_stride_cons(const float* A, float* C, int stride)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int idx = tid * stride;
    C[idx] = A[idx];
}
// Device code: threads are grouped by `stride`; each group of lanes accesses
// a window starting 32 floats after the previous group's window.
__global__ void l1_stride(const float* A, float* C, int stride)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int idx = (tid / stride) * 32 + (tid % stride);
    C[idx] = A[idx];
}
// Host code
// Host driver: allocates N*32 floats on host and device, fills the input with
// random data, launches l1_stride_cons once, copies the result back and
// verifies it against the input.
// N: logical element count; threadsPerBlock: launch block size;
// stride: element stride forwarded to the kernel.
void VectorAddition(int N, int threadsPerBlock, int stride)
{
cout<<"Vector Addition for input size "<<N<<" :\n";
// Variables
float* h_A;
float* h_C;
float* d_A;
float* d_C;
float total_time=0; // NOTE(review): never accumulated, so the timing report below always reads 0
size_t size = N * sizeof(float) * 32; // 32x over-allocation leaves headroom for strided accesses
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size); // NOTE(review): malloc results are not NULL-checked
h_C = (float*)malloc(size);
// Initialize input vectors
RandomInit(h_A, N); // only the first N floats are initialized
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors(hipDeviceSynchronize());
// Invoke kernel
cout<<"Invoke Kernel\n";
//int threads = 128;
int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock); // ceil(N / threadsPerBlock)
for (int i = 0; i < 1; i++) { // single launch; loop kept for easy repetition
l1_stride_cons<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, stride);
getLastCudaError("kernel launch failure");
checkCudaErrors(hipDeviceSynchronize());
}
float dSeconds = total_time/((float)TIMES * 1000); // always 0 — see total_time above
float dNumOps = N;
float gflops = 1.0e-9 * dNumOps/dSeconds; // NOTE(review): divides by zero -> inf while dSeconds == 0
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
// NOTE(review): the kernel writes C[i*stride] but the loop above compares
// element i, which only matches when stride == 1 — confirm intent.
// Free device memory
if (d_A)
hipFree(d_A);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_C)
free(h_C);
hipDeviceReset();
if(i == N)
cout<<"SUCCSESS"<<endl; // (sic) typo preserved — output strings are runtime behavior
else
cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
int main(int argc,char *argv[])
{
if(argc < 4)
printf("Unsuffcient number of arguments!\n");
else
{
VectorAddition(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
}
} | .text
.file "l1_stride.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z10RandomInitPfi
.LCPI0_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z10RandomInitPfi
.p2align 4, 0x90
.type _Z10RandomInitPfi,@function
# void RandomInit(float* data, int n) — SysV AMD64: %rdi = data, %esi = n.
# Stores rand() / (float)RAND_MAX into data[0..n); .LCPI0_0 above holds the
# constant 4.65661287e-10 (i.e. 1/RAND_MAX) used as a multiply.
# Loop registers (all callee-saved, so they survive the call to rand):
#   %rbx = data, %r14 = n (zero-extended), %r15 = i.
_Z10RandomInitPfi:                      # @_Z10RandomInitPfi
	.cfi_startproc
# %bb.0:
	testl	%esi, %esi
	jle	.LBB0_4                         # n <= 0: nothing to do
# %bb.1:                                # %.lr.ph.preheader
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movq	%rdi, %rbx
	movl	%esi, %r14d
	xorl	%r15d, %r15d                    # i = 0
	.p2align	4, 0x90
.LBB0_2:                                # %.lr.ph
                                        # =>This Inner Loop Header: Depth=1
	callq	rand
	xorps	%xmm0, %xmm0                    # break false dependency before cvtsi2ss
	cvtsi2ss	%eax, %xmm0
	mulss	.LCPI0_0(%rip), %xmm0           # scale by 1/RAND_MAX
	movss	%xmm0, (%rbx,%r15,4)            # data[i] = value
	incq	%r15
	cmpq	%r15, %r14
	jne	.LBB0_2
# %bb.3:
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	.cfi_restore %rbx
	.cfi_restore %r14
	.cfi_restore %r15
.LBB0_4:                                # %._crit_edge
	retq
.Lfunc_end0:
	.size	_Z10RandomInitPfi, .Lfunc_end0-_Z10RandomInitPfi
	.cfi_endproc
# -- End function
.globl _Z10RandomInitPji # -- Begin function _Z10RandomInitPji
.p2align 4, 0x90
.type _Z10RandomInitPji,@function
# void RandomInit(unsigned* data, int n) — SysV AMD64: %rdi = data, %esi = n.
# Stores rand() % n into data[0..n) via cltd/idivl (remainder lands in %edx).
# Loop registers: %r14 = data, %r15 = n (trip count), %ebx = n (divisor),
# %r12 = i. The extra `pushq %rax` is stack-alignment padding, not a save.
_Z10RandomInitPji:                      # @_Z10RandomInitPji
	.cfi_startproc
# %bb.0:
	testl	%esi, %esi
	jle	.LBB1_4                         # n <= 0: nothing to do
# %bb.1:                                # %.lr.ph.preheader
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%r12
	.cfi_def_cfa_offset 32
	pushq	%rbx
	.cfi_def_cfa_offset 40
	pushq	%rax
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -40
	.cfi_offset %r12, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movl	%esi, %ebx
	movq	%rdi, %r14
	movl	%esi, %r15d
	xorl	%r12d, %r12d                    # i = 0
	.p2align	4, 0x90
.LBB1_2:                                # %.lr.ph
                                        # =>This Inner Loop Header: Depth=1
	callq	rand
	cltd                                    # sign-extend %eax into %edx:%eax for idiv
	idivl	%ebx
	movl	%edx, (%r14,%r12,4)             # data[i] = rand() % n
	incq	%r12
	cmpq	%r12, %r15
	jne	.LBB1_2
# %bb.3:
	addq	$8, %rsp                        # drop alignment padding
	.cfi_def_cfa_offset 40
	popq	%rbx
	.cfi_def_cfa_offset 32
	popq	%r12
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	.cfi_restore %rbx
	.cfi_restore %r12
	.cfi_restore %r14
	.cfi_restore %r15
.LBB1_4:                                # %._crit_edge
	retq
.Lfunc_end1:
	.size	_Z10RandomInitPji, .Lfunc_end1-_Z10RandomInitPji
	.cfi_endproc
# -- End function
.globl _Z29__device_stub__l1_stride_consPKfPfi # -- Begin function _Z29__device_stub__l1_stride_consPKfPfi
.p2align 4, 0x90
.type _Z29__device_stub__l1_stride_consPKfPfi,@function
# Host-side launch stub for the l1_stride_cons kernel (args: A, C, stride).
# Spills the three arguments to the stack, builds the kernel-argument pointer
# table at 80(%rsp), pops the grid/block configuration recorded by
# __hipPushCallConfiguration, and forwards everything to hipLaunchKernel.
_Z29__device_stub__l1_stride_consPKfPfi: # @_Z29__device_stub__l1_stride_consPKfPfi
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)                  # spill A
	movq	%rsi, 64(%rsp)                  # spill C
	movl	%edx, 12(%rsp)                  # spill stride
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)                  # args[0] = &A
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)                  # args[1] = &C
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)                  # args[2] = &stride
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi                  # grid dim
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx                  # block dim
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-arg table
	movl	$_Z14l1_stride_consPKfPfi, %edi
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-memory size
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end2:
	.size	_Z29__device_stub__l1_stride_consPKfPfi, .Lfunc_end2-_Z29__device_stub__l1_stride_consPKfPfi
	.cfi_endproc
# -- End function
.globl _Z24__device_stub__l1_stridePKfPfi # -- Begin function _Z24__device_stub__l1_stridePKfPfi
.p2align 4, 0x90
.type _Z24__device_stub__l1_stridePKfPfi,@function
# Host-side launch stub for the l1_stride kernel (args: A, C, stride).
# Identical shape to the l1_stride_cons stub above: spill the arguments,
# build the pointer table at 80(%rsp), pop the pushed launch configuration,
# and call hipLaunchKernel with the device function address.
_Z24__device_stub__l1_stridePKfPfi:     # @_Z24__device_stub__l1_stridePKfPfi
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)                  # spill A
	movq	%rsi, 64(%rsp)                  # spill C
	movl	%edx, 12(%rsp)                  # spill stride
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)                  # args[0] = &A
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)                  # args[1] = &C
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)                  # args[2] = &stride
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi                  # grid dim
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx                  # block dim
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-arg table
	movl	$_Z9l1_stridePKfPfi, %edi
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-memory size
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end3:
	.size	_Z24__device_stub__l1_stridePKfPfi, .Lfunc_end3-_Z24__device_stub__l1_stridePKfPfi
	.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14VectorAdditioniii
.LCPI4_0:
.long 0x30000000 # float 4.65661287E-10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI4_1:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.LCPI4_3:
.quad 0x3ee4f8b588e368f1 # double 1.0000000000000001E-5
.LCPI4_4:
.quad 0x0000000000000000 # double 0
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI4_2:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl _Z14VectorAdditioniii
.p2align 4, 0x90
.type _Z14VectorAdditioniii,@function
_Z14VectorAdditioniii: # @_Z14VectorAdditioniii
.cfi_startproc
# %bb.0: # %.critedge
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, 12(%rsp) # 4-byte Spill
movl %esi, %r13d
movl %edi, %ebx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $31, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, 16(%rsp) # 8-byte Spill
movslq %ebx, %rbp
movq %rbp, %r12
shlq $7, %r12
movq %r12, %rdi
callq malloc
movq %rax, %r15
movq %r12, %rdi
callq malloc
movq %rax, %r14
movl %ebp, %ebx
testl %ebp, %ebp
jle .LBB4_3
# %bb.1: # %.lr.ph.preheader.i
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_2: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI4_0(%rip), %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
cmpq %rbp, %rbx
jne .LBB4_2
.LBB4_3: # %_Z10RandomInitPfi.exit
leaq 32(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB4_44
# %bb.4: # %_Z17__checkCudaErrors10hipError_tPKci.exit
leaq 24(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB4_45
# %bb.5: # %_Z17__checkCudaErrors10hipError_tPKci.exit53
movq 32(%rsp), %rdi
movq %r15, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_46
# %bb.6: # %_Z17__checkCudaErrors10hipError_tPKci.exit55
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB4_47
# %bb.7: # %_Z17__checkCudaErrors10hipError_tPKci.exit57
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 16(%rsp), %rax # 8-byte Reload
addl %r13d, %eax
decl %eax
cltd
idivl %r13d
# kill: def $eax killed $eax def $rax
movabsq $4294967296, %rcx # imm = 0x100000000
leaq (%rax,%rcx), %rdi
movl %r13d, %edx
orq %rcx, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_9
# %bb.8:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl 12(%rsp), %eax # 4-byte Reload
movl %eax, 44(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 44(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14l1_stride_consPKfPfi, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_9:
callq hipGetLastError
testl %eax, %eax
jne .LBB4_48
# %bb.10: # %_Z18__getLastCudaErrorPKcS0_i.exit
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB4_49
# %bb.11: # %_Z17__checkCudaErrors10hipError_tPKci.exit60
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
xorps %xmm0, %xmm0
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r13
movl $.L.str.6, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r13), %rax
movq -24(%rax), %rax
movq 240(%r13,%rax), %rbp
testq %rbp, %rbp
je .LBB4_43
# %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
xorps %xmm0, %xmm0
cvtsi2ssl 16(%rsp), %xmm0 # 4-byte Folded Reload
cvtss2sd %xmm0, %xmm0
mulsd .LCPI4_1(%rip), %xmm0
divsd .LCPI4_4(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
cmpb $0, 56(%rbp)
je .LBB4_14
# %bb.13:
movzbl 67(%rbp), %eax
jmp .LBB4_15
.LBB4_14:
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB4_15: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r13, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rax, %r13
movl $.L.str.7, %esi
movl $9, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r13, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r13
testq %r13, %r13
je .LBB4_43
# %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i64
cmpb $0, 56(%r13)
je .LBB4_18
# %bb.17:
movzbl 67(%r13), %ecx
jmp .LBB4_19
.LBB4_18:
movq %r13, %rdi
movq %rax, %rbp
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbp, %rax
.LBB4_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit67
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 24(%rsp), %rsi
movq %r14, %rdi
movq %r12, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_50
# %bb.20: # %_Z17__checkCudaErrors10hipError_tPKci.exit62.preheader
cmpl $0, 16(%rsp) # 4-byte Folded Reload
jle .LBB4_25
# %bb.21: # %.lr.ph.preheader
xorl %r12d, %r12d
movaps .LCPI4_2(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB4_22: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%r14,%r12,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%r15,%r12,4), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm1, %xmm2
ja .LBB4_26
# %bb.23: # %_Z17__checkCudaErrors10hipError_tPKci.exit62
# in Loop: Header=BB4_22 Depth=1
incq %r12
cmpq %r12, %rbx
jne .LBB4_22
# %bb.24: # %._crit_edge.loopexit
movl %ebx, %r12d
jmp .LBB4_26
.LBB4_25:
xorl %r12d, %r12d
.LBB4_26: # %._crit_edge
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .LBB4_28
# %bb.27:
callq hipFree
.LBB4_28:
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .LBB4_30
# %bb.29:
callq hipFree
.LBB4_30:
testq %r15, %r15
je .LBB4_32
# %bb.31:
movq %r15, %rdi
callq free
.LBB4_32:
testq %r14, %r14
je .LBB4_34
# %bb.33:
movq %r14, %rdi
callq free
.LBB4_34:
callq hipDeviceReset
cmpl 16(%rsp), %r12d # 4-byte Folded Reload
jne .LBB4_38
# %bb.35:
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB4_43
# %bb.36: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i69
cmpb $0, 56(%rbx)
je .LBB4_41
.LBB4_37:
movzbl 67(%rbx), %eax
jmp .LBB4_42
.LBB4_38:
movl $_ZSt4cout, %edi
movl $.L.str.9, %esi
movl $6, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB4_43
# %bb.39: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i74
cmpb $0, 56(%rbx)
jne .LBB4_37
.LBB4_41:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB4_42: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit72
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB4_43:
.cfi_def_cfa_offset 192
callq _ZSt16__throw_bad_castv
.LBB4_44:
movl %eax, %ebp
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $98, %ecx
jmp .LBB4_51
.LBB4_45:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $99, %ecx
jmp .LBB4_51
.LBB4_46:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $103, %ecx
jmp .LBB4_51
.LBB4_47:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $105, %ecx
jmp .LBB4_51
.LBB4_48:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movq %rax, %r10
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str.12, %esi
movl $.L.str.2, %edx
movl $.L.str.4, %r8d
movq %rbx, %rdi
movl $114, %ecx
movl %ebp, %r9d
xorl %eax, %eax
pushq %r10
.cfi_adjust_cfa_offset 8
callq fprintf
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $-1, %edi
callq exit
.LBB4_49:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $115, %ecx
jmp .LBB4_51
.LBB4_50:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $125, %ecx
.LBB4_51:
movl %ebp, %r8d
movq %rax, %r9
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end4:
.size _Z14VectorAdditioniii, .Lfunc_end4-_Z14VectorAdditioniii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
cmpl $3, %edi
jg .LBB5_2
# %bb.1:
movl $.Lstr, %edi
callq puts@PLT
jmp .LBB5_3
.LBB5_2:
movq 8(%rsi), %rdi
movq %rsi, %r15
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq 16(%r15), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 24(%r15), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %ebx, %edi
movl %r14d, %esi
movl %eax, %edx
callq _Z14VectorAdditioniii
.LBB5_3:
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14l1_stride_consPKfPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9l1_stridePKfPfi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14l1_stride_consPKfPfi,@object # @_Z14l1_stride_consPKfPfi
.section .rodata,"a",@progbits
.globl _Z14l1_stride_consPKfPfi
.p2align 3, 0x0
_Z14l1_stride_consPKfPfi:
.quad _Z29__device_stub__l1_stride_consPKfPfi
.size _Z14l1_stride_consPKfPfi, 8
.type _Z9l1_stridePKfPfi,@object # @_Z9l1_stridePKfPfi
.globl _Z9l1_stridePKfPfi
.p2align 3, 0x0
_Z9l1_stridePKfPfi:
.quad _Z24__device_stub__l1_stridePKfPfi
.size _Z9l1_stridePKfPfi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Vector Addition for input size "
.size .L.str, 32
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " :\n"
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/shen203/GPU_Microbenchmark/master/coalescer/l1_stride.hip"
.size .L.str.2, 115
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Invoke Kernel\n"
.size .L.str.3, 15
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "kernel launch failure"
.size .L.str.4, 22
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Time = "
.size .L.str.5, 8
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "msec"
.size .L.str.6, 5
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "gflops = "
.size .L.str.7, 10
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "SUCCSESS"
.size .L.str.8, 9
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "FAILED"
.size .L.str.9, 7
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "%s(%i) : CUDA Runtime API error %d: %s.\n"
.size .L.str.11, 41
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.size .L.str.12, 56
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14l1_stride_consPKfPfi"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z9l1_stridePKfPfi"
.size .L__unnamed_2, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Unsuffcient number of arguments!"
.size .Lstr, 33
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__l1_stride_consPKfPfi
.addrsig_sym _Z24__device_stub__l1_stridePKfPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14l1_stride_consPKfPfi
.addrsig_sym _Z9l1_stridePKfPfi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9l1_stridePKfPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IABS R7, c[0x0][0x170] ; /* 0x00005c0000077a13 */
/* 0x000fe20000000000 */
/*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ I2F.RP R4, R7 ; /* 0x0000000700047306 */
/* 0x000e620000209400 */
/*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e2e0000002100 */
/*0060*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x002e620000001000 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fe200078e0205 */
/*0080*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x002fc80007ffe0ff */
/*0090*/ IABS R4, R0 ; /* 0x0000000000047213 */
/* 0x000fe40000000000 */
/*00a0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00b0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*00c0*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a03 */
/*00d0*/ IMAD R5, R6, R7, RZ ; /* 0x0000000706057224 */
/* 0x000fca00078e02ff */
/*00e0*/ IMAD.HI.U32 R3, R3, R5, R2 ; /* 0x0000000503037227 */
/* 0x000fcc00078e0002 */
/*00f0*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0100*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0110*/ IMAD R2, R7, R5, R4 ; /* 0x0000000507027224 */
/* 0x000fca00078e0204 */
/*0120*/ ISETP.GT.U32.AND P2, PT, R7, R2, PT ; /* 0x000000020700720c */
/* 0x000fda0003f44070 */
/*0130*/ @!P2 IADD3 R2, R2, -R7.reuse, RZ ; /* 0x800000070202a210 */
/* 0x080fe40007ffe0ff */
/*0140*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */
/* 0x000fe40007ffe0ff */
/*0150*/ ISETP.GE.U32.AND P0, PT, R2, R7, PT ; /* 0x000000070200720c */
/* 0x000fe40003f06070 */
/*0160*/ LOP3.LUT R2, R0, c[0x0][0x170], RZ, 0x3c, !PT ; /* 0x00005c0000027a12 */
/* 0x000fe400078e3cff */
/*0170*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x170], PT ; /* 0x00005c00ff007a0c */
/* 0x000fe40003f45270 */
/*0180*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fce0003f26270 */
/*0190*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fcc0007ffe0ff */
/*01a0*/ @!P1 IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff039224 */
/* 0x000fe200078e0a03 */
/*01b0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x170], RZ, 0x33, !PT ; /* 0x00005c00ff03aa12 */
/* 0x000fc800078e33ff */
/*01c0*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fca0007ffe1ff */
/*01d0*/ IMAD R0, R5, c[0x0][0x170], R0 ; /* 0x00005c0005007a24 */
/* 0x000fe400078e0200 */
/*01e0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc600078e00ff */
/*01f0*/ LEA R0, R3, R0, 0x5 ; /* 0x0000000003007211 */
/* 0x000fca00078e28ff */
/*0200*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*0210*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0220*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*0230*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0240*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0250*/ BRA 0x250; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z14l1_stride_consPKfPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD R0, R0, c[0x0][0x170], RZ ; /* 0x00005c0000007a24 */
/* 0x000fc800078e02ff */
/*0070*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*0080*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0090*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0205 */
/*00a0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14l1_stride_consPKfPfi
.globl _Z14l1_stride_consPKfPfi
.p2align 8
.type _Z14l1_stride_consPKfPfi,@function
_Z14l1_stride_consPKfPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_load_b128 s[0:3], s[0:1], 0x0
v_mul_lo_u32 v0, v1, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14l1_stride_consPKfPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14l1_stride_consPKfPfi, .Lfunc_end0-_Z14l1_stride_consPKfPfi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z9l1_stridePKfPfi
.globl _Z9l1_stridePKfPfi
.p2align 8
.type _Z9l1_stridePKfPfi,@function
_Z9l1_stridePKfPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x10
s_load_b32 s3, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_ashr_i32 s4, s2, 31
s_and_b32 s3, s3, 0xffff
s_add_i32 s5, s2, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s5, s5, s4
v_cvt_f32_u32_e32 v1, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v3, v1
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_sub_i32 s3, 0, s5
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v0, s3, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_mul_hi_u32 v0, v3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v4, v1, v2
v_xor_b32_e32 v4, v4, v2
v_xor_b32_e32 v2, s4, v2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v3, v0
v_mul_hi_u32 v0, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v0, s5
v_sub_nc_u32_e32 v3, v4, v3
v_add_nc_u32_e32 v4, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s5, v3
v_cmp_le_u32_e32 vcc_lo, s5, v3
v_dual_cndmask_b32 v3, v3, v5 :: v_dual_cndmask_b32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_le_u32_e32 vcc_lo, s5, v3
v_add_nc_u32_e32 v4, 1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_xor_b32_e32 v0, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v0, v0, v2
v_mul_lo_u32 v2, v0, s2
s_load_b128 s[0:3], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v1, v1, v2
v_lshl_add_u32 v0, v0, 5, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9l1_stridePKfPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z9l1_stridePKfPfi, .Lfunc_end1-_Z9l1_stridePKfPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14l1_stride_consPKfPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14l1_stride_consPKfPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9l1_stridePKfPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9l1_stridePKfPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0003245d_00000000-6_l1_stride.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3677:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3677:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10RandomInitPfi
.type _Z10RandomInitPfi, @function
_Z10RandomInitPfi:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE3669:
.size _Z10RandomInitPfi, .-_Z10RandomInitPfi
.globl _Z10RandomInitPji
.type _Z10RandomInitPji, @function
_Z10RandomInitPji:
.LFB3670:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L16
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %esi, %ebp
movq %rdi, %rbx
movslq %esi, %rax
leaq (%rdi,%rax,4), %r12
.L13:
call rand@PLT
cltd
idivl %ebp
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L13
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE3670:
.size _Z10RandomInitPji, .-_Z10RandomInitPji
.globl _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
.type _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi, @function
_Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi:
.LFB3699:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14l1_stride_consPKfPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3699:
.size _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi, .-_Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
.globl _Z14l1_stride_consPKfPfi
.type _Z14l1_stride_consPKfPfi, @function
_Z14l1_stride_consPKfPfi:
.LFB3700:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _Z14l1_stride_consPKfPfi, .-_Z14l1_stride_consPKfPfi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Vector Addition for input size "
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string " :\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "/home/ubuntu/Datasets/stackv2/train-structured/shen203/GPU_Microbenchmark/master/coalescer/l1_stride.cu"
.align 8
.LC4:
.string "%s(%i) : CUDA Runtime API error %d: %s.\n"
.section .rodata.str1.1
.LC5:
.string "Invoke Kernel\n"
.LC6:
.string "kernel launch failure"
.section .rodata.str1.8
.align 8
.LC7:
.string "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.section .rodata.str1.1
.LC10:
.string "Time = "
.LC11:
.string "msec"
.LC12:
.string "gflops = "
.LC15:
.string "SUCCSESS"
.LC16:
.string "FAILED"
.text
.globl _Z14VectorAdditioniii
.type _Z14VectorAdditioniii, @function
_Z14VectorAdditioniii:
.LFB3673:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movl %edi, %r13d
movl %esi, %r15d
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $31, %edx
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r13d, %esi
movq %rbx, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $3, %edx
leaq .LC2(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movslq %r13d, %r12
movq %r12, %r14
salq $7, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbx
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbp
movl %r13d, %esi
movq %rbx, %rdi
call _Z10RandomInitPfi
leaq 16(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L70
leaq 24(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L71
movl $1, %ecx
movq %r14, %rdx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L72
call cudaThreadSynchronize@PLT
testl %eax, %eax
jne .L73
movl $14, %edx
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r15d, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leal -1(%r13,%r15), %eax
cltd
idivl %r15d
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L32:
call cudaGetLastError@PLT
movl %eax, %r15d
testl %eax, %eax
jne .L75
call cudaThreadSynchronize@PLT
movl %eax, %r15d
testl %eax, %eax
jne .L76
pxor %xmm0, %xmm0
cvtsi2ssl %r13d, %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC8(%rip), %xmm0
pxor %xmm1, %xmm1
divsd %xmm1, %xmm0
pxor %xmm3, %xmm3
cvtsd2ss %xmm0, %xmm3
movss %xmm3, (%rsp)
movl $7, %edx
leaq .LC10(%rip), %rsi
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
movq %r15, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %r15
movq %rax, 8(%rsp)
movl $4, %edx
leaq .LC11(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq (%r15), %rax
movq -24(%rax), %rax
movq 240(%r15,%rax), %r15
testq %r15, %r15
je .L77
cmpb $0, 56(%r15)
je .L37
movzbl 67(%r15), %esi
.L38:
movsbl %sil, %esi
movq 8(%rsp), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movq %rax, %r15
movl $9, %edx
leaq .LC12(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
cvtss2sd (%rsp), %xmm0
movq %r15, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rcx
movq %rax, (%rsp)
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rcx,%rax), %r15
testq %r15, %r15
je .L78
cmpb $0, 56(%r15)
je .L41
movzbl 67(%r15), %esi
.L42:
movsbl %sil, %esi
movq (%rsp), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $2, %ecx
movq %r14, %rdx
movq 24(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L43
movl $0, %eax
testl %r13d, %r13d
jle .L67
movss .LC13(%rip), %xmm2
movsd .LC14(%rip), %xmm1
jmp .L44
.L70:
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $98, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L71:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $99, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L72:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $103, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L73:
.cfi_restore_state
movl %eax, %ebx
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %ebx, %r9d
movl $105, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L74:
.cfi_restore_state
movl (%rsp), %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z38__device_stub__Z14l1_stride_consPKfPfiPKfPfi
jmp .L32
.L75:
movl %eax, %edi
call cudaGetErrorString@PLT
pushq %rax
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %r15
.cfi_def_cfa_offset 144
leaq .LC6(%rip), %r9
movl $114, %r8d
leaq .LC3(%rip), %rcx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L76:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %r15d, %r9d
movl $115, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L77:
.cfi_restore_state
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L79
call _ZSt16__throw_bad_castv@PLT
.L79:
call __stack_chk_fail@PLT
.L37:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L38
.L78:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L80
call _ZSt16__throw_bad_castv@PLT
.L80:
call __stack_chk_fail@PLT
.L41:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L42
.L67:
movl $0, %r12d
jmp .L45
.L43:
movl %eax, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 136
pushq %rax
.cfi_def_cfa_offset 144
movl %r14d, %r9d
movl $125, %r8d
leaq .LC3(%rip), %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L64:
.cfi_restore_state
movq %rdx, %rax
.L44:
movss 0(%rbp,%rax,4), %xmm0
subss (%rbx,%rax,4), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
ja .L81
leaq 1(%rax), %rdx
cmpq %rdx, %r12
jne .L64
leal 1(%rax), %r12d
jmp .L45
.L81:
movl %eax, %r12d
.L45:
movq 16(%rsp), %rdi
testq %rdi, %rdi
je .L48
call cudaFree@PLT
.L48:
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .L49
call cudaFree@PLT
.L49:
testq %rbx, %rbx
je .L50
movq %rbx, %rdi
call free@PLT
.L50:
testq %rbp, %rbp
je .L51
movq %rbp, %rdi
call free@PLT
.L51:
call cudaDeviceReset@PLT
cmpl %r13d, %r12d
je .L82
movl $6, %edx
leaq .LC16(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L83
cmpb $0, 56(%rbx)
je .L60
movzbl 67(%rbx), %esi
.L61:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L27:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L84
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L82:
.cfi_restore_state
movl $8, %edx
leaq .LC15(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L85
cmpb $0, 56(%rbx)
je .L55
movzbl 67(%rbx), %esi
.L56:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
jmp .L27
.L85:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L86
call _ZSt16__throw_bad_castv@PLT
.L86:
call __stack_chk_fail@PLT
.L55:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L56
.L83:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L87
call _ZSt16__throw_bad_castv@PLT
.L87:
call __stack_chk_fail@PLT
.L60:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L61
.L84:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3673:
.size _Z14VectorAdditioniii, .-_Z14VectorAdditioniii
.section .rodata.str1.8
.align 8
.LC17:
.string "Unsuffcient number of arguments!\n"
.text
.globl main
.type main, @function
main:
.LFB3674:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
cmpl $3, %edi
jg .L89
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L90:
movl $0, %eax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L89:
.cfi_restore_state
movq %rsi, %rbx
movq 24(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %r12d, %edx
movl %ebp, %esi
movl %eax, %edi
call _Z14VectorAdditioniii
jmp .L90
.cfi_endproc
.LFE3674:
.size main, .-main
.globl _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi
.type _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi, @function
_Z32__device_stub__Z9l1_stridePKfPfiPKfPfi:
.LFB3701:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L96
.L92:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L97
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L96:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9l1_stridePKfPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L92
.L97:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3701:
.size _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi, .-_Z32__device_stub__Z9l1_stridePKfPfiPKfPfi
.globl _Z9l1_stridePKfPfi
.type _Z9l1_stridePKfPfi, @function
_Z9l1_stridePKfPfi:
.LFB3702:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9l1_stridePKfPfiPKfPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3702:
.size _Z9l1_stridePKfPfi, .-_Z9l1_stridePKfPfi
.section .rodata.str1.1
.LC18:
.string "_Z9l1_stridePKfPfi"
.LC19:
.string "_Z14l1_stride_consPKfPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3704:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z9l1_stridePKfPfi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z14l1_stride_consPKfPfi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3704:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC8:
.long -400107883
.long 1041313291
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC13:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC14:
.long -1998362383
.long 1055193269
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "l1_stride.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z10RandomInitPfi
.LCPI0_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z10RandomInitPfi
.p2align 4, 0x90
.type _Z10RandomInitPfi,@function
_Z10RandomInitPfi: # @_Z10RandomInitPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI0_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z10RandomInitPfi, .Lfunc_end0-_Z10RandomInitPfi
.cfi_endproc
# -- End function
.globl _Z10RandomInitPji # -- Begin function _Z10RandomInitPji
.p2align 4, 0x90
.type _Z10RandomInitPji,@function
_Z10RandomInitPji: # @_Z10RandomInitPji
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %esi, %ebx
movq %rdi, %r14
movl %esi, %r15d
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltd
idivl %ebx
movl %edx, (%r14,%r12,4)
incq %r12
cmpq %r12, %r15
jne .LBB1_2
# %bb.3:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z10RandomInitPji, .Lfunc_end1-_Z10RandomInitPji
.cfi_endproc
# -- End function
.globl _Z29__device_stub__l1_stride_consPKfPfi # -- Begin function _Z29__device_stub__l1_stride_consPKfPfi
.p2align 4, 0x90
.type _Z29__device_stub__l1_stride_consPKfPfi,@function
_Z29__device_stub__l1_stride_consPKfPfi: # @_Z29__device_stub__l1_stride_consPKfPfi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14l1_stride_consPKfPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z29__device_stub__l1_stride_consPKfPfi, .Lfunc_end2-_Z29__device_stub__l1_stride_consPKfPfi
.cfi_endproc
# -- End function
.globl _Z24__device_stub__l1_stridePKfPfi # -- Begin function _Z24__device_stub__l1_stridePKfPfi
.p2align 4, 0x90
.type _Z24__device_stub__l1_stridePKfPfi,@function
_Z24__device_stub__l1_stridePKfPfi: # @_Z24__device_stub__l1_stridePKfPfi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9l1_stridePKfPfi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z24__device_stub__l1_stridePKfPfi, .Lfunc_end3-_Z24__device_stub__l1_stridePKfPfi
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14VectorAdditioniii
.LCPI4_0:
.long 0x30000000 # float 4.65661287E-10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI4_1:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.LCPI4_3:
.quad 0x3ee4f8b588e368f1 # double 1.0000000000000001E-5
.LCPI4_4:
.quad 0x0000000000000000 # double 0
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI4_2:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl _Z14VectorAdditioniii
.p2align 4, 0x90
.type _Z14VectorAdditioniii,@function
_Z14VectorAdditioniii: # @_Z14VectorAdditioniii
.cfi_startproc
# %bb.0: # %.critedge
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, 12(%rsp) # 4-byte Spill
movl %esi, %r13d
movl %edi, %ebx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $31, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, 16(%rsp) # 8-byte Spill
movslq %ebx, %rbp
movq %rbp, %r12
shlq $7, %r12
movq %r12, %rdi
callq malloc
movq %rax, %r15
movq %r12, %rdi
callq malloc
movq %rax, %r14
movl %ebp, %ebx
testl %ebp, %ebp
jle .LBB4_3
# %bb.1: # %.lr.ph.preheader.i
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB4_2: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI4_0(%rip), %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
cmpq %rbp, %rbx
jne .LBB4_2
.LBB4_3: # %_Z10RandomInitPfi.exit
leaq 32(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB4_44
# %bb.4: # %_Z17__checkCudaErrors10hipError_tPKci.exit
leaq 24(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB4_45
# %bb.5: # %_Z17__checkCudaErrors10hipError_tPKci.exit53
movq 32(%rsp), %rdi
movq %r15, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_46
# %bb.6: # %_Z17__checkCudaErrors10hipError_tPKci.exit55
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB4_47
# %bb.7: # %_Z17__checkCudaErrors10hipError_tPKci.exit57
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 16(%rsp), %rax # 8-byte Reload
addl %r13d, %eax
decl %eax
cltd
idivl %r13d
# kill: def $eax killed $eax def $rax
movabsq $4294967296, %rcx # imm = 0x100000000
leaq (%rax,%rcx), %rdi
movl %r13d, %edx
orq %rcx, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_9
# %bb.8:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl 12(%rsp), %eax # 4-byte Reload
movl %eax, 44(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 44(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14l1_stride_consPKfPfi, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_9:
callq hipGetLastError
testl %eax, %eax
jne .LBB4_48
# %bb.10: # %_Z18__getLastCudaErrorPKcS0_i.exit
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB4_49
# %bb.11: # %_Z17__checkCudaErrors10hipError_tPKci.exit60
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
xorps %xmm0, %xmm0
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r13
movl $.L.str.6, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r13), %rax
movq -24(%rax), %rax
movq 240(%r13,%rax), %rbp
testq %rbp, %rbp
je .LBB4_43
# %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
xorps %xmm0, %xmm0
cvtsi2ssl 16(%rsp), %xmm0 # 4-byte Folded Reload
cvtss2sd %xmm0, %xmm0
mulsd .LCPI4_1(%rip), %xmm0
divsd .LCPI4_4(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
cmpb $0, 56(%rbp)
je .LBB4_14
# %bb.13:
movzbl 67(%rbp), %eax
jmp .LBB4_15
.LBB4_14:
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB4_15: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r13, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rax, %r13
movl $.L.str.7, %esi
movl $9, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r13, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r13
testq %r13, %r13
je .LBB4_43
# %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i64
cmpb $0, 56(%r13)
je .LBB4_18
# %bb.17:
movzbl 67(%r13), %ecx
jmp .LBB4_19
.LBB4_18:
movq %r13, %rdi
movq %rax, %rbp
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbp, %rax
.LBB4_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit67
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 24(%rsp), %rsi
movq %r14, %rdi
movq %r12, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_50
# %bb.20: # %_Z17__checkCudaErrors10hipError_tPKci.exit62.preheader
cmpl $0, 16(%rsp) # 4-byte Folded Reload
jle .LBB4_25
# %bb.21: # %.lr.ph.preheader
xorl %r12d, %r12d
movaps .LCPI4_2(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI4_3(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB4_22: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%r14,%r12,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%r15,%r12,4), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm1, %xmm2
ja .LBB4_26
# %bb.23: # %_Z17__checkCudaErrors10hipError_tPKci.exit62
# in Loop: Header=BB4_22 Depth=1
incq %r12
cmpq %r12, %rbx
jne .LBB4_22
# %bb.24: # %._crit_edge.loopexit
movl %ebx, %r12d
jmp .LBB4_26
.LBB4_25:
xorl %r12d, %r12d
.LBB4_26: # %._crit_edge
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .LBB4_28
# %bb.27:
callq hipFree
.LBB4_28:
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .LBB4_30
# %bb.29:
callq hipFree
.LBB4_30:
testq %r15, %r15
je .LBB4_32
# %bb.31:
movq %r15, %rdi
callq free
.LBB4_32:
testq %r14, %r14
je .LBB4_34
# %bb.33:
movq %r14, %rdi
callq free
.LBB4_34:
callq hipDeviceReset
cmpl 16(%rsp), %r12d # 4-byte Folded Reload
jne .LBB4_38
# %bb.35:
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB4_43
# %bb.36: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i69
cmpb $0, 56(%rbx)
je .LBB4_41
.LBB4_37:
movzbl 67(%rbx), %eax
jmp .LBB4_42
.LBB4_38:
movl $_ZSt4cout, %edi
movl $.L.str.9, %esi
movl $6, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB4_43
# %bb.39: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i74
cmpb $0, 56(%rbx)
jne .LBB4_37
.LBB4_41:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB4_42: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit72
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB4_43:
.cfi_def_cfa_offset 192
callq _ZSt16__throw_bad_castv
.LBB4_44:
movl %eax, %ebp
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $98, %ecx
jmp .LBB4_51
.LBB4_45:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $99, %ecx
jmp .LBB4_51
.LBB4_46:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $103, %ecx
jmp .LBB4_51
.LBB4_47:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $105, %ecx
jmp .LBB4_51
.LBB4_48:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movq %rax, %r10
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str.12, %esi
movl $.L.str.2, %edx
movl $.L.str.4, %r8d
movq %rbx, %rdi
movl $114, %ecx
movl %ebp, %r9d
xorl %eax, %eax
pushq %r10
.cfi_adjust_cfa_offset 8
callq fprintf
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $-1, %edi
callq exit
.LBB4_49:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $115, %ecx
jmp .LBB4_51
.LBB4_50:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $125, %ecx
.LBB4_51:
movl %ebp, %r8d
movq %rax, %r9
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end4:
.size _Z14VectorAdditioniii, .Lfunc_end4-_Z14VectorAdditioniii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
cmpl $3, %edi
jg .LBB5_2
# %bb.1:
movl $.Lstr, %edi
callq puts@PLT
jmp .LBB5_3
.LBB5_2:
movq 8(%rsi), %rdi
movq %rsi, %r15
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq 16(%r15), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 24(%r15), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %ebx, %edi
movl %r14d, %esi
movl %eax, %edx
callq _Z14VectorAdditioniii
.LBB5_3:
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14l1_stride_consPKfPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9l1_stridePKfPfi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14l1_stride_consPKfPfi,@object # @_Z14l1_stride_consPKfPfi
.section .rodata,"a",@progbits
.globl _Z14l1_stride_consPKfPfi
.p2align 3, 0x0
_Z14l1_stride_consPKfPfi:
.quad _Z29__device_stub__l1_stride_consPKfPfi
.size _Z14l1_stride_consPKfPfi, 8
.type _Z9l1_stridePKfPfi,@object # @_Z9l1_stridePKfPfi
.globl _Z9l1_stridePKfPfi
.p2align 3, 0x0
_Z9l1_stridePKfPfi:
.quad _Z24__device_stub__l1_stridePKfPfi
.size _Z9l1_stridePKfPfi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Vector Addition for input size "
.size .L.str, 32
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " :\n"
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/shen203/GPU_Microbenchmark/master/coalescer/l1_stride.hip"
.size .L.str.2, 115
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Invoke Kernel\n"
.size .L.str.3, 15
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "kernel launch failure"
.size .L.str.4, 22
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Time = "
.size .L.str.5, 8
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "msec"
.size .L.str.6, 5
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "gflops = "
.size .L.str.7, 10
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "SUCCSESS"
.size .L.str.8, 9
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "FAILED"
.size .L.str.9, 7
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "%s(%i) : CUDA Runtime API error %d: %s.\n"
.size .L.str.11, 41
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.size .L.str.12, 56
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14l1_stride_consPKfPfi"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z9l1_stridePKfPfi"
.size .L__unnamed_2, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Unsuffcient number of arguments!"
.size .Lstr, 33
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__l1_stride_consPKfPfi
.addrsig_sym _Z24__device_stub__l1_stridePKfPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14l1_stride_consPKfPfi
.addrsig_sym _Z9l1_stridePKfPfi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
#include "math.h"
typedef
union
{
int32_t i;
struct
{
int16_t lo; // endian-specific!
int16_t hi;
};
} fixed_point;
void checkCUDAError(const char* msg);
__global__ void kernel(char *pairpixelsD, int nP, int width, unsigned char *pairsD,
float* outmD, int degree, const int xheight, int nPairs) {
// just use global memory for now
// get threadID:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= nPairs) return;
// first, get the first and second pixel from pairsD
unsigned char *pairPtr = &pairsD[6*idx];
int x0 = ( pairPtr[0] << 8 ) + pairPtr[1];
int x1 = ( pairPtr[2] << 8 ) + pairPtr[3];
int y0 = pairPtr[4];
int y1 = pairPtr[5];
// calculate the first three variables
float vdx = (x1 - x0)/(float)xheight;
float vdy = (y1 - y0)/(float)xheight;
float vyy = 0.5*(y0 + y1)/(float)xheight;
// now calculate amount of black
int btotal = 0;
int bblack = 0;
fixed_point f;
if(abs(y1-y0) < abs(x1-x0)) {
int x;
int32_t m=((int32_t)(y1-y0)<<16)/(x1-x0);
f.i=y0<<16;
for (x=x0;x<=x1;x++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * g.hi + x];
}
} else {
int y;
int32_t m=((int32_t)(x1-x0)<<16)/(y1-y0);
f.i=x0<<16;
for (y=y0;y<=y1;y++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * y + g.hi];
}
}
float vbl = bblack/(float)btotal;
// now calculate monomial results and store
// should maybe be done locally and then pushed to global memory
for(int evdx = 0; evdx <= degree; evdx++) {
for(int evdy = 0; evdy <= degree; evdy++) {
for(int evyy = 0; evyy <= degree; evyy++) {
for(int evbl = 0; evbl <= degree; evbl++) {
outmD[evbl + degree*evyy + (degree*degree)*evdy +
(degree*degree*degree)*evdx] += powf(vdx, evdx) + powf(vdy, evdy)
+ powf(vyy, evyy) + powf(vbl, evbl);
}
}
}
}
}
void kernel_wrapper(char* pairpixelsH, int width, int height, unsigned char*
pairsH, int nPairs, float* outmH, const int degree, const int nMonomials,
const int xheight) {
// create matrix with both letters in it
// one-dimensional, with one byte per pixel, going from bottom to top,
// left to right.
// also, create matrix for polynomial output on device
float *outmD;
cudaMalloc((void**) &outmD, nMonomials*sizeof(float));
cudaMemset(outmD, 0.f, nMonomials*sizeof(float));
// copy matrix into CUDA memory
char *pairpixelsD;
cudaMalloc((void**) &pairpixelsD, width*height*sizeof(char));
cudaMemcpy(pairpixelsD, pairpixelsH, width*height*sizeof(char), cudaMemcpyHostToDevice);
// copy list of pixel pairs into CUDA memory
unsigned char* pairsD;
cudaMalloc((void**) &pairsD, nPairs*6*sizeof(unsigned char));
checkCUDAError("Couldn't create pairs");
cudaMemcpy(pairsD, pairsH, nPairs*6*sizeof(unsigned char), cudaMemcpyHostToDevice);
checkCUDAError("Couldn't copy pairs");
// create tons of threads with ID composed of left and right
int nThreadsPerBlock = 512;
int nBlocks = ceil(nPairs/(float)nThreadsPerBlock);
// calculate number of shared memory bytes needed
printf("xheight: %d\n", xheight);
// call the kernel
kernel<<< nBlocks, nThreadsPerBlock >>>( pairpixelsD, width*height, width, pairsD, outmD, degree, xheight, nPairs);
cudaThreadSynchronize();
checkCUDAError("Kernel Failed!");
// copy over results
cudaMemcpy(outmH, outmD, nMonomials*sizeof(float), cudaMemcpyDeviceToHost);
// finally: free memory
cudaFree(pairpixelsD);
cudaFree(pairsD);
cudaFree(outmD);
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
} | .file "tmpxft_000ac643_00000000-6_kernelwrapper.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Cuda error: %s: %s.\n"
.text
.globl _Z14checkCUDAErrorPKc
.type _Z14checkCUDAErrorPKc, @function
_Z14checkCUDAErrorPKc:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L6
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z14checkCUDAErrorPKc, .-_Z14checkCUDAErrorPKc
.globl _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
.type _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii, @function
_Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii:
.LFB2083:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z6kernelPciiPhPfiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii, .-_Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
.globl _Z6kernelPciiPhPfiii
.type _Z6kernelPciiPhPfiii, @function
_Z6kernelPciiPhPfiii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6kernelPciiPhPfiii, .-_Z6kernelPciiPhPfiii
.section .rodata.str1.1
.LC1:
.string "Couldn't create pairs"
.LC2:
.string "Couldn't copy pairs"
.LC7:
.string "xheight: %d\n"
.LC8:
.string "Kernel Failed!"
.text
.globl _Z14kernel_wrapperPciiPhiPfiii
.type _Z14kernel_wrapperPciiPhiPfiii, @function
_Z14kernel_wrapperPciiPhiPfiii:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %rdi, %r14
movl %esi, %r13d
movl %edx, %ebx
movq %rcx, (%rsp)
movl %r8d, %ebp
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movslq 152(%rsp), %r12
salq $2, %r12
leaq 24(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movq %r12, %rdx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
imull %r13d, %ebx
movslq %ebx, %r15
leaq 32(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r15, %rdx
movq %r14, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
leal 0(%rbp,%rbp,2), %r14d
addl %r14d, %r14d
movslq %r14d, %r14
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq .LC1(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl $1, %ecx
movq %r14, %rdx
movq (%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rdi
call _Z14checkCUDAErrorPKc
pxor %xmm0, %xmm0
cvtsi2ssl %ebp, %xmm0
mulss .LC3(%rip), %xmm0
movss %xmm0, (%rsp)
movss .LC9(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC4(%rip), %xmm3
ucomiss %xmm1, %xmm3
jbe .L16
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
movaps %xmm0, %xmm3
cmpnless %xmm1, %xmm3
movss .LC6(%rip), %xmm4
andps %xmm4, %xmm3
addss %xmm3, %xmm1
andnps %xmm0, %xmm2
orps %xmm2, %xmm1
movss %xmm1, (%rsp)
.L16:
movl 160(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $512, 60(%rsp)
movl $1, 64(%rsp)
cvttss2sil (%rsp), %eax
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L17:
call cudaThreadSynchronize@PLT
leaq .LC8(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl $2, %ecx
movq %r12, %rdx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq %rbp
.cfi_def_cfa_offset 152
movl 168(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 160
movl 160(%rsp), %r9d
movq 40(%rsp), %r8
movq 56(%rsp), %rcx
movl %r13d, %edx
movl %ebx, %esi
movq 48(%rsp), %rdi
call _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L17
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z14kernel_wrapperPciiPhiPfiii, .-_Z14kernel_wrapperPciiPhiPfiii
.section .rodata.str1.1
.LC10:
.string "_Z6kernelPciiPhPfiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPciiPhPfiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 989855744
.align 4
.LC4:
.long 1258291200
.align 4
.LC6:
.long 1065353216
.align 4
.LC9:
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
#include "math.h"
typedef
union
{
int32_t i;
struct
{
int16_t lo; // endian-specific!
int16_t hi;
};
} fixed_point;
void checkCUDAError(const char* msg);
__global__ void kernel(char *pairpixelsD, int nP, int width, unsigned char *pairsD,
float* outmD, int degree, const int xheight, int nPairs) {
// just use global memory for now
// get threadID:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= nPairs) return;
// first, get the first and second pixel from pairsD
unsigned char *pairPtr = &pairsD[6*idx];
int x0 = ( pairPtr[0] << 8 ) + pairPtr[1];
int x1 = ( pairPtr[2] << 8 ) + pairPtr[3];
int y0 = pairPtr[4];
int y1 = pairPtr[5];
// calculate the first three variables
float vdx = (x1 - x0)/(float)xheight;
float vdy = (y1 - y0)/(float)xheight;
float vyy = 0.5*(y0 + y1)/(float)xheight;
// now calculate amount of black
int btotal = 0;
int bblack = 0;
fixed_point f;
if(abs(y1-y0) < abs(x1-x0)) {
int x;
int32_t m=((int32_t)(y1-y0)<<16)/(x1-x0);
f.i=y0<<16;
for (x=x0;x<=x1;x++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * g.hi + x];
}
} else {
int y;
int32_t m=((int32_t)(x1-x0)<<16)/(y1-y0);
f.i=x0<<16;
for (y=y0;y<=y1;y++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * y + g.hi];
}
}
float vbl = bblack/(float)btotal;
// now calculate monomial results and store
// should maybe be done locally and then pushed to global memory
for(int evdx = 0; evdx <= degree; evdx++) {
for(int evdy = 0; evdy <= degree; evdy++) {
for(int evyy = 0; evyy <= degree; evyy++) {
for(int evbl = 0; evbl <= degree; evbl++) {
outmD[evbl + degree*evyy + (degree*degree)*evdy +
(degree*degree*degree)*evdx] += powf(vdx, evdx) + powf(vdy, evdy)
+ powf(vyy, evyy) + powf(vbl, evbl);
}
}
}
}
}
void kernel_wrapper(char* pairpixelsH, int width, int height, unsigned char*
pairsH, int nPairs, float* outmH, const int degree, const int nMonomials,
const int xheight) {
// create matrix with both letters in it
// one-dimensional, with one byte per pixel, going from bottom to top,
// left to right.
// also, create matrix for polynomial output on device
float *outmD;
cudaMalloc((void**) &outmD, nMonomials*sizeof(float));
cudaMemset(outmD, 0.f, nMonomials*sizeof(float));
// copy matrix into CUDA memory
char *pairpixelsD;
cudaMalloc((void**) &pairpixelsD, width*height*sizeof(char));
cudaMemcpy(pairpixelsD, pairpixelsH, width*height*sizeof(char), cudaMemcpyHostToDevice);
// copy list of pixel pairs into CUDA memory
unsigned char* pairsD;
cudaMalloc((void**) &pairsD, nPairs*6*sizeof(unsigned char));
checkCUDAError("Couldn't create pairs");
cudaMemcpy(pairsD, pairsH, nPairs*6*sizeof(unsigned char), cudaMemcpyHostToDevice);
checkCUDAError("Couldn't copy pairs");
// create tons of threads with ID composed of left and right
int nThreadsPerBlock = 512;
int nBlocks = ceil(nPairs/(float)nThreadsPerBlock);
// calculate number of shared memory bytes needed
printf("xheight: %d\n", xheight);
// call the kernel
kernel<<< nBlocks, nThreadsPerBlock >>>( pairpixelsD, width*height, width, pairsD, outmD, degree, xheight, nPairs);
cudaThreadSynchronize();
checkCUDAError("Kernel Failed!");
// copy over results
cudaMemcpy(outmH, outmD, nMonomials*sizeof(float), cudaMemcpyDeviceToHost);
// finally: free memory
cudaFree(pairpixelsD);
cudaFree(pairsD);
cudaFree(outmD);
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
} | #include <stdio.h>
#include <hip/hip_runtime.h>
#include "math.h"
typedef
union
{
int32_t i;
struct
{
int16_t lo; // endian-specific!
int16_t hi;
};
} fixed_point;
void checkCUDAError(const char* msg);
__global__ void kernel(char *pairpixelsD, int nP, int width, unsigned char *pairsD,
float* outmD, int degree, const int xheight, int nPairs) {
// just use global memory for now
// get threadID:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= nPairs) return;
// first, get the first and second pixel from pairsD
unsigned char *pairPtr = &pairsD[6*idx];
int x0 = ( pairPtr[0] << 8 ) + pairPtr[1];
int x1 = ( pairPtr[2] << 8 ) + pairPtr[3];
int y0 = pairPtr[4];
int y1 = pairPtr[5];
// calculate the first three variables
float vdx = (x1 - x0)/(float)xheight;
float vdy = (y1 - y0)/(float)xheight;
float vyy = 0.5*(y0 + y1)/(float)xheight;
// now calculate amount of black
int btotal = 0;
int bblack = 0;
fixed_point f;
if(abs(y1-y0) < abs(x1-x0)) {
int x;
int32_t m=((int32_t)(y1-y0)<<16)/(x1-x0);
f.i=y0<<16;
for (x=x0;x<=x1;x++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * g.hi + x];
}
} else {
int y;
int32_t m=((int32_t)(x1-x0)<<16)/(y1-y0);
f.i=x0<<16;
for (y=y0;y<=y1;y++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * y + g.hi];
}
}
float vbl = bblack/(float)btotal;
// now calculate monomial results and store
// should maybe be done locally and then pushed to global memory
for(int evdx = 0; evdx <= degree; evdx++) {
for(int evdy = 0; evdy <= degree; evdy++) {
for(int evyy = 0; evyy <= degree; evyy++) {
for(int evbl = 0; evbl <= degree; evbl++) {
outmD[evbl + degree*evyy + (degree*degree)*evdy +
(degree*degree*degree)*evdx] += powf(vdx, evdx) + powf(vdy, evdy)
+ powf(vyy, evyy) + powf(vbl, evbl);
}
}
}
}
}
void kernel_wrapper(char* pairpixelsH, int width, int height, unsigned char*
pairsH, int nPairs, float* outmH, const int degree, const int nMonomials,
const int xheight) {
// create matrix with both letters in it
// one-dimensional, with one byte per pixel, going from bottom to top,
// left to right.
// also, create matrix for polynomial output on device
float *outmD;
hipMalloc((void**) &outmD, nMonomials*sizeof(float));
hipMemset(outmD, 0.f, nMonomials*sizeof(float));
// copy matrix into CUDA memory
char *pairpixelsD;
hipMalloc((void**) &pairpixelsD, width*height*sizeof(char));
hipMemcpy(pairpixelsD, pairpixelsH, width*height*sizeof(char), hipMemcpyHostToDevice);
// copy list of pixel pairs into CUDA memory
unsigned char* pairsD;
hipMalloc((void**) &pairsD, nPairs*6*sizeof(unsigned char));
checkCUDAError("Couldn't create pairs");
hipMemcpy(pairsD, pairsH, nPairs*6*sizeof(unsigned char), hipMemcpyHostToDevice);
checkCUDAError("Couldn't copy pairs");
// create tons of threads with ID composed of left and right
int nThreadsPerBlock = 512;
int nBlocks = ceil(nPairs/(float)nThreadsPerBlock);
// calculate number of shared memory bytes needed
printf("xheight: %d\n", xheight);
// call the kernel
kernel<<< nBlocks, nThreadsPerBlock >>>( pairpixelsD, width*height, width, pairsD, outmD, degree, xheight, nPairs);
hipDeviceSynchronize();
checkCUDAError("Kernel Failed!");
// copy over results
hipMemcpy(outmH, outmD, nMonomials*sizeof(float), hipMemcpyDeviceToHost);
// finally: free memory
hipFree(pairpixelsD);
hipFree(pairsD);
hipFree(outmD);
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#include "math.h"
typedef
union
{
int32_t i;
struct
{
int16_t lo; // endian-specific!
int16_t hi;
};
} fixed_point;
void checkCUDAError(const char* msg);
__global__ void kernel(char *pairpixelsD, int nP, int width, unsigned char *pairsD,
float* outmD, int degree, const int xheight, int nPairs) {
// just use global memory for now
// get threadID:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= nPairs) return;
// first, get the first and second pixel from pairsD
unsigned char *pairPtr = &pairsD[6*idx];
int x0 = ( pairPtr[0] << 8 ) + pairPtr[1];
int x1 = ( pairPtr[2] << 8 ) + pairPtr[3];
int y0 = pairPtr[4];
int y1 = pairPtr[5];
// calculate the first three variables
float vdx = (x1 - x0)/(float)xheight;
float vdy = (y1 - y0)/(float)xheight;
float vyy = 0.5*(y0 + y1)/(float)xheight;
// now calculate amount of black
int btotal = 0;
int bblack = 0;
fixed_point f;
if(abs(y1-y0) < abs(x1-x0)) {
int x;
int32_t m=((int32_t)(y1-y0)<<16)/(x1-x0);
f.i=y0<<16;
for (x=x0;x<=x1;x++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * g.hi + x];
}
} else {
int y;
int32_t m=((int32_t)(x1-x0)<<16)/(y1-y0);
f.i=x0<<16;
for (y=y0;y<=y1;y++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * y + g.hi];
}
}
float vbl = bblack/(float)btotal;
// now calculate monomial results and store
// should maybe be done locally and then pushed to global memory
for(int evdx = 0; evdx <= degree; evdx++) {
for(int evdy = 0; evdy <= degree; evdy++) {
for(int evyy = 0; evyy <= degree; evyy++) {
for(int evbl = 0; evbl <= degree; evbl++) {
outmD[evbl + degree*evyy + (degree*degree)*evdy +
(degree*degree*degree)*evdx] += powf(vdx, evdx) + powf(vdy, evdy)
+ powf(vyy, evyy) + powf(vbl, evbl);
}
}
}
}
}
void kernel_wrapper(char* pairpixelsH, int width, int height, unsigned char*
pairsH, int nPairs, float* outmH, const int degree, const int nMonomials,
const int xheight) {
// create matrix with both letters in it
// one-dimensional, with one byte per pixel, going from bottom to top,
// left to right.
// also, create matrix for polynomial output on device
float *outmD;
hipMalloc((void**) &outmD, nMonomials*sizeof(float));
hipMemset(outmD, 0.f, nMonomials*sizeof(float));
// copy matrix into CUDA memory
char *pairpixelsD;
hipMalloc((void**) &pairpixelsD, width*height*sizeof(char));
hipMemcpy(pairpixelsD, pairpixelsH, width*height*sizeof(char), hipMemcpyHostToDevice);
// copy list of pixel pairs into CUDA memory
unsigned char* pairsD;
hipMalloc((void**) &pairsD, nPairs*6*sizeof(unsigned char));
checkCUDAError("Couldn't create pairs");
hipMemcpy(pairsD, pairsH, nPairs*6*sizeof(unsigned char), hipMemcpyHostToDevice);
checkCUDAError("Couldn't copy pairs");
// create tons of threads with ID composed of left and right
int nThreadsPerBlock = 512;
int nBlocks = ceil(nPairs/(float)nThreadsPerBlock);
// calculate number of shared memory bytes needed
printf("xheight: %d\n", xheight);
// call the kernel
kernel<<< nBlocks, nThreadsPerBlock >>>( pairpixelsD, width*height, width, pairsD, outmD, degree, xheight, nPairs);
hipDeviceSynchronize();
checkCUDAError("Kernel Failed!");
// copy over results
hipMemcpy(outmH, outmD, nMonomials*sizeof(float), hipMemcpyDeviceToHost);
// finally: free memory
hipFree(pairpixelsD);
hipFree(pairsD);
hipFree(outmD);
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
} | .text
.file "kernelwrapper.hip"
.globl _Z21__device_stub__kernelPciiPhPfiii # -- Begin function _Z21__device_stub__kernelPciiPhPfiii
.p2align 4, 0x90
.type _Z21__device_stub__kernelPciiPhPfiii,@function
_Z21__device_stub__kernelPciiPhPfiii: # @_Z21__device_stub__kernelPciiPhPfiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6kernelPciiPhPfiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPciiPhPfiii, .Lfunc_end0-_Z21__device_stub__kernelPciiPhPfiii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14kernel_wrapperPciiPhiPfiii
.LCPI1_0:
.long 0x3b000000 # float 0.001953125
.text
.globl _Z14kernel_wrapperPciiPhiPfiii
.p2align 4, 0x90
.type _Z14kernel_wrapperPciiPhiPfiii,@function
_Z14kernel_wrapperPciiPhiPfiii: # @_Z14kernel_wrapperPciiPhiPfiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 48(%rsp) # 8-byte Spill
movl %r8d, %r15d
movq %rcx, %r13
movl %edx, %ebp
movl %esi, %r12d
movq %rdi, %rbx
movslq 264(%rsp), %r14
shlq $2, %r14
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq (%rsp), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl %r12d, 24(%rsp) # 4-byte Spill
imull %r12d, %ebp
movslq %ebp, %r12
leaq 16(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
leal (%r15,%r15), %eax
leal (%rax,%rax,2), %eax
movslq %eax, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB1_1
# %bb.3: # %_Z14checkCUDAErrorPKc.exit
movq 8(%rsp), %rdi
movq %r13, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
jne .LBB1_4
# %bb.5: # %_Z14checkCUDAErrorPKc.exit25
cvtsi2ss %r15d, %xmm0
movl 272(%rsp), %r13d
mulss .LCPI1_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %ebx
movl $.L.str.2, %edi
movl %r13d, %esi
xorl %eax, %eax
callq printf
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rbx
orq $512, %rdx # imm = 0x200
movq %rbx, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_7
# %bb.6:
movl 256(%rsp), %eax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movq %rcx, 120(%rsp)
movl %ebp, 44(%rsp)
movl 24(%rsp), %ecx # 4-byte Reload
movl %ecx, 40(%rsp)
movq %rdx, 112(%rsp)
movq %rsi, 104(%rsp)
movl %eax, 36(%rsp)
movl %r13d, 32(%rsp)
movl %r15d, 28(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 44(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rax
movq %rax, 152(%rsp)
leaq 104(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 28(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z6kernelPciiPhPfiii, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_7:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB1_8
# %bb.9: # %_Z14checkCUDAErrorPKc.exit27
movq (%rsp), %rsi
movq 48(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_1:
.cfi_def_cfa_offset 256
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movl $.L.str, %edx
jmp .LBB1_2
.LBB1_4:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movl $.L.str.1, %edx
jmp .LBB1_2
.LBB1_8:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movl $.L.str.3, %edx
.LBB1_2:
movq %rbx, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z14kernel_wrapperPciiPhiPfiii, .Lfunc_end1-_Z14kernel_wrapperPciiPhiPfiii
.cfi_endproc
# -- End function
.globl _Z14checkCUDAErrorPKc # -- Begin function _Z14checkCUDAErrorPKc
.p2align 4, 0x90
.type _Z14checkCUDAErrorPKc,@function
_Z14checkCUDAErrorPKc: # @_Z14checkCUDAErrorPKc
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB2_2
# %bb.1:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB2_2:
.cfi_def_cfa_offset 32
movq stderr(%rip), %r14
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movq %r14, %rdi
movq %rbx, %rdx
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end2:
.size _Z14checkCUDAErrorPKc, .Lfunc_end2-_Z14checkCUDAErrorPKc
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPciiPhPfiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPciiPhPfiii,@object # @_Z6kernelPciiPhPfiii
.section .rodata,"a",@progbits
.globl _Z6kernelPciiPhPfiii
.p2align 3, 0x0
_Z6kernelPciiPhPfiii:
.quad _Z21__device_stub__kernelPciiPhPfiii
.size _Z6kernelPciiPhPfiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Couldn't create pairs"
.size .L.str, 22
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Couldn't copy pairs"
.size .L.str.1, 20
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "xheight: %d\n"
.size .L.str.2, 13
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Kernel Failed!"
.size .L.str.3, 15
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Cuda error: %s: %s.\n"
.size .L.str.4, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPciiPhPfiii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPciiPhPfiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPciiPhPfiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000ac643_00000000-6_kernelwrapper.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Cuda error: %s: %s.\n"
.text
.globl _Z14checkCUDAErrorPKc
.type _Z14checkCUDAErrorPKc, @function
_Z14checkCUDAErrorPKc:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L6
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z14checkCUDAErrorPKc, .-_Z14checkCUDAErrorPKc
.globl _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
.type _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii, @function
_Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii:
.LFB2083:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z6kernelPciiPhPfiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii, .-_Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
.globl _Z6kernelPciiPhPfiii
.type _Z6kernelPciiPhPfiii, @function
_Z6kernelPciiPhPfiii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6kernelPciiPhPfiii, .-_Z6kernelPciiPhPfiii
.section .rodata.str1.1
.LC1:
.string "Couldn't create pairs"
.LC2:
.string "Couldn't copy pairs"
.LC7:
.string "xheight: %d\n"
.LC8:
.string "Kernel Failed!"
.text
.globl _Z14kernel_wrapperPciiPhiPfiii
.type _Z14kernel_wrapperPciiPhiPfiii, @function
_Z14kernel_wrapperPciiPhiPfiii:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %rdi, %r14
movl %esi, %r13d
movl %edx, %ebx
movq %rcx, (%rsp)
movl %r8d, %ebp
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movslq 152(%rsp), %r12
salq $2, %r12
leaq 24(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movq %r12, %rdx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
imull %r13d, %ebx
movslq %ebx, %r15
leaq 32(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r15, %rdx
movq %r14, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
leal 0(%rbp,%rbp,2), %r14d
addl %r14d, %r14d
movslq %r14d, %r14
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq .LC1(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl $1, %ecx
movq %r14, %rdx
movq (%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rdi
call _Z14checkCUDAErrorPKc
pxor %xmm0, %xmm0
cvtsi2ssl %ebp, %xmm0
mulss .LC3(%rip), %xmm0
movss %xmm0, (%rsp)
movss .LC9(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC4(%rip), %xmm3
ucomiss %xmm1, %xmm3
jbe .L16
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
movaps %xmm0, %xmm3
cmpnless %xmm1, %xmm3
movss .LC6(%rip), %xmm4
andps %xmm4, %xmm3
addss %xmm3, %xmm1
andnps %xmm0, %xmm2
orps %xmm2, %xmm1
movss %xmm1, (%rsp)
.L16:
movl 160(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $512, 60(%rsp)
movl $1, 64(%rsp)
cvttss2sil (%rsp), %eax
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L17:
call cudaThreadSynchronize@PLT
leaq .LC8(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl $2, %ecx
movq %r12, %rdx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq %rbp
.cfi_def_cfa_offset 152
movl 168(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 160
movl 160(%rsp), %r9d
movq 40(%rsp), %r8
movq 56(%rsp), %rcx
movl %r13d, %edx
movl %ebx, %esi
movq 48(%rsp), %rdi
call _Z34__device_stub__Z6kernelPciiPhPfiiiPciiPhPfiii
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L17
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z14kernel_wrapperPciiPhiPfiii, .-_Z14kernel_wrapperPciiPhiPfiii
.section .rodata.str1.1
.LC10:
.string "_Z6kernelPciiPhPfiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPciiPhPfiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 989855744
.align 4
.LC4:
.long 1258291200
.align 4
.LC6:
.long 1065353216
.align 4
.LC9:
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernelwrapper.hip"
.globl _Z21__device_stub__kernelPciiPhPfiii # -- Begin function _Z21__device_stub__kernelPciiPhPfiii
.p2align 4, 0x90
.type _Z21__device_stub__kernelPciiPhPfiii,@function
_Z21__device_stub__kernelPciiPhPfiii: # @_Z21__device_stub__kernelPciiPhPfiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6kernelPciiPhPfiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPciiPhPfiii, .Lfunc_end0-_Z21__device_stub__kernelPciiPhPfiii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14kernel_wrapperPciiPhiPfiii
.LCPI1_0:
.long 0x3b000000 # float 0.001953125
.text
.globl _Z14kernel_wrapperPciiPhiPfiii
.p2align 4, 0x90
.type _Z14kernel_wrapperPciiPhiPfiii,@function
_Z14kernel_wrapperPciiPhiPfiii: # @_Z14kernel_wrapperPciiPhiPfiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 48(%rsp) # 8-byte Spill
movl %r8d, %r15d
movq %rcx, %r13
movl %edx, %ebp
movl %esi, %r12d
movq %rdi, %rbx
movslq 264(%rsp), %r14
shlq $2, %r14
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq (%rsp), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movl %r12d, 24(%rsp) # 4-byte Spill
imull %r12d, %ebp
movslq %ebp, %r12
leaq 16(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
leal (%r15,%r15), %eax
leal (%rax,%rax,2), %eax
movslq %eax, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB1_1
# %bb.3: # %_Z14checkCUDAErrorPKc.exit
movq 8(%rsp), %rdi
movq %r13, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
jne .LBB1_4
# %bb.5: # %_Z14checkCUDAErrorPKc.exit25
cvtsi2ss %r15d, %xmm0
movl 272(%rsp), %r13d
mulss .LCPI1_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %ebx
movl $.L.str.2, %edi
movl %r13d, %esi
xorl %eax, %eax
callq printf
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rbx
orq $512, %rdx # imm = 0x200
movq %rbx, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_7
# %bb.6:
movl 256(%rsp), %eax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movq %rcx, 120(%rsp)
movl %ebp, 44(%rsp)
movl 24(%rsp), %ecx # 4-byte Reload
movl %ecx, 40(%rsp)
movq %rdx, 112(%rsp)
movq %rsi, 104(%rsp)
movl %eax, 36(%rsp)
movl %r13d, 32(%rsp)
movl %r15d, 28(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 44(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rax
movq %rax, 152(%rsp)
leaq 104(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 28(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z6kernelPciiPhPfiii, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_7:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB1_8
# %bb.9: # %_Z14checkCUDAErrorPKc.exit27
movq (%rsp), %rsi
movq 48(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_1:
.cfi_def_cfa_offset 256
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movl $.L.str, %edx
jmp .LBB1_2
.LBB1_4:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movl $.L.str.1, %edx
jmp .LBB1_2
.LBB1_8:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movl $.L.str.3, %edx
.LBB1_2:
movq %rbx, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z14kernel_wrapperPciiPhiPfiii, .Lfunc_end1-_Z14kernel_wrapperPciiPhiPfiii
.cfi_endproc
# -- End function
.globl _Z14checkCUDAErrorPKc # -- Begin function _Z14checkCUDAErrorPKc
.p2align 4, 0x90
.type _Z14checkCUDAErrorPKc,@function
_Z14checkCUDAErrorPKc: # @_Z14checkCUDAErrorPKc
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB2_2
# %bb.1:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB2_2:
.cfi_def_cfa_offset 32
movq stderr(%rip), %r14
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.4, %esi
movq %r14, %rdi
movq %rbx, %rdx
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end2:
.size _Z14checkCUDAErrorPKc, .Lfunc_end2-_Z14checkCUDAErrorPKc
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPciiPhPfiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPciiPhPfiii,@object # @_Z6kernelPciiPhPfiii
.section .rodata,"a",@progbits
.globl _Z6kernelPciiPhPfiii
.p2align 3, 0x0
_Z6kernelPciiPhPfiii:
.quad _Z21__device_stub__kernelPciiPhPfiii
.size _Z6kernelPciiPhPfiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Couldn't create pairs"
.size .L.str, 22
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Couldn't copy pairs"
.size .L.str.1, 20
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "xheight: %d\n"
.size .L.str.2, 13
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Kernel Failed!"
.size .L.str.3, 15
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Cuda error: %s: %s.\n"
.size .L.str.4, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPciiPhPfiii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPciiPhPfiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPciiPhPfiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 1000000000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CPU version of the vector add function
void vector_add_cpu(int *a, int *b, int *c, int n) {
int i;
// Add the vector elements a and b to the vector c
for (i = 0; i < n; ++i) {
c[i] = a[i] + b[i];
}
}
// GPU version of the vector add function
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
int i = blockIdx.x;
// No for loop needed because the CUDA runtime
// will thread this ITER times
gpu_c[i] = gpu_a[i] + gpu_b[i];
}
int main() {
int *a, *b, *c;
int *gpu_a, *gpu_b, *gpu_c;
int *ha, *hb, *hc;
a = (int *)malloc(ITER * sizeof(int));
b = (int *)malloc(ITER * sizeof(int));
c = (int *)malloc(ITER * sizeof(int));
ha = (int *)malloc(ITER * sizeof(int));
hb = (int *)malloc(ITER * sizeof(int));
hc = (int *)malloc(ITER * sizeof(int));
// We need variables accessible to the GPU,
// so cudaMallocManaged provides these
cudaMallocManaged(&gpu_a, ITER * sizeof(int));
cudaMallocManaged(&gpu_b, ITER * sizeof(int));
cudaMallocManaged(&gpu_c, ITER * sizeof(int));
for (int i = 0; i < ITER; ++i) {
a[i] = i;
b[i] = i;
c[i] = i;
}
// Call the CPU function and time it
auto cpu_start = Clock::now();
vector_add_cpu(a, b, c, ITER);
auto cpu_end = Clock::now();
std::cout << "vector_add_cpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
<< " nanoseconds.\n";
// Call the GPU function and time it
// The triple angle brakets is a CUDA runtime extension that allows
// parameters of a CUDA kernel call to be passed.
// In this example, we are passing one thread block with ITER threads.
cudaMemcpy(gpu_a,a,ITER*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b,b,ITER*sizeof(int),cudaMemcpyHostToDevice);
auto gpu_start = Clock::now();
vector_add_gpu <<<ITER, 1024>>> (gpu_a, gpu_b, gpu_c, ITER);
cudaDeviceSynchronize();
auto gpu_end = Clock::now();
std::cout << "vector_add_gpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
<< " nanoseconds.\n";
cudaMemcpy(hc,gpu_c,ITER*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(hb,gpu_b,ITER*sizeof(int),cudaMemcpyDeviceToHost);
gpuErrchk(cudaMemcpy(ha,gpu_a,ITER*sizeof(int),cudaMemcpyDeviceToHost));
std::cout<<c[0]<<"\t"<<hc[0]<<std::endl;
std::cout<<c[int(ITER/3)]<<"\t"<<hc[int(ITER/3)]<<std::endl;
std::cout<<c[int(2*ITER/3)]<<"\t"<<hc[int(2*ITER/3)]<<std::endl;
std::cout<<c[int(ITER-1)]<<"\t"<<hc[int(ITER-1)]<<std::endl;
// Free the GPU-function based memory allocations
cudaFree(a);
cudaFree(b);
cudaFree(c);
// Free the CPU-function based memory allocations
free(a);
free(b);
free(c);
return 0;
} | code for sm_80
Function : _Z14vector_add_gpuPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 1000000000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CPU version of the vector add function
void vector_add_cpu(int *a, int *b, int *c, int n) {
int i;
// Add the vector elements a and b to the vector c
for (i = 0; i < n; ++i) {
c[i] = a[i] + b[i];
}
}
// GPU version of the vector add function
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
int i = blockIdx.x;
// No for loop needed because the CUDA runtime
// will thread this ITER times
gpu_c[i] = gpu_a[i] + gpu_b[i];
}
int main() {
int *a, *b, *c;
int *gpu_a, *gpu_b, *gpu_c;
int *ha, *hb, *hc;
a = (int *)malloc(ITER * sizeof(int));
b = (int *)malloc(ITER * sizeof(int));
c = (int *)malloc(ITER * sizeof(int));
ha = (int *)malloc(ITER * sizeof(int));
hb = (int *)malloc(ITER * sizeof(int));
hc = (int *)malloc(ITER * sizeof(int));
// We need variables accessible to the GPU,
// so cudaMallocManaged provides these
cudaMallocManaged(&gpu_a, ITER * sizeof(int));
cudaMallocManaged(&gpu_b, ITER * sizeof(int));
cudaMallocManaged(&gpu_c, ITER * sizeof(int));
for (int i = 0; i < ITER; ++i) {
a[i] = i;
b[i] = i;
c[i] = i;
}
// Call the CPU function and time it
auto cpu_start = Clock::now();
vector_add_cpu(a, b, c, ITER);
auto cpu_end = Clock::now();
std::cout << "vector_add_cpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
<< " nanoseconds.\n";
// Call the GPU function and time it
// The triple angle brakets is a CUDA runtime extension that allows
// parameters of a CUDA kernel call to be passed.
// In this example, we are passing one thread block with ITER threads.
cudaMemcpy(gpu_a,a,ITER*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b,b,ITER*sizeof(int),cudaMemcpyHostToDevice);
auto gpu_start = Clock::now();
vector_add_gpu <<<ITER, 1024>>> (gpu_a, gpu_b, gpu_c, ITER);
cudaDeviceSynchronize();
auto gpu_end = Clock::now();
std::cout << "vector_add_gpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
<< " nanoseconds.\n";
cudaMemcpy(hc,gpu_c,ITER*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(hb,gpu_b,ITER*sizeof(int),cudaMemcpyDeviceToHost);
gpuErrchk(cudaMemcpy(ha,gpu_a,ITER*sizeof(int),cudaMemcpyDeviceToHost));
std::cout<<c[0]<<"\t"<<hc[0]<<std::endl;
std::cout<<c[int(ITER/3)]<<"\t"<<hc[int(ITER/3)]<<std::endl;
std::cout<<c[int(2*ITER/3)]<<"\t"<<hc[int(2*ITER/3)]<<std::endl;
std::cout<<c[int(ITER-1)]<<"\t"<<hc[int(ITER-1)]<<std::endl;
// Free the GPU-function based memory allocations
cudaFree(a);
cudaFree(b);
cudaFree(c);
// Free the CPU-function based memory allocations
free(a);
free(b);
free(c);
return 0;
} | .file "tmpxft_0003b58d_00000000-6_gpu-example.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3775:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3775:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z14vector_add_cpuPiS_S_i
.type _Z14vector_add_cpuPiS_S_i, @function
_Z14vector_add_cpuPiS_S_i:
.LFB3769:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L3
movslq %ecx, %rcx
leaq 0(,%rcx,4), %r8
movl $0, %eax
.L5:
movl (%rsi,%rax), %ecx
addl (%rdi,%rax), %ecx
movl %ecx, (%rdx,%rax)
addq $4, %rax
cmpq %r8, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE3769:
.size _Z14vector_add_cpuPiS_S_i, .-_Z14vector_add_cpuPiS_S_i
.globl _Z39__device_stub__Z14vector_add_gpuPiS_S_iPiS_S_i
.type _Z39__device_stub__Z14vector_add_gpuPiS_S_iPiS_S_i, @function
_Z39__device_stub__Z14vector_add_gpuPiS_S_iPiS_S_i:
.LFB3797:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14vector_add_gpuPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3797:
.size _Z39__device_stub__Z14vector_add_gpuPiS_S_iPiS_S_i, .-_Z39__device_stub__Z14vector_add_gpuPiS_S_iPiS_S_i
.globl _Z14vector_add_gpuPiS_S_i
.type _Z14vector_add_gpuPiS_S_i, @function
_Z14vector_add_gpuPiS_S_i:
.LFB3798:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z14vector_add_gpuPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3798:
.size _Z14vector_add_gpuPiS_S_i, .-_Z14vector_add_gpuPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "vector_add_cpu: "
.LC1:
.string " nanoseconds.\n"
.LC2:
.string "vector_add_gpu: "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "/home/ubuntu/Datasets/stackv2/train-structured/rmhsik/TFM/master/CUDATest/gpu-example.cu"
.section .rodata.str1.1
.LC4:
.string "GPUassert: %s %s %d\n"
.LC5:
.string "\t"
.text
.globl main
.type main, @function
main:
.LFB3770:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $4000000000, %r14d
movq %r14, %rdi
call malloc@PLT
movq %rax, %r12
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbp
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbx
movq %r14, %rdi
call malloc@PLT
movq %rax, (%rsp)
movq %r14, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
leaq 24(%rsp), %rdi
movl $1, %edx
movq %r14, %rsi
call cudaMallocManaged@PLT
leaq 32(%rsp), %rdi
movl $1, %edx
movq %r14, %rsi
call cudaMallocManaged@PLT
leaq 40(%rsp), %rdi
movl $1, %edx
movq %r14, %rsi
call cudaMallocManaged@PLT
movl $0, %eax
.L16:
movl %eax, (%r12,%rax,4)
movl %eax, 0(%rbp,%rax,4)
movl %eax, (%rbx,%rax,4)
addq $1, %rax
cmpq $1000000000, %rax
jne .L16
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r15
movl $1000000000, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq %r12, %rdi
call _Z14vector_add_cpuPiS_S_i
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r14
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r14, %rsi
subq %r15, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movl $1, %ecx
movl $4000000000, %r14d
movq %r14, %rdx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r15
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1000000000, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L22
.L17:
call cudaDeviceSynchronize@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r14
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r14, %rsi
subq %r15, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movl $2, %ecx
movl $4000000000, %r14d
movq %r14, %rdx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %r14, %rdx
movq 32(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %r14, %rdx
movq 24(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L23
movl (%rbx), %esi
leaq _ZSt4cout(%rip), %r14
movq %r14, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 0(%r13), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl 1333333332(%rbx), %esi
movq %r14, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 1333333332(%r13), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2666666664, %r15d
movl (%rbx,%r15), %esi
movq %r14, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 0(%r13,%r15), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq 1333333332(%r15), %r15
movl (%rbx,%r15), %esi
movq %r14, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 0(%r13,%r15), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %r12, %rdi
call cudaFree@PLT
movq %rbp, %rdi
call cudaFree@PLT
movq %rbx, %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
movl $1000000000, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z39__device_stub__Z14vector_add_gpuPiS_S_iPiS_S_i
jmp .L17
.L23:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $90, %r9d
leaq .LC3(%rip), %r8
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %r14d, %edi
call exit@PLT
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3770:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z14vector_add_gpuPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3800:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z14vector_add_gpuPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3800:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 1000000000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA runtime call with file/line context; terminate the
// process with the error code unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;                         // fast path: nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// CPU version of the vector add function
// Host reference implementation: c[k] = a[k] + b[k] for k in [0, n).
// All three buffers must hold at least n ints; n <= 0 is a no-op.
void vector_add_cpu(int *a, int *b, int *c, int n) {
    for (int k = 0; k < n; ++k)
        c[k] = a[k] + b[k];
}
// GPU version of the vector add function
// Device kernel: each block computes one element of the sum.
// Only blockIdx.x is used, so every thread of a block stores the same
// value to the same slot (redundant but race-free). The n parameter is
// unused; no bounds check is performed.
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
    const int idx = blockIdx.x;         // one element per block
    gpu_c[idx] = gpu_a[idx] + gpu_b[idx];
}
// Driver: runs the vector add on CPU and GPU, times both with
// std::chrono, and prints spot-check values from four positions.
int main() {
int *a, *b, *c;
int *gpu_a, *gpu_b, *gpu_c;
int *ha, *hb, *hc;
// NOTE(review): each malloc requests ITER * 4 bytes (~4 GB) and the
// result is written without a NULL check — confirm the host has RAM.
a = (int *)malloc(ITER * sizeof(int));
b = (int *)malloc(ITER * sizeof(int));
c = (int *)malloc(ITER * sizeof(int));
ha = (int *)malloc(ITER * sizeof(int));
hb = (int *)malloc(ITER * sizeof(int));
hc = (int *)malloc(ITER * sizeof(int));
// We need variables accessible to the GPU,
// so cudaMallocManaged provides these
cudaMallocManaged(&gpu_a, ITER * sizeof(int));
cudaMallocManaged(&gpu_b, ITER * sizeof(int));
cudaMallocManaged(&gpu_c, ITER * sizeof(int));
for (int i = 0; i < ITER; ++i) {
a[i] = i;
b[i] = i;
c[i] = i;
}
// Call the CPU function and time it
auto cpu_start = Clock::now();
vector_add_cpu(a, b, c, ITER);
auto cpu_end = Clock::now();
std::cout << "vector_add_cpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
<< " nanoseconds.\n";
// Call the GPU function and time it.
// The triple angle brackets are a CUDA runtime extension carrying the
// launch configuration: ITER blocks of 1024 threads each (the kernel
// indexes by blockIdx.x only, so the thread dimension is redundant).
cudaMemcpy(gpu_a,a,ITER*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b,b,ITER*sizeof(int),cudaMemcpyHostToDevice);
auto gpu_start = Clock::now();
vector_add_gpu <<<ITER, 1024>>> (gpu_a, gpu_b, gpu_c, ITER);
cudaDeviceSynchronize();
auto gpu_end = Clock::now();
std::cout << "vector_add_gpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
<< " nanoseconds.\n";
// Copy results back; only the last copy is error-checked via gpuErrchk.
cudaMemcpy(hc,gpu_c,ITER*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(hb,gpu_b,ITER*sizeof(int),cudaMemcpyDeviceToHost);
gpuErrchk(cudaMemcpy(ha,gpu_a,ITER*sizeof(int),cudaMemcpyDeviceToHost));
// Spot-check CPU vs GPU results at four indices.
std::cout<<c[0]<<"\t"<<hc[0]<<std::endl;
std::cout<<c[int(ITER/3)]<<"\t"<<hc[int(ITER/3)]<<std::endl;
std::cout<<c[int(2*ITER/3)]<<"\t"<<hc[int(2*ITER/3)]<<std::endl;
std::cout<<c[int(ITER-1)]<<"\t"<<hc[int(ITER-1)]<<std::endl;
// NOTE(review): a/b/c came from malloc, so cudaFree() on them is wrong
// (it returns an error that is silently ignored). The managed buffers
// gpu_a/gpu_b/gpu_c and the host buffers ha/hb/hc are never released.
// Intended cleanup is cudaFree(gpu_*) plus free(a/b/c/ha/hb/hc).
cudaFree(a);
cudaFree(b);
cudaFree(c);
// Free the CPU-function based memory allocations
free(a);
free(b);
free(c);
return 0;
} | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 1000000000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP runtime call with file/line context; terminate the
// process with the error code unless abort is false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;                         // fast path: nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// CPU version of the vector add function
// Host reference implementation: c[k] = a[k] + b[k] for k in [0, n).
// All three buffers must hold at least n ints; n <= 0 is a no-op.
void vector_add_cpu(int *a, int *b, int *c, int n) {
    for (int k = 0; k < n; ++k)
        c[k] = a[k] + b[k];
}
// GPU version of the vector add function
// Device kernel: each block computes one element of the sum.
// Only blockIdx.x is used, so every thread of a block stores the same
// value to the same slot (redundant but race-free). The n parameter is
// unused; no bounds check is performed.
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
    const int idx = blockIdx.x;         // one element per block
    gpu_c[idx] = gpu_a[idx] + gpu_b[idx];
}
// Driver: runs the vector add on CPU and GPU, times both with
// std::chrono, and prints spot-check values from four positions.
// Returns 0 on success; exits early via gpuErrchk on a failed copy.
int main() {
    int *a, *b, *c;
    int *gpu_a, *gpu_b, *gpu_c;
    int *ha, *hb, *hc;
    // Host buffers: each malloc requests ITER * 4 bytes (~4 GB).
    a = (int *)malloc(ITER * sizeof(int));
    b = (int *)malloc(ITER * sizeof(int));
    c = (int *)malloc(ITER * sizeof(int));
    ha = (int *)malloc(ITER * sizeof(int));
    hb = (int *)malloc(ITER * sizeof(int));
    hc = (int *)malloc(ITER * sizeof(int));
    // Managed allocations are accessible from both host and device.
    hipMallocManaged(&gpu_a, ITER * sizeof(int));
    hipMallocManaged(&gpu_b, ITER * sizeof(int));
    hipMallocManaged(&gpu_c, ITER * sizeof(int));
    for (int i = 0; i < ITER; ++i) {
        a[i] = i;
        b[i] = i;
        c[i] = i;
    }
    // Call the CPU function and time it.
    auto cpu_start = Clock::now();
    vector_add_cpu(a, b, c, ITER);
    auto cpu_end = Clock::now();
    std::cout << "vector_add_cpu: "
    << std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
    << " nanoseconds.\n";
    // Launch configuration: ITER blocks of 1024 threads each (the kernel
    // indexes by blockIdx.x only, so the thread dimension is redundant).
    hipMemcpy(gpu_a,a,ITER*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(gpu_b,b,ITER*sizeof(int),hipMemcpyHostToDevice);
    auto gpu_start = Clock::now();
    vector_add_gpu <<<ITER, 1024>>> (gpu_a, gpu_b, gpu_c, ITER);
    hipDeviceSynchronize();
    auto gpu_end = Clock::now();
    std::cout << "vector_add_gpu: "
    << std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
    << " nanoseconds.\n";
    // Copy results back; only the last copy is error-checked via gpuErrchk.
    hipMemcpy(hc,gpu_c,ITER*sizeof(int),hipMemcpyDeviceToHost);
    hipMemcpy(hb,gpu_b,ITER*sizeof(int),hipMemcpyDeviceToHost);
    gpuErrchk(hipMemcpy(ha,gpu_a,ITER*sizeof(int),hipMemcpyDeviceToHost));
    // Spot-check CPU vs GPU results at four indices.
    std::cout<<c[0]<<"\t"<<hc[0]<<std::endl;
    std::cout<<c[int(ITER/3)]<<"\t"<<hc[int(ITER/3)]<<std::endl;
    std::cout<<c[int(2*ITER/3)]<<"\t"<<hc[int(2*ITER/3)]<<std::endl;
    std::cout<<c[int(ITER-1)]<<"\t"<<hc[int(ITER-1)]<<std::endl;
    // FIX: the original called hipFree() on the host malloc'd pointers
    // (a, b, c) — an error that was silently ignored — and leaked the
    // managed buffers and ha/hb/hc. Release device memory with hipFree
    // and every host buffer with free.
    hipFree(gpu_a);
    hipFree(gpu_b);
    hipFree(gpu_c);
    free(a);
    free(b);
    free(c);
    free(ha);
    free(hb);
    free(hc);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 1000000000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP runtime call with file/line context; terminate the
// process with the error code unless abort is false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;                         // fast path: nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// CPU version of the vector add function
// Host reference implementation: c[k] = a[k] + b[k] for k in [0, n).
// All three buffers must hold at least n ints; n <= 0 is a no-op.
void vector_add_cpu(int *a, int *b, int *c, int n) {
    for (int k = 0; k < n; ++k)
        c[k] = a[k] + b[k];
}
// GPU version of the vector add function
// Device kernel: each block computes one element of the sum.
// Only blockIdx.x is used, so every thread of a block stores the same
// value to the same slot (redundant but race-free). The n parameter is
// unused; no bounds check is performed.
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
    const int idx = blockIdx.x;         // one element per block
    gpu_c[idx] = gpu_a[idx] + gpu_b[idx];
}
// Driver: runs the vector add on CPU and GPU, times both with
// std::chrono, and prints spot-check values from four positions.
int main() {
int *a, *b, *c;
int *gpu_a, *gpu_b, *gpu_c;
int *ha, *hb, *hc;
// NOTE(review): each malloc requests ITER * 4 bytes (~4 GB) and the
// result is written without a NULL check — confirm the host has RAM.
a = (int *)malloc(ITER * sizeof(int));
b = (int *)malloc(ITER * sizeof(int));
c = (int *)malloc(ITER * sizeof(int));
ha = (int *)malloc(ITER * sizeof(int));
hb = (int *)malloc(ITER * sizeof(int));
hc = (int *)malloc(ITER * sizeof(int));
// Managed allocations are accessible from both host and device.
hipMallocManaged(&gpu_a, ITER * sizeof(int));
hipMallocManaged(&gpu_b, ITER * sizeof(int));
hipMallocManaged(&gpu_c, ITER * sizeof(int));
for (int i = 0; i < ITER; ++i) {
a[i] = i;
b[i] = i;
c[i] = i;
}
// Call the CPU function and time it
auto cpu_start = Clock::now();
vector_add_cpu(a, b, c, ITER);
auto cpu_end = Clock::now();
std::cout << "vector_add_cpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
<< " nanoseconds.\n";
// Call the GPU function and time it.
// The triple angle brackets carry the launch configuration: ITER blocks
// of 1024 threads each (the kernel indexes by blockIdx.x only, so the
// thread dimension is redundant).
hipMemcpy(gpu_a,a,ITER*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(gpu_b,b,ITER*sizeof(int),hipMemcpyHostToDevice);
auto gpu_start = Clock::now();
vector_add_gpu <<<ITER, 1024>>> (gpu_a, gpu_b, gpu_c, ITER);
hipDeviceSynchronize();
auto gpu_end = Clock::now();
std::cout << "vector_add_gpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
<< " nanoseconds.\n";
// Copy results back; only the last copy is error-checked via gpuErrchk.
hipMemcpy(hc,gpu_c,ITER*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(hb,gpu_b,ITER*sizeof(int),hipMemcpyDeviceToHost);
gpuErrchk(hipMemcpy(ha,gpu_a,ITER*sizeof(int),hipMemcpyDeviceToHost));
// Spot-check CPU vs GPU results at four indices.
std::cout<<c[0]<<"\t"<<hc[0]<<std::endl;
std::cout<<c[int(ITER/3)]<<"\t"<<hc[int(ITER/3)]<<std::endl;
std::cout<<c[int(2*ITER/3)]<<"\t"<<hc[int(2*ITER/3)]<<std::endl;
std::cout<<c[int(ITER-1)]<<"\t"<<hc[int(ITER-1)]<<std::endl;
// NOTE(review): a/b/c came from malloc, so hipFree() on them is wrong
// (it returns an error that is silently ignored). The managed buffers
// gpu_a/gpu_b/gpu_c and the host buffers ha/hb/hc are never released.
// Intended cleanup is hipFree(gpu_*) plus free(a/b/c/ha/hb/hc).
hipFree(a);
hipFree(b);
hipFree(c);
// Free the CPU-function based memory allocations
free(a);
free(b);
free(c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14vector_add_gpuPiS_S_i
.globl _Z14vector_add_gpuPiS_S_i
.p2align 8
.type _Z14vector_add_gpuPiS_S_i,@function
_Z14vector_add_gpuPiS_S_i:             ; gpu_c[i] = gpu_a[i] + gpu_b[i], i = workgroup id x
s_load_b128 s[4:7], s[0:1], 0x0        ; kernarg +0: s[4:5]=gpu_a, s[6:7]=gpu_b
s_mov_b32 s2, s15                      ; s15 = workgroup id x (blockIdx.x; see .amdhsa flags)
s_ashr_i32 s3, s15, 31                 ; sign-extend index to 64 bits
s_load_b64 s[0:1], s[0:1], 0x10        ; kernarg +16: s[0:1]=gpu_c (n at +24 is unused)
s_lshl_b64 s[2:3], s[2:3], 2           ; byte offset = i * sizeof(int)
s_waitcnt lgkmcnt(0)                   ; wait for kernarg loads
s_add_u32 s4, s4, s2                   ; s[4:5] = &gpu_a[i] (64-bit add with carry)
s_addc_u32 s5, s5, s3
s_add_u32 s6, s6, s2                   ; s[6:7] = &gpu_b[i]
s_addc_u32 s7, s7, s3
s_load_b32 s4, s[4:5], 0x0             ; s4 = gpu_a[i]
s_load_b32 s5, s[6:7], 0x0             ; s5 = gpu_b[i]
s_waitcnt lgkmcnt(0)                   ; wait for both scalar loads
s_add_i32 s4, s5, s4                   ; s4 = gpu_a[i] + gpu_b[i]
s_delay_alu instid0(SALU_CYCLE_1)      ; gfx11 scheduling hint
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 ; v0 = addr offset 0, v1 = sum
s_add_u32 s0, s0, s2                   ; s[0:1] = &gpu_c[i]
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]        ; gpu_c[i] = sum (every lane stores the same value)
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)   ; release VGPRs before program end (gfx11)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14vector_add_gpuPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14vector_add_gpuPiS_S_i, .Lfunc_end0-_Z14vector_add_gpuPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14vector_add_gpuPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z14vector_add_gpuPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.