system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0011ef86_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z9test_ProgPiS_iPiS_i .type _Z31__device_stub__Z9test_ProgPiS_iPiS_i, @function _Z31__device_stub__Z9test_ProgPiS_iPiS_i: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9test_ProgPiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z9test_ProgPiS_iPiS_i, .-_Z31__device_stub__Z9test_ProgPiS_iPiS_i .globl _Z9test_ProgPiS_i .type _Z9test_ProgPiS_i, @function _Z9test_ProgPiS_i: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z9test_ProgPiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z9test_ProgPiS_i, .-_Z9test_ProgPiS_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z9test_ProgPiS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z9test_ProgPiS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel.hip" .globl _Z24__device_stub__test_ProgPiS_i # -- Begin function _Z24__device_stub__test_ProgPiS_i .p2align 4, 0x90 .type _Z24__device_stub__test_ProgPiS_i,@function _Z24__device_stub__test_ProgPiS_i: # @_Z24__device_stub__test_ProgPiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9test_ProgPiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__test_ProgPiS_i, .Lfunc_end0-_Z24__device_stub__test_ProgPiS_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9test_ProgPiS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z9test_ProgPiS_i,@object # @_Z9test_ProgPiS_i .section .rodata,"a",@progbits .globl _Z9test_ProgPiS_i .p2align 3, 0x0 _Z9test_ProgPiS_i: .quad _Z24__device_stub__test_ProgPiS_i .size _Z9test_ProgPiS_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9test_ProgPiS_i" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__test_ProgPiS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9test_ProgPiS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void kernelCulcRhoReal(const int N, double *rho, double *q, double *p, const double lambda, const double g) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { double qi = q[i]; double pi = p[i]; rho[i] = 0.5 * qi * qi; rho[i] += 0.5 * pi * pi; rho[i] += (lambda / 4.0) * qi * qi * qi * qi; rho[i] += (g / 6.0) * qi * qi * qi * qi * qi * qi; } }
code for sm_80 Function : _Z17kernelCulcRhoRealiPdS_S_dd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */ /* 0x001fca00078e0200 */ /*0040*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x160], PT ; /* 0x0000580007007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R6, RZ, RZ, 0x8 ; /* 0x00000008ff067424 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IMAD.WIDE R4, R7, R6, c[0x0][0x178] ; /* 0x00005e0007047625 */ /* 0x000fc800078e0206 */ /*0090*/ IMAD.WIDE R2, R7, R6, c[0x0][0x170] ; /* 0x00005c0007027625 */ /* 0x000fe400078e0206 */ /*00a0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*00b0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1b00 */ /*00c0*/ MUFU.RCP64H R11, 6 ; /* 0x40180000000b7908 */ /* 0x000e220000001800 */ /*00d0*/ IMAD.MOV.U32 R8, RZ, RZ, 0x0 ; /* 0x00000000ff087424 */ /* 0x000fe400078e00ff */ /*00e0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x40180000 ; /* 0x40180000ff097424 */ /* 0x000fc400078e00ff */ /*00f0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */ /* 0x000fe400078e00ff */ /*0100*/ IMAD.WIDE R6, R7, R6, c[0x0][0x168] ; /* 0x00005a0007067625 */ /* 0x000fc800078e0206 */ /*0110*/ DFMA R12, R10, -R8, 1 ; /* 0x3ff000000a0c742b */ /* 0x001e0c0000000808 */ /*0120*/ DFMA R12, R12, R12, R12 ; /* 0x0000000c0c0c722b */ /* 0x001e0c000000000c */ /*0130*/ DFMA R12, R10, R12, R10 ; /* 0x0000000c0a0c722b */ /* 0x001e0c000000000a */ /*0140*/ DFMA R10, R12, -R8, 1 ; /* 0x3ff000000c0a742b */ /* 0x001e0c0000000808 */ /*0150*/ DFMA R14, R12, R10, R12 ; /* 0x0000000a0c0e722b */ /* 0x001064000000000c */ /*0160*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff0c7624 */ /* 0x001fe400078e00ff */ /*0170*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff0d7624 */ /* 0x000fe400078e00ff */ /*0180*/ DMUL R16, R14, c[0x0][0x188] ; /* 0x000062000e107a28 */ /* 0x002e080000000000 */ /*0190*/ DMUL R12, R12, 0.25 ; /* 0x3fd000000c0c7828 */ /* 0x000fc80000000000 */ /*01a0*/ DFMA R8, R16, -R8, c[0x0][0x188] ; /* 0x000062001008762b */ /* 0x001e0c0000000808 */ /*01b0*/ DFMA R8, R14, R8, R16 ; /* 0x000000080e08722b */ /* 0x001e140000000010 */ /*01c0*/ FFMA R0, RZ, 2.375, R9 ; /* 0x40180000ff007823 */ /* 0x001fca0000000009 */ /*01d0*/ FSETP.GT.AND P0, PT, |R0|, 1.469367938527859385e-39, PT ; /* 0x001000000000780b */ /* 0x000fe20003f04200 */ /*01e0*/ DMUL R10, R4, 0.5 ; /* 0x3fe00000040a7828 */ /* 0x004e080000000000 */ /*01f0*/ DMUL R12, R2, R12 ; /* 0x0000000c020c7228 */ /* 0x008e480000000000 */ /*0200*/ DMUL R10, R4, R10 ; /* 0x0000000a040a7228 */ /* 0x001fc80000000000 */ /*0210*/ DMUL R4, R2, 0.5 ; /* 0x3fe0000002047828 */ /* 0x000e080000000000 */ /*0220*/ DMUL R12, R2, R12 ; /* 0x0000000c020c7228 */ /* 0x002e480000000000 */ /*0230*/ DFMA R4, R2.reuse, R4, R10 ; /* 0x000000040204722b */ /* 0x0411e4000000000a */ /*0240*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0a7624 */ /* 0x001fe400078e00ff */ /*0250*/ DMUL R12, R2, R12 ; /* 0x0000000c020c7228 */ /* 0x002e060000000000 */ /*0260*/ FSETP.GEU.AND P1, PT, |R10|, 6.5827683646048100446e-37, PT ; /* 0x036000000a00780b */ /* 0x000fc60003f2e200 */ /*0270*/ DFMA R4, R2, R12, R4 ; /* 0x0000000c0204722b */ /* 0x0010540000000004 */ /*0280*/ @P0 BRA P1, 0x2d0 ; /* 0x0000004000000947 */ /* 0x000fea0000800000 */ /*0290*/ MOV R0, 0x2b0 ; /* 0x000002b000007802 */ /* 0x003fe40000000f00 */ /*02a0*/ CALL.REL.NOINC 0x350 ; /* 0x000000a000007944 */ /* 0x000fea0003c00000 */ /*02b0*/ IMAD.MOV.U32 R8, RZ, RZ, R14 ; /* 0x000000ffff087224 */ /* 0x000fe400078e000e */ /*02c0*/ IMAD.MOV.U32 R9, RZ, RZ, R15 ; /* 0x000000ffff097224 */ /* 0x000fcc00078e000f */ /*02d0*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x003e0c0000000000 */ /*02e0*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*02f0*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*0300*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*0310*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*0320*/ DFMA R4, R2, R8, R4 ; /* 0x000000080204722b */ /* 0x001e0e0000000004 */ /*0330*/ STG.E.64 [R6.64], R4 ; /* 0x0000000406007986 */ /* 0x001fe2000c101b04 */ /*0340*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0350*/ IMAD.MOV.U32 R9, RZ, RZ, 0x3ff80000 ; /* 0x3ff80000ff097424 */ /* 0x000fe400078e00ff */ /*0360*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0c7624 */ /* 0x000fe400078e00ff */ /*0370*/ MUFU.RCP64H R11, R9 ; /* 0x00000009000b7308 */ /* 0x000e220000001800 */ /*0380*/ IMAD.MOV.U32 R8, RZ, RZ, 0x0 ; /* 0x00000000ff087424 */ /* 0x000fe400078e00ff */ /*0390*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */ /* 0x000fe200078e00ff */ /*03a0*/ FSETP.GEU.AND P1, PT, |R12|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000c00780b */ /* 0x040fe20003f2e200 */ /*03b0*/ IMAD.MOV.U32 R18, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff127424 */ /* 0x000fe200078e00ff */ /*03c0*/ LOP3.LUT R13, R12, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000c0d7812 */ /* 0x000fe200078ec0ff */ /*03d0*/ IMAD.MOV.U32 R23, RZ, RZ, 0x40100000 ; /* 0x40100000ff177424 */ /* 0x000fc600078e00ff */ /*03e0*/ ISETP.GE.U32.AND P0, PT, R13, 0x40100000, PT ; /* 0x401000000d00780c */ /* 0x000fe40003f06070 */ /*03f0*/ MOV R22, R13 ; /* 0x0000000d00167202 */ /* 0x000fe40000000f00 */ /*0400*/ IADD3 R24, R23, -0x1, RZ ; /* 0xffffffff17187810 */ /* 0x000fe20007ffe0ff */ /*0410*/ DFMA R14, R10, -R8, 1 ; /* 0x3ff000000a0e742b */ /* 0x001e0c0000000808 */ /*0420*/ DFMA R16, R14, R14, R14 ; /* 0x0000000e0e10722b */ /* 0x001064000000000e */ /*0430*/ SEL R15, R18, 0x63400000, !P0 ; /* 0x63400000120f7807 */ /* 0x001fc80004000000 */ /*0440*/ @!P1 LOP3.LUT R14, R15.reuse, 0x80000000, R12.reuse, 0xf8, !PT ; /* 0x800000000f0e9812 */ /* 0x140fe200078ef80c */ /*0450*/ DFMA R18, R10, R16, R10 ; /* 0x000000100a12722b */ /* 0x002064000000000a */ /*0460*/ LOP3.LUT R11, R15, 0x800fffff, R12, 0xf8, !PT ; /* 0x800fffff0f0b7812 */ /* 0x001fe200078ef80c */ /*0470*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0a7624 */ /* 0x000fe200078e00ff */ /*0480*/ @!P1 LOP3.LUT R17, R14, 0x100000, RZ, 0xfc, !PT ; /* 0x001000000e119812 */ /* 0x000fe200078efcff */ /*0490*/ @!P1 IMAD.MOV.U32 R16, RZ, RZ, RZ ; /* 0x000000ffff109224 */ /* 0x000fe200078e00ff */ /*04a0*/ DFMA R20, R18, -R8, 1 ; /* 0x3ff000001214742b */ /* 0x002e0a0000000808 */ /*04b0*/ @!P1 DFMA R10, R10, 2, -R16 ; /* 0x400000000a0a982b */ /* 0x000e480000000810 */ /*04c0*/ DFMA R16, R18, R20, R18 ; /* 0x000000141210722b */ /* 0x001e0c0000000012 */ /*04d0*/ @!P1 LOP3.LUT R22, R11, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000b169812 */ /* 0x002fe200078ec0ff */ /*04e0*/ DMUL R18, R16, R10 ; /* 0x0000000a10127228 */ /* 0x001e060000000000 */ /*04f0*/ IADD3 R14, R22, -0x1, RZ ; /* 0xffffffff160e7810 */ /* 0x000fc60007ffe0ff */ /*0500*/ DFMA R20, R18, -R8, R10 ; /* 0x800000081214722b */ /* 0x001e22000000000a */ /*0510*/ ISETP.GT.U32.AND P0, PT, R14, 0x7feffffe, PT ; /* 0x7feffffe0e00780c */ /* 0x000fc80003f04070 */ /*0520*/ ISETP.GT.U32.OR P0, PT, R24, 0x7feffffe, P0 ; /* 0x7feffffe1800780c */ /* 0x000fe20000704470 */ /*0530*/ DFMA R16, R16, R20, R18 ; /* 0x000000141010722b */ /* 0x0010580000000012 */ /*0540*/ @P0 BRA 0x6f0 ; /* 0x000001a000000947 */ /* 0x000fea0003800000 */ /*0550*/ IADD3 R13, R13, -0x40100000, RZ ; /* 0xbff000000d0d7810 */ /* 0x003fe20007ffe0ff */ /*0560*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */ /* 0x000fc600078e00ff */ /*0570*/ IMNMX R13, R13, -0x46a00000, !PT ; /* 0xb96000000d0d7817 */ /* 0x000fc80007800200 */ /*0580*/ IMNMX R12, R13, 0x46a00000, PT ; /* 0x46a000000d0c7817 */ /* 0x000fca0003800200 */ /*0590*/ IMAD.IADD R12, R12, 0x1, -R15 ; /* 0x000000010c0c7824 */ /* 0x000fca00078e0a0f */ /*05a0*/ IADD3 R19, R12, 0x7fe00000, RZ ; /* 0x7fe000000c137810 */ /* 0x000fcc0007ffe0ff */ /*05b0*/ DMUL R14, R16, R18 ; /* 0x00000012100e7228 */ /* 0x000e140000000000 */ /*05c0*/ FSETP.GTU.AND P0, PT, |R15|, 1.469367938527859385e-39, PT ; /* 0x001000000f00780b */ /* 0x001fda0003f0c200 */ /*05d0*/ @P0 BRA 0x820 ; /* 0x0000024000000947 */ /* 0x000fea0003800000 */ /*05e0*/ DFMA R8, R16, -R8, R10 ; /* 0x800000081008722b */ /* 0x000e22000000000a */ /*05f0*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */ /* 0x000fd200078e00ff */ /*0600*/ FSETP.NEU.AND P0, PT, R9.reuse, RZ, PT ; /* 0x000000ff0900720b */ /* 0x041fe40003f0d000 */ /*0610*/ LOP3.LUT R8, R9, 0x40180000, RZ, 0x3c, !PT ; /* 0x4018000009087812 */ /* 0x000fc800078e3cff */ /*0620*/ LOP3.LUT R11, R8, 0x80000000, RZ, 0xc0, !PT ; /* 0x80000000080b7812 */ /* 0x000fc800078ec0ff */ /*0630*/ LOP3.LUT R19, R11, R19, RZ, 0xfc, !PT ; /* 0x000000130b137212 */ /* 0x000fc600078efcff */ /*0640*/ @!P0 BRA 0x820 ; /* 0x000001d000008947 */ /* 0x000fea0003800000 */ /*0650*/ IMAD.MOV R9, RZ, RZ, -R12 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0a0c */ /*0660*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */ /* 0x000fcc00078e00ff */ /*0670*/ DFMA R8, R14, -R8, R16 ; /* 0x800000080e08722b */ /* 0x000e080000000010 */ /*0680*/ DMUL.RP R16, R16, R18 ; /* 0x0000001210107228 */ /* 0x000e640000008000 */ /*0690*/ IADD3 R8, -R12, -0x43300000, RZ ; /* 0xbcd000000c087810 */ /* 0x001fc80007ffe1ff */ /*06a0*/ FSETP.NEU.AND P0, PT, |R9|, R8, PT ; /* 0x000000080900720b */ /* 0x000fc80003f0d200 */ /*06b0*/ LOP3.LUT R11, R17, R11, RZ, 0x3c, !PT ; /* 0x0000000b110b7212 */ /* 0x002fe400078e3cff */ /*06c0*/ FSEL R14, R16, R14, !P0 ; /* 0x0000000e100e7208 */ /* 0x000fe40004000000 */ /*06d0*/ FSEL R15, R11, R15, !P0 ; /* 0x0000000f0b0f7208 */ /* 0x000fe20004000000 */ /*06e0*/ BRA 0x820 ; /* 0x0000013000007947 */ /* 0x000fea0003800000 */ /*06f0*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff087624 */ /* 0x003fe400078e00ff */ /*0700*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff097624 */ /* 0x000fcc00078e00ff */ /*0710*/ DSETP.NAN.AND P0, PT, R8, c[0x0][0x188], PT ; /* 0x000062000800762a */ /* 0x000e1c0003f08000 */ /*0720*/ @P0 BRA 0x800 ; /* 0x000000d000000947 */ /* 0x001fea0003800000 */ /*0730*/ ISETP.NE.AND P0, PT, R22, R23, PT ; /* 0x000000171600720c */ /* 0x000fe20003f05270 */ /*0740*/ IMAD.MOV.U32 R15, RZ, RZ, -0x80000 ; /* 0xfff80000ff0f7424 */ /* 0x000fe200078e00ff */ /*0750*/ MOV R14, 0x0 ; /* 0x00000000000e7802 */ /* 0x000fd60000000f00 */ /*0760*/ @!P0 BRA 0x820 ; /* 0x000000b000008947 */ /* 0x000fea0003800000 */ /*0770*/ ISETP.NE.AND P0, PT, R22, 0x7ff00000, PT ; /* 0x7ff000001600780c */ /* 0x000fe40003f05270 */ /*0780*/ LOP3.LUT R12, R12, 0x40180000, RZ, 0x3c, !PT ; /* 0x401800000c0c7812 */ /* 0x000fe400078e3cff */ /*0790*/ ISETP.EQ.OR P0, PT, R23, RZ, !P0 ; /* 0x000000ff1700720c */ /* 0x000fe40004702670 */ /*07a0*/ LOP3.LUT R15, R12, 0x80000000, RZ, 0xc0, !PT ; /* 0x800000000c0f7812 */ /* 0x000fd600078ec0ff */ /*07b0*/ @P0 LOP3.LUT R8, R15, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000f080812 */ /* 0x000fe200078efcff */ /*07c0*/ @!P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e8224 */ /* 0x000fe400078e00ff */ /*07d0*/ @P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e0224 */ /* 0x000fe400078e00ff */ /*07e0*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, R8 ; /* 0x000000ffff0f0224 */ /* 0x000fe200078e0008 */ /*07f0*/ BRA 0x820 ; /* 0x0000002000007947 */ /* 0x000fea0003800000 */ /*0800*/ LOP3.LUT R15, R12, 0x80000, RZ, 0xfc, !PT ; /* 0x000800000c0f7812 */ /* 0x000fe200078efcff */ /*0810*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0e7624 */ /* 0x000fe400078e00ff */ /*0820*/ IMAD.MOV.U32 R8, RZ, RZ, R0 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0000 */ /*0830*/ IMAD.MOV.U32 R9, RZ, RZ, 0x0 ; /* 0x00000000ff097424 */ /* 0x000fc800078e00ff */ /*0840*/ RET.REL.NODEC R8 0x0 ; /* 0xfffff7b008007950 */ /* 0x000fea0003c3ffff */ /*0850*/ BRA 0x850; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0860*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0870*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0880*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0890*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void kernelCulcRhoReal(const int N, double *rho, double *q, double *p, const double lambda, const double g) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { double qi = q[i]; double pi = p[i]; rho[i] = 0.5 * qi * qi; rho[i] += 0.5 * pi * pi; rho[i] += (lambda / 4.0) * qi * qi * qi * qi; rho[i] += (g / 6.0) * qi * qi * qi * qi * qi * qi; } }
.file "tmpxft_000721d6_00000000-6_kernelCulcRhoReal.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd .type _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd, @function _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movl %edi, 44(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movsd %xmm0, 8(%rsp) movsd %xmm1, (%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 44(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movq %rsp, %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z17kernelCulcRhoRealiPdS_S_dd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd, .-_Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd .globl _Z17kernelCulcRhoRealiPdS_S_dd .type _Z17kernelCulcRhoRealiPdS_S_dd, @function _Z17kernelCulcRhoRealiPdS_S_dd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z17kernelCulcRhoRealiPdS_S_dd, .-_Z17kernelCulcRhoRealiPdS_S_dd .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z17kernelCulcRhoRealiPdS_S_dd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17kernelCulcRhoRealiPdS_S_dd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void kernelCulcRhoReal(const int N, double *rho, double *q, double *p, const double lambda, const double g) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { double qi = q[i]; double pi = p[i]; rho[i] = 0.5 * qi * qi; rho[i] += 0.5 * pi * pi; rho[i] += (lambda / 4.0) * qi * qi * qi * qi; rho[i] += (g / 6.0) * qi * qi * qi * qi * qi * qi; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernelCulcRhoReal(const int N, double *rho, double *q, double *p, const double lambda, const double g) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { double qi = q[i]; double pi = p[i]; rho[i] = 0.5 * qi * qi; rho[i] += 0.5 * pi * pi; rho[i] += (lambda / 4.0) * qi * qi * qi * qi; rho[i] += (g / 6.0) * qi * qi * qi * qi * qi * qi; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernelCulcRhoReal(const int N, double *rho, double *q, double *p, const double lambda, const double g) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { double qi = q[i]; double pi = p[i]; rho[i] = 0.5 * qi * qi; rho[i] += 0.5 * pi * pi; rho[i] += (lambda / 4.0) * qi * qi * qi * qi; rho[i] += (g / 6.0) * qi * qi * qi * qi * qi * qi; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17kernelCulcRhoRealiPdS_S_dd .globl _Z17kernelCulcRhoRealiPdS_S_dd .p2align 8 .type _Z17kernelCulcRhoRealiPdS_S_dd,@function _Z17kernelCulcRhoRealiPdS_S_dd: s_clause 0x1 s_load_b32 s2, s[0:1], 0x3c s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[8:9], s[0:1], 0x28 s_mov_b32 s2, 0 s_mov_b32 s3, 0x40180000 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) v_lshlrev_b64 v[0:1], 3, v[1:2] s_waitcnt lgkmcnt(0) v_div_scale_f64 v[3:4], null, s[2:3], s[2:3], s[8:9] s_load_b256 s[0:7], s[0:1], 0x8 s_waitcnt lgkmcnt(0) v_add_co_u32 v7, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v8, vcc_lo, s3, v1, vcc_lo v_add_co_u32 v11, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v12, vcc_lo, s5, v1, vcc_lo global_load_b64 v[7:8], v[7:8], off global_load_b64 v[11:12], v[11:12], off v_rcp_f64_e32 v[5:6], v[3:4] s_waitcnt_depctr 0xfff v_fma_f64 v[9:10], -v[3:4], v[5:6], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[5:6], v[5:6], v[9:10], v[5:6] v_fma_f64 v[9:10], -v[3:4], v[5:6], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fma_f64 v[5:6], v[5:6], v[9:10], v[5:6] v_div_scale_f64 v[9:10], vcc_lo, s[8:9], 0x40180000, s[8:9] v_mul_f64 v[13:14], v[9:10], v[5:6] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[2:3], -v[3:4], v[13:14], v[9:10] v_div_fmas_f64 v[2:3], v[2:3], v[5:6], v[13:14] v_ldexp_f64 v[4:5], s[6:7], -2 v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) v_div_fixup_f64 v[2:3], v[2:3], 0x40180000, s[8:9] s_waitcnt vmcnt(1) v_mul_f64 v[4:5], v[4:5], v[7:8] v_mul_f64 v[13:14], v[7:8], 0.5 s_waitcnt vmcnt(0) v_mul_f64 v[9:10], v[11:12], 0.5 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_mul_f64 v[2:3], v[2:3], v[7:8] v_mul_f64 v[4:5], v[7:8], v[4:5] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f64 v[9:10], v[11:12], v[9:10] v_mul_f64 v[2:3], v[7:8], v[2:3] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f64 v[4:5], v[7:8], v[4:5] v_fma_f64 v[9:10], v[7:8], v[13:14], v[9:10] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_f64 v[2:3], v[7:8], v[2:3] v_fma_f64 v[4:5], v[7:8], v[4:5], v[9:10] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f64 v[2:3], v[7:8], v[2:3] v_mul_f64 v[2:3], v[7:8], v[2:3] s_delay_alu instid0(VALU_DEP_1) v_fma_f64 v[2:3], v[7:8], v[2:3], v[4:5] global_store_b64 v[0:1], v[2:3], off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17kernelCulcRhoRealiPdS_S_dd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 15 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17kernelCulcRhoRealiPdS_S_dd, .Lfunc_end0-_Z17kernelCulcRhoRealiPdS_S_dd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 8 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17kernelCulcRhoRealiPdS_S_dd .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17kernelCulcRhoRealiPdS_S_dd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 15 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernelCulcRhoReal(const int N, double *rho, double *q, double *p, const double lambda, const double g) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { double qi = q[i]; double pi = p[i]; rho[i] = 0.5 * qi * qi; rho[i] += 0.5 * pi * pi; rho[i] += (lambda / 4.0) * qi * qi * qi * qi; rho[i] += (g / 6.0) * qi * qi * qi * qi * qi * qi; } }
.text .file "kernelCulcRhoReal.hip" .globl _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd # -- Begin function _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .p2align 4, 0x90 .type _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd,@function _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd: # @_Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 4(%rsp) movq %rsi, 88(%rsp) movq %rdx, 80(%rsp) movq %rcx, 72(%rsp) movsd %xmm0, 64(%rsp) movsd %xmm1, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 88(%rsp), %rax movq %rax, 104(%rsp) leaq 80(%rsp), %rax movq %rax, 112(%rsp) leaq 72(%rsp), %rax movq %rax, 120(%rsp) leaq 64(%rsp), %rax movq %rax, 128(%rsp) leaq 56(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z17kernelCulcRhoRealiPdS_S_dd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd, .Lfunc_end0-_Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17kernelCulcRhoRealiPdS_S_dd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17kernelCulcRhoRealiPdS_S_dd,@object # @_Z17kernelCulcRhoRealiPdS_S_dd .section .rodata,"a",@progbits .globl _Z17kernelCulcRhoRealiPdS_S_dd .p2align 3, 0x0 _Z17kernelCulcRhoRealiPdS_S_dd: .quad _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .size _Z17kernelCulcRhoRealiPdS_S_dd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17kernelCulcRhoRealiPdS_S_dd" .size .L__unnamed_1, 31 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17kernelCulcRhoRealiPdS_S_dd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z17kernelCulcRhoRealiPdS_S_dd .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */ /* 0x001fca00078e0200 */ /*0040*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x160], PT ; /* 0x0000580007007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ IMAD.MOV.U32 R6, RZ, RZ, 0x8 ; /* 0x00000008ff067424 */ /* 0x000fe200078e00ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0080*/ IMAD.WIDE R4, R7, R6, c[0x0][0x178] ; /* 0x00005e0007047625 */ /* 0x000fc800078e0206 */ /*0090*/ IMAD.WIDE R2, R7, R6, c[0x0][0x170] ; /* 0x00005c0007027625 */ /* 0x000fe400078e0206 */ /*00a0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1b00 */ /*00b0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ee2000c1e1b00 */ /*00c0*/ MUFU.RCP64H R11, 6 ; /* 0x40180000000b7908 */ /* 0x000e220000001800 */ /*00d0*/ IMAD.MOV.U32 R8, RZ, RZ, 0x0 ; /* 0x00000000ff087424 */ /* 0x000fe400078e00ff */ /*00e0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x40180000 ; /* 0x40180000ff097424 */ /* 0x000fc400078e00ff */ /*00f0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */ /* 0x000fe400078e00ff */ /*0100*/ IMAD.WIDE R6, R7, R6, c[0x0][0x168] ; /* 0x00005a0007067625 */ /* 0x000fc800078e0206 */ /*0110*/ DFMA R12, R10, -R8, 1 ; /* 0x3ff000000a0c742b */ /* 0x001e0c0000000808 */ /*0120*/ DFMA R12, R12, R12, R12 ; /* 0x0000000c0c0c722b */ /* 0x001e0c000000000c */ /*0130*/ DFMA R12, R10, R12, R10 ; /* 0x0000000c0a0c722b */ /* 0x001e0c000000000a */ /*0140*/ DFMA R10, R12, -R8, 1 ; /* 0x3ff000000c0a742b */ /* 0x001e0c0000000808 */ /*0150*/ DFMA R14, R12, R10, R12 ; /* 0x0000000a0c0e722b */ /* 0x001064000000000c */ /*0160*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff0c7624 */ /* 0x001fe400078e00ff */ /*0170*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff0d7624 */ /* 0x000fe400078e00ff */ /*0180*/ DMUL R16, R14, c[0x0][0x188] ; /* 0x000062000e107a28 */ /* 0x002e080000000000 */ /*0190*/ DMUL R12, R12, 0.25 ; /* 0x3fd000000c0c7828 */ /* 0x000fc80000000000 */ /*01a0*/ DFMA R8, R16, -R8, c[0x0][0x188] ; /* 0x000062001008762b */ /* 0x001e0c0000000808 */ /*01b0*/ DFMA R8, R14, R8, R16 ; /* 0x000000080e08722b */ /* 0x001e140000000010 */ /*01c0*/ FFMA R0, RZ, 2.375, R9 ; /* 0x40180000ff007823 */ /* 0x001fca0000000009 */ /*01d0*/ FSETP.GT.AND P0, PT, |R0|, 1.469367938527859385e-39, PT ; /* 0x001000000000780b */ /* 0x000fe20003f04200 */ /*01e0*/ DMUL R10, R4, 0.5 ; /* 0x3fe00000040a7828 */ /* 0x004e080000000000 */ /*01f0*/ DMUL R12, R2, R12 ; /* 0x0000000c020c7228 */ /* 0x008e480000000000 */ /*0200*/ DMUL R10, R4, R10 ; /* 0x0000000a040a7228 */ /* 0x001fc80000000000 */ /*0210*/ DMUL R4, R2, 0.5 ; /* 0x3fe0000002047828 */ /* 0x000e080000000000 */ /*0220*/ DMUL R12, R2, R12 ; /* 0x0000000c020c7228 */ /* 0x002e480000000000 */ /*0230*/ DFMA R4, R2.reuse, R4, R10 ; /* 0x000000040204722b */ /* 0x0411e4000000000a */ /*0240*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0a7624 */ /* 0x001fe400078e00ff */ /*0250*/ DMUL R12, R2, R12 ; /* 0x0000000c020c7228 */ /* 0x002e060000000000 */ /*0260*/ FSETP.GEU.AND P1, PT, |R10|, 6.5827683646048100446e-37, PT ; /* 0x036000000a00780b */ /* 0x000fc60003f2e200 */ /*0270*/ DFMA R4, R2, R12, R4 ; /* 0x0000000c0204722b */ /* 0x0010540000000004 */ /*0280*/ @P0 BRA P1, 0x2d0 ; /* 0x0000004000000947 */ /* 0x000fea0000800000 */ /*0290*/ MOV R0, 0x2b0 ; /* 0x000002b000007802 */ /* 0x003fe40000000f00 */ /*02a0*/ CALL.REL.NOINC 0x350 ; /* 0x000000a000007944 */ /* 0x000fea0003c00000 */ /*02b0*/ IMAD.MOV.U32 R8, RZ, RZ, R14 ; /* 0x000000ffff087224 */ /* 0x000fe400078e000e */ /*02c0*/ IMAD.MOV.U32 R9, RZ, RZ, R15 ; /* 0x000000ffff097224 */ /* 0x000fcc00078e000f */ /*02d0*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x003e0c0000000000 */ /*02e0*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*02f0*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*0300*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*0310*/ DMUL R8, R2, R8 ; /* 0x0000000802087228 */ /* 0x001e0c0000000000 */ /*0320*/ DFMA R4, R2, R8, R4 ; /* 0x000000080204722b */ /* 0x001e0e0000000004 */ /*0330*/ STG.E.64 [R6.64], R4 ; /* 0x0000000406007986 */ /* 0x001fe2000c101b04 */ /*0340*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0350*/ IMAD.MOV.U32 R9, RZ, RZ, 0x3ff80000 ; /* 0x3ff80000ff097424 */ /* 0x000fe400078e00ff */ /*0360*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0c7624 */ /* 0x000fe400078e00ff */ /*0370*/ MUFU.RCP64H R11, R9 ; /* 0x00000009000b7308 */ /* 0x000e220000001800 */ /*0380*/ IMAD.MOV.U32 R8, RZ, RZ, 0x0 ; /* 0x00000000ff087424 */ /* 0x000fe400078e00ff */ /*0390*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */ /* 0x000fe200078e00ff */ /*03a0*/ FSETP.GEU.AND P1, PT, |R12|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000c00780b */ /* 0x040fe20003f2e200 */ /*03b0*/ IMAD.MOV.U32 R18, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff127424 */ /* 0x000fe200078e00ff */ /*03c0*/ LOP3.LUT R13, R12, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000c0d7812 */ /* 0x000fe200078ec0ff */ /*03d0*/ IMAD.MOV.U32 R23, RZ, RZ, 0x40100000 ; /* 0x40100000ff177424 */ /* 0x000fc600078e00ff */ /*03e0*/ ISETP.GE.U32.AND P0, PT, R13, 0x40100000, PT ; /* 0x401000000d00780c */ /* 0x000fe40003f06070 */ /*03f0*/ MOV R22, R13 ; /* 0x0000000d00167202 */ /* 0x000fe40000000f00 */ /*0400*/ IADD3 R24, R23, -0x1, RZ ; /* 0xffffffff17187810 */ /* 0x000fe20007ffe0ff */ /*0410*/ DFMA R14, R10, -R8, 1 ; /* 0x3ff000000a0e742b */ /* 0x001e0c0000000808 */ /*0420*/ DFMA R16, R14, R14, R14 ; /* 0x0000000e0e10722b */ /* 0x001064000000000e */ /*0430*/ SEL R15, R18, 0x63400000, !P0 ; /* 0x63400000120f7807 */ /* 0x001fc80004000000 */ /*0440*/ @!P1 LOP3.LUT R14, R15.reuse, 0x80000000, R12.reuse, 0xf8, !PT ; /* 0x800000000f0e9812 */ /* 0x140fe200078ef80c */ /*0450*/ DFMA R18, R10, R16, R10 ; /* 0x000000100a12722b */ /* 0x002064000000000a */ /*0460*/ LOP3.LUT R11, R15, 0x800fffff, R12, 0xf8, !PT ; /* 0x800fffff0f0b7812 */ /* 0x001fe200078ef80c */ /*0470*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0a7624 */ /* 0x000fe200078e00ff */ /*0480*/ @!P1 LOP3.LUT R17, R14, 0x100000, RZ, 0xfc, !PT ; /* 0x001000000e119812 */ /* 0x000fe200078efcff */ /*0490*/ @!P1 IMAD.MOV.U32 R16, RZ, RZ, RZ ; /* 0x000000ffff109224 */ /* 0x000fe200078e00ff */ /*04a0*/ DFMA R20, R18, -R8, 1 ; /* 0x3ff000001214742b */ /* 0x002e0a0000000808 */ /*04b0*/ @!P1 DFMA R10, R10, 2, -R16 ; /* 0x400000000a0a982b */ /* 0x000e480000000810 */ /*04c0*/ DFMA R16, R18, R20, R18 ; /* 0x000000141210722b */ /* 0x001e0c0000000012 */ /*04d0*/ @!P1 LOP3.LUT R22, R11, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000b169812 */ /* 0x002fe200078ec0ff */ /*04e0*/ DMUL R18, R16, R10 ; /* 0x0000000a10127228 */ /* 0x001e060000000000 */ /*04f0*/ IADD3 R14, R22, -0x1, RZ ; /* 0xffffffff160e7810 */ /* 0x000fc60007ffe0ff */ /*0500*/ DFMA R20, R18, -R8, R10 ; /* 0x800000081214722b */ /* 0x001e22000000000a */ /*0510*/ ISETP.GT.U32.AND P0, PT, R14, 0x7feffffe, PT ; /* 0x7feffffe0e00780c */ /* 0x000fc80003f04070 */ /*0520*/ ISETP.GT.U32.OR P0, PT, R24, 0x7feffffe, P0 ; /* 0x7feffffe1800780c */ /* 0x000fe20000704470 */ /*0530*/ DFMA R16, R16, R20, R18 ; /* 0x000000141010722b */ /* 0x0010580000000012 */ /*0540*/ @P0 BRA 0x6f0 ; /* 0x000001a000000947 */ /* 0x000fea0003800000 */ /*0550*/ IADD3 R13, R13, -0x40100000, RZ ; /* 0xbff000000d0d7810 */ /* 0x003fe20007ffe0ff */ /*0560*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */ /* 0x000fc600078e00ff */ /*0570*/ IMNMX R13, R13, -0x46a00000, !PT ; /* 0xb96000000d0d7817 */ /* 0x000fc80007800200 */ /*0580*/ IMNMX R12, R13, 0x46a00000, PT ; /* 0x46a000000d0c7817 */ /* 0x000fca0003800200 */ /*0590*/ IMAD.IADD R12, R12, 0x1, -R15 ; /* 0x000000010c0c7824 */ /* 0x000fca00078e0a0f */ /*05a0*/ IADD3 R19, R12, 0x7fe00000, RZ ; /* 0x7fe000000c137810 */ /* 0x000fcc0007ffe0ff */ /*05b0*/ DMUL R14, R16, R18 ; /* 0x00000012100e7228 */ /* 0x000e140000000000 */ /*05c0*/ FSETP.GTU.AND P0, PT, |R15|, 1.469367938527859385e-39, PT ; /* 0x001000000f00780b */ /* 0x001fda0003f0c200 */ /*05d0*/ @P0 BRA 0x820 ; /* 0x0000024000000947 */ /* 0x000fea0003800000 */ /*05e0*/ DFMA R8, R16, -R8, R10 ; /* 0x800000081008722b */ /* 0x000e22000000000a */ /*05f0*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */ /* 0x000fd200078e00ff */ /*0600*/ FSETP.NEU.AND P0, PT, R9.reuse, RZ, PT ; /* 0x000000ff0900720b */ /* 0x041fe40003f0d000 */ /*0610*/ LOP3.LUT R8, R9, 0x40180000, RZ, 0x3c, !PT ; /* 0x4018000009087812 */ /* 0x000fc800078e3cff */ /*0620*/ LOP3.LUT R11, R8, 0x80000000, RZ, 0xc0, !PT ; /* 0x80000000080b7812 */ /* 0x000fc800078ec0ff */ /*0630*/ LOP3.LUT R19, R11, R19, RZ, 0xfc, !PT ; /* 0x000000130b137212 */ /* 0x000fc600078efcff */ /*0640*/ @!P0 BRA 0x820 ; /* 0x000001d000008947 */ /* 0x000fea0003800000 */ /*0650*/ IMAD.MOV R9, RZ, RZ, -R12 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0a0c */ /*0660*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */ /* 0x000fcc00078e00ff */ /*0670*/ DFMA R8, R14, -R8, R16 ; /* 0x800000080e08722b */ /* 0x000e080000000010 */ /*0680*/ DMUL.RP R16, R16, R18 ; /* 0x0000001210107228 */ /* 0x000e640000008000 */ /*0690*/ IADD3 R8, -R12, -0x43300000, RZ ; /* 0xbcd000000c087810 */ /* 0x001fc80007ffe1ff */ /*06a0*/ FSETP.NEU.AND P0, PT, |R9|, R8, PT ; /* 0x000000080900720b */ /* 0x000fc80003f0d200 */ /*06b0*/ LOP3.LUT R11, R17, R11, RZ, 0x3c, !PT ; /* 0x0000000b110b7212 */ /* 0x002fe400078e3cff */ /*06c0*/ FSEL R14, R16, R14, !P0 ; /* 0x0000000e100e7208 */ /* 0x000fe40004000000 */ /*06d0*/ FSEL R15, R11, R15, !P0 ; /* 0x0000000f0b0f7208 */ /* 0x000fe20004000000 */ /*06e0*/ BRA 0x820 ; /* 0x0000013000007947 */ /* 0x000fea0003800000 */ /*06f0*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff087624 */ /* 0x003fe400078e00ff */ /*0700*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff097624 */ /* 0x000fcc00078e00ff */ /*0710*/ DSETP.NAN.AND P0, PT, R8, c[0x0][0x188], PT ; /* 0x000062000800762a */ /* 0x000e1c0003f08000 */ /*0720*/ @P0 BRA 0x800 ; /* 0x000000d000000947 */ /* 0x001fea0003800000 */ /*0730*/ ISETP.NE.AND P0, PT, R22, R23, PT ; /* 0x000000171600720c */ /* 0x000fe20003f05270 */ /*0740*/ IMAD.MOV.U32 R15, RZ, RZ, -0x80000 ; /* 0xfff80000ff0f7424 */ /* 0x000fe200078e00ff */ /*0750*/ MOV R14, 0x0 ; /* 0x00000000000e7802 */ /* 0x000fd60000000f00 */ /*0760*/ @!P0 BRA 0x820 ; /* 0x000000b000008947 */ /* 0x000fea0003800000 */ /*0770*/ ISETP.NE.AND P0, PT, R22, 0x7ff00000, PT ; /* 0x7ff000001600780c */ /* 0x000fe40003f05270 */ /*0780*/ LOP3.LUT R12, R12, 0x40180000, RZ, 0x3c, !PT ; /* 0x401800000c0c7812 */ /* 0x000fe400078e3cff */ /*0790*/ ISETP.EQ.OR P0, PT, R23, RZ, !P0 ; /* 0x000000ff1700720c */ /* 0x000fe40004702670 */ /*07a0*/ LOP3.LUT R15, R12, 0x80000000, RZ, 0xc0, !PT ; /* 0x800000000c0f7812 */ /* 0x000fd600078ec0ff */ /*07b0*/ @P0 LOP3.LUT R8, R15, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000f080812 */ /* 0x000fe200078efcff */ /*07c0*/ @!P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e8224 */ /* 0x000fe400078e00ff */ /*07d0*/ @P0 IMAD.MOV.U32 R14, RZ, RZ, RZ ; /* 0x000000ffff0e0224 */ /* 0x000fe400078e00ff */ /*07e0*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, R8 ; /* 0x000000ffff0f0224 */ /* 0x000fe200078e0008 */ /*07f0*/ BRA 0x820 ; /* 0x0000002000007947 */ /* 0x000fea0003800000 */ /*0800*/ LOP3.LUT R15, R12, 0x80000, RZ, 0xfc, !PT ; /* 0x000800000c0f7812 */ /* 0x000fe200078efcff */ /*0810*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0e7624 */ /* 0x000fe400078e00ff */ /*0820*/ IMAD.MOV.U32 R8, RZ, RZ, R0 ; /* 0x000000ffff087224 */ /* 0x000fe400078e0000 */ /*0830*/ IMAD.MOV.U32 R9, RZ, RZ, 0x0 ; /* 0x00000000ff097424 */ /* 0x000fc800078e00ff */ /*0840*/ RET.REL.NODEC R8 0x0 ; /* 0xfffff7b008007950 */ /* 0x000fea0003c3ffff */ /*0850*/ BRA 0x850; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0860*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0870*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0880*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0890*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17kernelCulcRhoRealiPdS_S_dd .globl _Z17kernelCulcRhoRealiPdS_S_dd .p2align 8 .type _Z17kernelCulcRhoRealiPdS_S_dd,@function _Z17kernelCulcRhoRealiPdS_S_dd: s_clause 0x1 s_load_b32 s2, s[0:1], 0x3c s_load_b32 s3, s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[8:9], s[0:1], 0x28 s_mov_b32 s2, 0 s_mov_b32 s3, 0x40180000 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) v_lshlrev_b64 v[0:1], 3, v[1:2] s_waitcnt lgkmcnt(0) v_div_scale_f64 v[3:4], null, s[2:3], s[2:3], s[8:9] s_load_b256 s[0:7], s[0:1], 0x8 s_waitcnt lgkmcnt(0) v_add_co_u32 v7, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v8, vcc_lo, s3, v1, vcc_lo v_add_co_u32 v11, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v12, vcc_lo, s5, v1, vcc_lo global_load_b64 v[7:8], v[7:8], off global_load_b64 v[11:12], v[11:12], off v_rcp_f64_e32 v[5:6], v[3:4] s_waitcnt_depctr 0xfff v_fma_f64 v[9:10], -v[3:4], v[5:6], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[5:6], v[5:6], v[9:10], v[5:6] v_fma_f64 v[9:10], -v[3:4], v[5:6], 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fma_f64 v[5:6], v[5:6], v[9:10], v[5:6] v_div_scale_f64 v[9:10], vcc_lo, s[8:9], 0x40180000, s[8:9] v_mul_f64 v[13:14], v[9:10], v[5:6] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[2:3], -v[3:4], v[13:14], v[9:10] v_div_fmas_f64 v[2:3], v[2:3], v[5:6], v[13:14] v_ldexp_f64 v[4:5], s[6:7], -2 v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) v_div_fixup_f64 v[2:3], v[2:3], 0x40180000, s[8:9] s_waitcnt vmcnt(1) v_mul_f64 v[4:5], v[4:5], v[7:8] v_mul_f64 v[13:14], v[7:8], 0.5 s_waitcnt vmcnt(0) v_mul_f64 v[9:10], v[11:12], 0.5 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_mul_f64 v[2:3], v[2:3], v[7:8] v_mul_f64 v[4:5], v[7:8], v[4:5] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f64 v[9:10], v[11:12], v[9:10] v_mul_f64 v[2:3], v[7:8], v[2:3] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f64 v[4:5], v[7:8], v[4:5] v_fma_f64 v[9:10], v[7:8], v[13:14], v[9:10] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_f64 v[2:3], v[7:8], v[2:3] v_fma_f64 v[4:5], v[7:8], v[4:5], v[9:10] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f64 v[2:3], v[7:8], v[2:3] v_mul_f64 v[2:3], v[7:8], v[2:3] s_delay_alu instid0(VALU_DEP_1) v_fma_f64 v[2:3], v[7:8], v[2:3], v[4:5] global_store_b64 v[0:1], v[2:3], off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17kernelCulcRhoRealiPdS_S_dd .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 15 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17kernelCulcRhoRealiPdS_S_dd, .Lfunc_end0-_Z17kernelCulcRhoRealiPdS_S_dd .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 4 .value_kind: by_value - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 8 .value_kind: by_value - .offset: 40 .size: 8 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17kernelCulcRhoRealiPdS_S_dd .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17kernelCulcRhoRealiPdS_S_dd.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 15 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000721d6_00000000-6_kernelCulcRhoReal.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd .type _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd, @function _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movl %edi, 44(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movsd %xmm0, 8(%rsp) movsd %xmm1, (%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 44(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movq %rsp, %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z17kernelCulcRhoRealiPdS_S_dd(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd, .-_Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd .globl _Z17kernelCulcRhoRealiPdS_S_dd .type _Z17kernelCulcRhoRealiPdS_S_dd, @function _Z17kernelCulcRhoRealiPdS_S_dd: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z44__device_stub__Z17kernelCulcRhoRealiPdS_S_ddiPdS_S_dd addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z17kernelCulcRhoRealiPdS_S_dd, .-_Z17kernelCulcRhoRealiPdS_S_dd .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z17kernelCulcRhoRealiPdS_S_dd" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17kernelCulcRhoRealiPdS_S_dd(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernelCulcRhoReal.hip" .globl _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd # -- Begin function _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .p2align 4, 0x90 .type _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd,@function _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd: # @_Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movl %edi, 4(%rsp) movq %rsi, 88(%rsp) movq %rdx, 80(%rsp) movq %rcx, 72(%rsp) movsd %xmm0, 64(%rsp) movsd %xmm1, 56(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 88(%rsp), %rax movq %rax, 104(%rsp) leaq 80(%rsp), %rax movq %rax, 112(%rsp) leaq 72(%rsp), %rax movq %rax, 120(%rsp) leaq 64(%rsp), %rax movq %rax, 128(%rsp) leaq 56(%rsp), %rax movq %rax, 136(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z17kernelCulcRhoRealiPdS_S_dd, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd, .Lfunc_end0-_Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17kernelCulcRhoRealiPdS_S_dd, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17kernelCulcRhoRealiPdS_S_dd,@object # @_Z17kernelCulcRhoRealiPdS_S_dd .section .rodata,"a",@progbits .globl _Z17kernelCulcRhoRealiPdS_S_dd .p2align 3, 0x0 _Z17kernelCulcRhoRealiPdS_S_dd: .quad _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .size _Z17kernelCulcRhoRealiPdS_S_dd, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17kernelCulcRhoRealiPdS_S_dd" .size .L__unnamed_1, 31 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__kernelCulcRhoRealiPdS_S_dd .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17kernelCulcRhoRealiPdS_S_dd .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* To Compile: nvcc 2039281_Task3_A.cu -o task3_A To Run: ./task3_A /***************************************************** BY Subin Shrestha ID 2039281 --Code to crack code with 2 letters and 2 numbers E.g AA12 using CUDA --A Custom encryption is made to run on device --This program encrypts the given text using custom encryption --Stores the encypted text in global variable --Decrypts the code stored in global variable using CUDA computaion ******************************************************/ #include <stdio.h> #include <stdlib.h> #include <time.h> //Global variable for device __device__ char* encText; //To calculate Time int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if (dn < 0) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } //Custom Encryption function to run on device __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; } //Device function to match string __device__ int passwordMatch(char* currentEncText){ char* check = currentEncText; char* match = encText; while(*check == *match){ if(*check == '\0'){ return 1; } check++; match++; } return 0; } //Encrypts given plain text using custom encryption //Stores the encrypted text at global device variable __global__ void Encrypt(){ char genRawPass[5] = "cd20"; encText = CudaCrypt(genRawPass); //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) printf("Encrypted text is: "); printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], encText); printf("Decrypting %s using Brute Force \n", encText); } //Cracks the the encrypted text in global variable __global__ void crack(char * alphabet, char * numbers){ char rawPass[5]; rawPass[0] = alphabet[blockIdx.x]; rawPass[1] = alphabet[blockIdx.y]; rawPass[2] = numbers[threadIdx.x]; rawPass[3] = numbers[threadIdx.y]; rawPass[4] = '\0'; //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) if(passwordMatch(CudaCrypt(rawPass))){ printf("Match Found Your Password is %s \n", rawPass); } } //Main Function int main(int argc, char ** argv){ //starting clock struct timespec start, finish; long long int difference; clock_gettime(CLOCK_MONOTONIC, &start); //Calls Encryption method Encrypt<<< 1, 1 >>>(); char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; cudaMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); cudaMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, cudaMemcpyHostToDevice); char * gpuNumbers; cudaMalloc( (void**) &gpuNumbers, sizeof(char) * 26); cudaMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, cudaMemcpyHostToDevice); crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers ); cudaDeviceSynchronize(); //Stopping Clock clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &difference); printf("run lasted %lldns or %9.5lfs\n", difference, difference / 1000000000.0); return 0; }
.file "tmpxft_000ff836_00000000-6_2039281_Task3_A.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z15time_differenceP8timespecS0_Px .type _Z15time_differenceP8timespecS0_Px, @function _Z15time_differenceP8timespecS0_Px: .LFB2057: .cfi_startproc endbr64 movq (%rsi), %rax subq (%rdi), %rax movq 8(%rsi), %rcx subq 8(%rdi), %rcx js .L5 .L4: imulq $1000000000, %rax, %rax addq %rcx, %rax movq %rax, (%rdx) testq %rax, %rax setle %al movzbl %al, %eax ret .L5: subq $1, %rax addq $1000000000, %rcx jmp .L4 .cfi_endproc .LFE2057: .size _Z15time_differenceP8timespecS0_Px, .-_Z15time_differenceP8timespecS0_Px .globl _Z9CudaCryptPc .type _Z9CudaCryptPc, @function _Z9CudaCryptPc: .LFB2058: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z9CudaCryptPc, .-_Z9CudaCryptPc .globl _Z13passwordMatchPc .type _Z13passwordMatchPc, @function _Z13passwordMatchPc: .LFB2059: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2059: .size _Z13passwordMatchPc, .-_Z13passwordMatchPc .globl _Z25__device_stub__Z7Encryptvv .type _Z25__device_stub__Z7Encryptvv, @function _Z25__device_stub__Z7Encryptvv: .LFB2085: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L14 .L10: movq 72(%rsp), %rax subq %fs:40, %rax jne .L15 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z7Encryptv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L10 .L15: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z25__device_stub__Z7Encryptvv, .-_Z25__device_stub__Z7Encryptvv .globl _Z7Encryptv .type _Z7Encryptv, @function _Z7Encryptv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z7Encryptvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z7Encryptv, .-_Z7Encryptv .globl _Z26__device_stub__Z5crackPcS_PcS_ .type _Z26__device_stub__Z5crackPcS_PcS_, @function _Z26__device_stub__Z5crackPcS_PcS_: .LFB2087: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L22 .L18: movq 104(%rsp), %rax subq %fs:40, %rax jne .L23 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L22: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5crackPcS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L18 .L23: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z26__device_stub__Z5crackPcS_PcS_, .-_Z26__device_stub__Z5crackPcS_PcS_ .globl _Z5crackPcS_ .type _Z5crackPcS_, @function _Z5crackPcS_: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z5crackPcS_PcS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z5crackPcS_, .-_Z5crackPcS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "run lasted %lldns or %9.5lfs\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $144, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 32(%rsp), %rsi movl $1, %edi call clock_gettime@PLT movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 48(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movabsq $7523094288207667809, %rax movabsq $8101815670912281193, %rdx movq %rax, 64(%rsp) movq %rdx, 72(%rsp) movabsq $8246496016588434539, %rax movabsq $8825217399293047923, %rdx movq %rax, 74(%rsp) movq %rdx, 82(%rsp) movabsq $3978425819141910832, %rax movl $14648, %edx movq %rax, 96(%rsp) movq %rdx, 104(%rsp) movq $0, 106(%rsp) movq $0, 114(%rsp) movq %rsp, %rdi movl $26, %esi call cudaMalloc@PLT leaq 64(%rsp), %rsi movl $1, %ecx movl $26, %edx movq (%rsp), %rdi call cudaMemcpy@PLT leaq 8(%rsp), %rdi movl $26, %esi call cudaMalloc@PLT leaq 96(%rsp), %rsi movl $1, %ecx movl $26, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $10, 48(%rsp) movl $10, 52(%rsp) movl $1, 56(%rsp) movl $26, 16(%rsp) movl $26, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 48(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L32 .L28: call cudaDeviceSynchronize@PLT leaq 48(%rsp), %rbx movq %rbx, %rsi movl $1, %edi call clock_gettime@PLT leaq 16(%rsp), %rdx leaq 32(%rsp), %rdi movq %rbx, %rsi call _Z15time_differenceP8timespecS0_Px movq 16(%rsp), %rdx pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 divsd .LC0(%rip), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L33 movl $0, %eax addq $144, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state call _Z25__device_stub__Z7Encryptvv jmp .L27 .L32: movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z26__device_stub__Z5crackPcS_PcS_ jmp .L28 .L33: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .section .rodata.str1.1 .LC2: .string "_Z5crackPcS_" .LC3: .string "_Z7Encryptv" .LC4: .string "encText" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2090: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z5crackPcS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z7Encryptv(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $8, %r9d movl $0, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _ZL7encText(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL7encText .comm _ZL7encText,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1104006501 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* To Compile: nvcc 2039281_Task3_A.cu -o task3_A To Run: ./task3_A /***************************************************** BY Subin Shrestha ID 2039281 --Code to crack code with 2 letters and 2 numbers E.g AA12 using CUDA --A Custom encryption is made to run on device --This program encrypts the given text using custom encryption --Stores the encypted text in global variable --Decrypts the code stored in global variable using CUDA computaion ******************************************************/ #include <stdio.h> #include <stdlib.h> #include <time.h> //Global variable for device __device__ char* encText; //To calculate Time int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if (dn < 0) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } //Custom Encryption function to run on device __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; } //Device function to match string __device__ int passwordMatch(char* currentEncText){ char* check = currentEncText; char* match = encText; while(*check == *match){ if(*check == '\0'){ return 1; } check++; match++; } return 0; } //Encrypts given plain text using custom encryption //Stores the encrypted text at global device variable __global__ void Encrypt(){ char genRawPass[5] = "cd20"; encText = CudaCrypt(genRawPass); //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) printf("Encrypted text is: "); printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], encText); printf("Decrypting %s using Brute Force \n", encText); } //Cracks the the encrypted text in global variable __global__ void crack(char * alphabet, char * numbers){ char rawPass[5]; rawPass[0] = alphabet[blockIdx.x]; rawPass[1] = alphabet[blockIdx.y]; rawPass[2] = numbers[threadIdx.x]; rawPass[3] = numbers[threadIdx.y]; rawPass[4] = '\0'; //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) if(passwordMatch(CudaCrypt(rawPass))){ printf("Match Found Your Password is %s \n", rawPass); } } //Main Function int main(int argc, char ** argv){ //starting clock struct timespec start, finish; long long int difference; clock_gettime(CLOCK_MONOTONIC, &start); //Calls Encryption method Encrypt<<< 1, 1 >>>(); char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; cudaMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); cudaMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, cudaMemcpyHostToDevice); char * gpuNumbers; cudaMalloc( (void**) &gpuNumbers, sizeof(char) * 26); cudaMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, cudaMemcpyHostToDevice); crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers ); cudaDeviceSynchronize(); //Stopping Clock clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &difference); printf("run lasted %lldns or %9.5lfs\n", difference, difference / 1000000000.0); return 0; }
/* To Compile: nvcc 2039281_Task3_A.cu -o task3_A To Run: ./task3_A /***************************************************** BY Subin Shrestha ID 2039281 --Code to crack code with 2 letters and 2 numbers E.g AA12 using CUDA --A Custom encryption is made to run on device --This program encrypts the given text using custom encryption --Stores the encypted text in global variable --Decrypts the code stored in global variable using CUDA computaion ******************************************************/ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> //Global variable for device __device__ char* encText; //To calculate Time int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if (dn < 0) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } //Custom Encryption function to run on device __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; } //Device function to match string __device__ int passwordMatch(char* currentEncText){ char* check = currentEncText; char* match = encText; while(*check == *match){ if(*check == '\0'){ return 1; } check++; match++; } return 0; } //Encrypts given plain text using custom encryption //Stores the encrypted text at global device variable __global__ void Encrypt(){ char genRawPass[5] = "cd20"; encText = CudaCrypt(genRawPass); //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) printf("Encrypted text is: "); printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], encText); printf("Decrypting %s using Brute Force \n", encText); } //Cracks the the encrypted text in global variable __global__ void crack(char * alphabet, char * numbers){ char rawPass[5]; rawPass[0] = alphabet[blockIdx.x]; rawPass[1] = alphabet[blockIdx.y]; rawPass[2] = numbers[threadIdx.x]; rawPass[3] = numbers[threadIdx.y]; rawPass[4] = '\0'; //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) if(passwordMatch(CudaCrypt(rawPass))){ printf("Match Found Your Password is %s \n", rawPass); } } //Main Function int main(int argc, char ** argv){ //starting clock struct timespec start, finish; long long int difference; clock_gettime(CLOCK_MONOTONIC, &start); //Calls Encryption method Encrypt<<< 1, 1 >>>(); char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; hipMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); hipMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, hipMemcpyHostToDevice); char * gpuNumbers; hipMalloc( (void**) &gpuNumbers, sizeof(char) * 26); hipMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, hipMemcpyHostToDevice); crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers ); hipDeviceSynchronize(); //Stopping Clock clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &difference); printf("run lasted %lldns or %9.5lfs\n", difference, difference / 1000000000.0); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* To Compile: nvcc 2039281_Task3_A.cu -o task3_A To Run: ./task3_A /***************************************************** BY Subin Shrestha ID 2039281 --Code to crack code with 2 letters and 2 numbers E.g AA12 using CUDA --A Custom encryption is made to run on device --This program encrypts the given text using custom encryption --Stores the encypted text in global variable --Decrypts the code stored in global variable using CUDA computaion ******************************************************/ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> //Global variable for device __device__ char* encText; //To calculate Time int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if (dn < 0) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } //Custom Encryption function to run on device __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; } //Device function to match string __device__ int passwordMatch(char* currentEncText){ char* check = currentEncText; char* match = encText; while(*check == *match){ if(*check == '\0'){ return 1; } check++; match++; } return 0; } //Encrypts given plain text using custom encryption //Stores the encrypted text at global device variable __global__ void Encrypt(){ char genRawPass[5] = "cd20"; encText = CudaCrypt(genRawPass); //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) printf("Encrypted text is: "); printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], encText); printf("Decrypting %s using Brute Force \n", encText); } //Cracks the the encrypted text in global variable __global__ void crack(char * alphabet, char * numbers){ char rawPass[5]; rawPass[0] = alphabet[blockIdx.x]; rawPass[1] = alphabet[blockIdx.y]; rawPass[2] = numbers[threadIdx.x]; rawPass[3] = numbers[threadIdx.y]; rawPass[4] = '\0'; //firstLetter - 'a' - 'z' (26 characters) //secondLetter - 'a' - 'z' (26 characters) //firstNum - '0' - '9' (10 characters) //secondNum - '0' - '9' (10 characters) if(passwordMatch(CudaCrypt(rawPass))){ printf("Match Found Your Password is %s \n", rawPass); } } //Main Function int main(int argc, char ** argv){ //starting clock struct timespec start, finish; long long int difference; clock_gettime(CLOCK_MONOTONIC, &start); //Calls Encryption method Encrypt<<< 1, 1 >>>(); char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[26] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; hipMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); hipMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, hipMemcpyHostToDevice); char * gpuNumbers; hipMalloc( (void**) &gpuNumbers, sizeof(char) * 26); hipMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 26, hipMemcpyHostToDevice); crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers ); hipDeviceSynchronize(); //Stopping Clock clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &difference); printf("run lasted %lldns or %9.5lfs\n", difference, difference / 1000000000.0); return 0; }
.text .file "2039281_Task3_A.hip" .globl _Z15time_differenceP8timespecS0_Px # -- Begin function _Z15time_differenceP8timespecS0_Px .p2align 4, 0x90 .type _Z15time_differenceP8timespecS0_Px,@function _Z15time_differenceP8timespecS0_Px: # @_Z15time_differenceP8timespecS0_Px .cfi_startproc # %bb.0: movq (%rsi), %rax subq (%rdi), %rax movq 8(%rsi), %rcx subq 8(%rdi), %rcx leaq 1000000000(%rcx), %rsi movq %rcx, %rdi sarq $63, %rdi addq %rax, %rdi testq %rcx, %rcx cmovnsq %rcx, %rsi imulq $1000000000, %rdi, %rcx # imm = 0x3B9ACA00 xorl %eax, %eax addq %rsi, %rcx movq %rcx, (%rdx) setle %al retq .Lfunc_end0: .size _Z15time_differenceP8timespecS0_Px, .Lfunc_end0-_Z15time_differenceP8timespecS0_Px .cfi_endproc # -- End function .globl _Z22__device_stub__Encryptv # -- Begin function _Z22__device_stub__Encryptv .p2align 4, 0x90 .type _Z22__device_stub__Encryptv,@function _Z22__device_stub__Encryptv: # @_Z22__device_stub__Encryptv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z7Encryptv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end1: .size _Z22__device_stub__Encryptv, .Lfunc_end1-_Z22__device_stub__Encryptv .cfi_endproc # -- End function .globl _Z20__device_stub__crackPcS_ # -- Begin function _Z20__device_stub__crackPcS_ .p2align 4, 0x90 .type _Z20__device_stub__crackPcS_,@function _Z20__device_stub__crackPcS_: # @_Z20__device_stub__crackPcS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z5crackPcS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z20__device_stub__crackPcS_, .Lfunc_end2-_Z20__device_stub__crackPcS_ .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI3_0: .byte 97 # 0x61 .byte 98 # 0x62 .byte 99 # 0x63 .byte 100 # 0x64 .byte 101 # 0x65 .byte 102 # 0x66 .byte 103 # 0x67 .byte 104 # 0x68 .byte 105 # 0x69 .byte 106 # 0x6a .byte 107 # 0x6b .byte 108 # 0x6c .byte 109 # 0x6d .byte 110 # 0x6e .byte 111 # 0x6f .byte 112 # 0x70 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI3_1: .quad 0x41cdcd6500000000 # double 1.0E+9 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $184, %rsp .cfi_def_cfa_offset 192 leaq 168(%rsp), %rsi movl $1, %edi callq clock_gettime movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_2 # %bb.1: leaq 96(%rsp), %rdi leaq 64(%rsp), %rsi movq %rsp, %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z7Encryptv, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 8(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_2: movaps .LCPI3_0(%rip), %xmm0 # xmm0 = [97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112] movaps %xmm0, 96(%rsp) movabsq $8680537053616894577, %rax # imm = 0x7877767574737271 movq %rax, 112(%rsp) movw $31353, 120(%rsp) # imm = 0x7A79 movabsq $3978425819141910832, %rax # imm = 0x3736353433323130 movq %rax, 64(%rsp) movw $14648, 72(%rsp) # imm = 0x3938 xorps %xmm0, %xmm0 movups %xmm0, 74(%rsp) leaq 40(%rsp), %rdi movl $26, %esi callq hipMalloc movq 40(%rsp), %rdi leaq 96(%rsp), %rsi movl $26, %edx movl $1, %ecx callq hipMemcpy leaq 32(%rsp), %rdi movl $26, %esi callq hipMalloc movq 32(%rsp), %rdi leaq 64(%rsp), %rsi movl $26, %edx movl $1, %ecx callq hipMemcpy movabsq $111669149722, %rdi # imm = 0x1A0000001A movabsq $42949672970, %rdx # imm = 0xA0000000A movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_4 # %bb.3: movq 40(%rsp), %rax movq 32(%rsp), %rcx movq %rax, 160(%rsp) movq %rcx, 152(%rsp) leaq 160(%rsp), %rax movq %rax, (%rsp) leaq 152(%rsp), %rax movq %rax, 8(%rsp) leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi leaq 144(%rsp), %rdx leaq 136(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d movq %rsp, %r9 movl $_Z5crackPcS_, %edi pushq 136(%rsp) .cfi_adjust_cfa_offset 8 pushq 152(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_4: callq hipDeviceSynchronize movq %rsp, %rsi movl $1, %edi callq clock_gettime movq (%rsp), %rax subq 168(%rsp), %rax movq 8(%rsp), %rcx subq 176(%rsp), %rcx leaq 1000000000(%rcx), %rdx movq %rcx, %rsi sarq $63, %rsi addq %rax, %rsi testq %rcx, %rcx cmovnsq %rcx, %rdx imulq $1000000000, %rsi, %rsi # imm = 0x3B9ACA00 addq %rdx, %rsi xorps %xmm0, %xmm0 cvtsi2sd %rsi, %xmm0 divsd .LCPI3_1(%rip), %xmm0 movl $.L.str, %edi movb $1, %al callq printf xorl %eax, %eax addq $184, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7Encryptv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5crackPcS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $0, 8(%rsp) movl $0, (%rsp) movl $encText, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movl $8, %r9d movq %rbx, %rdi xorl %r8d, %r8d callq __hipRegisterVar movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type encText,@object # @encText .local encText .comm encText,8,8 .type _Z7Encryptv,@object # @_Z7Encryptv .section .rodata,"a",@progbits .globl _Z7Encryptv .p2align 3, 0x0 _Z7Encryptv: .quad _Z22__device_stub__Encryptv .size _Z7Encryptv, 8 .type _Z5crackPcS_,@object # @_Z5crackPcS_ .globl _Z5crackPcS_ .p2align 3, 0x0 _Z5crackPcS_: .quad _Z20__device_stub__crackPcS_ .size _Z5crackPcS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "run lasted %lldns or %9.5lfs\n" .size .L.str, 30 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z7Encryptv" .size .L__unnamed_1, 12 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z5crackPcS_" .size .L__unnamed_2, 13 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "encText" .size .L__unnamed_3, 8 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__Encryptv .addrsig_sym _Z20__device_stub__crackPcS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym encText .addrsig_sym _Z7Encryptv .addrsig_sym _Z5crackPcS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000ff836_00000000-6_2039281_Task3_A.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z15time_differenceP8timespecS0_Px .type _Z15time_differenceP8timespecS0_Px, @function _Z15time_differenceP8timespecS0_Px: .LFB2057: .cfi_startproc endbr64 movq (%rsi), %rax subq (%rdi), %rax movq 8(%rsi), %rcx subq 8(%rdi), %rcx js .L5 .L4: imulq $1000000000, %rax, %rax addq %rcx, %rax movq %rax, (%rdx) testq %rax, %rax setle %al movzbl %al, %eax ret .L5: subq $1, %rax addq $1000000000, %rcx jmp .L4 .cfi_endproc .LFE2057: .size _Z15time_differenceP8timespecS0_Px, .-_Z15time_differenceP8timespecS0_Px .globl _Z9CudaCryptPc .type _Z9CudaCryptPc, @function _Z9CudaCryptPc: .LFB2058: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z9CudaCryptPc, .-_Z9CudaCryptPc .globl _Z13passwordMatchPc .type _Z13passwordMatchPc, @function _Z13passwordMatchPc: .LFB2059: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2059: .size _Z13passwordMatchPc, .-_Z13passwordMatchPc .globl _Z25__device_stub__Z7Encryptvv .type _Z25__device_stub__Z7Encryptvv, @function _Z25__device_stub__Z7Encryptvv: .LFB2085: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L14 .L10: movq 72(%rsp), %rax subq %fs:40, %rax jne .L15 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z7Encryptv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L10 .L15: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z25__device_stub__Z7Encryptvv, .-_Z25__device_stub__Z7Encryptvv .globl _Z7Encryptv .type _Z7Encryptv, @function _Z7Encryptv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z7Encryptvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z7Encryptv, .-_Z7Encryptv .globl _Z26__device_stub__Z5crackPcS_PcS_ .type _Z26__device_stub__Z5crackPcS_PcS_, @function _Z26__device_stub__Z5crackPcS_PcS_: .LFB2087: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L22 .L18: movq 104(%rsp), %rax subq %fs:40, %rax jne .L23 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L22: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z5crackPcS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L18 .L23: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z26__device_stub__Z5crackPcS_PcS_, .-_Z26__device_stub__Z5crackPcS_PcS_ .globl _Z5crackPcS_ .type _Z5crackPcS_, @function _Z5crackPcS_: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z5crackPcS_PcS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z5crackPcS_, .-_Z5crackPcS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "run lasted %lldns or %9.5lfs\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 subq $144, %rsp .cfi_def_cfa_offset 160 movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 32(%rsp), %rsi movl $1, %edi call clock_gettime@PLT movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 48(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movabsq $7523094288207667809, %rax movabsq $8101815670912281193, %rdx movq %rax, 64(%rsp) movq %rdx, 72(%rsp) movabsq $8246496016588434539, %rax movabsq $8825217399293047923, %rdx movq %rax, 74(%rsp) movq %rdx, 82(%rsp) movabsq $3978425819141910832, %rax movl $14648, %edx movq %rax, 96(%rsp) movq %rdx, 104(%rsp) movq $0, 106(%rsp) movq $0, 114(%rsp) movq %rsp, %rdi movl $26, %esi call cudaMalloc@PLT leaq 64(%rsp), %rsi movl $1, %ecx movl $26, %edx movq (%rsp), %rdi call cudaMemcpy@PLT leaq 8(%rsp), %rdi movl $26, %esi call cudaMalloc@PLT leaq 96(%rsp), %rsi movl $1, %ecx movl $26, %edx movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $10, 48(%rsp) movl $10, 52(%rsp) movl $1, 56(%rsp) movl $26, 16(%rsp) movl $26, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 48(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L32 .L28: call cudaDeviceSynchronize@PLT leaq 48(%rsp), %rbx movq %rbx, %rsi movl $1, %edi call clock_gettime@PLT leaq 16(%rsp), %rdx leaq 32(%rsp), %rdi movq %rbx, %rsi call _Z15time_differenceP8timespecS0_Px movq 16(%rsp), %rdx pxor %xmm0, %xmm0 cvtsi2sdq %rdx, %xmm0 divsd .LC0(%rip), %xmm0 leaq .LC1(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 136(%rsp), %rax subq %fs:40, %rax jne .L33 movl $0, %eax addq $144, %rsp .cfi_remember_state .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state call _Z25__device_stub__Z7Encryptvv jmp .L27 .L32: movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z26__device_stub__Z5crackPcS_PcS_ jmp .L28 .L33: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .section .rodata.str1.1 .LC2: .string "_Z5crackPcS_" .LC3: .string "_Z7Encryptv" .LC4: .string "encText" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2090: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z5crackPcS_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z7Encryptv(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 movl $8, %r9d movl $0, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _ZL7encText(%rip), %rsi movq %rbx, %rdi call __cudaRegisterVar@PLT addq $16, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .local _ZL7encText .comm _ZL7encText,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1104006501 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "2039281_Task3_A.hip" .globl _Z15time_differenceP8timespecS0_Px # -- Begin function _Z15time_differenceP8timespecS0_Px .p2align 4, 0x90 .type _Z15time_differenceP8timespecS0_Px,@function _Z15time_differenceP8timespecS0_Px: # @_Z15time_differenceP8timespecS0_Px .cfi_startproc # %bb.0: movq (%rsi), %rax subq (%rdi), %rax movq 8(%rsi), %rcx subq 8(%rdi), %rcx leaq 1000000000(%rcx), %rsi movq %rcx, %rdi sarq $63, %rdi addq %rax, %rdi testq %rcx, %rcx cmovnsq %rcx, %rsi imulq $1000000000, %rdi, %rcx # imm = 0x3B9ACA00 xorl %eax, %eax addq %rsi, %rcx movq %rcx, (%rdx) setle %al retq .Lfunc_end0: .size _Z15time_differenceP8timespecS0_Px, .Lfunc_end0-_Z15time_differenceP8timespecS0_Px .cfi_endproc # -- End function .globl _Z22__device_stub__Encryptv # -- Begin function _Z22__device_stub__Encryptv .p2align 4, 0x90 .type _Z22__device_stub__Encryptv,@function _Z22__device_stub__Encryptv: # @_Z22__device_stub__Encryptv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z7Encryptv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end1: .size _Z22__device_stub__Encryptv, .Lfunc_end1-_Z22__device_stub__Encryptv .cfi_endproc # -- End function .globl _Z20__device_stub__crackPcS_ # -- Begin function _Z20__device_stub__crackPcS_ .p2align 4, 0x90 .type _Z20__device_stub__crackPcS_,@function _Z20__device_stub__crackPcS_: # @_Z20__device_stub__crackPcS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z5crackPcS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z20__device_stub__crackPcS_, .Lfunc_end2-_Z20__device_stub__crackPcS_ .cfi_endproc # -- End function .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 # -- Begin function main .LCPI3_0: .byte 97 # 0x61 .byte 98 # 0x62 .byte 99 # 0x63 .byte 100 # 0x64 .byte 101 # 0x65 .byte 102 # 0x66 .byte 103 # 0x67 .byte 104 # 0x68 .byte 105 # 0x69 .byte 106 # 0x6a .byte 107 # 0x6b .byte 108 # 0x6c .byte 109 # 0x6d .byte 110 # 0x6e .byte 111 # 0x6f .byte 112 # 0x70 .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 .LCPI3_1: .quad 0x41cdcd6500000000 # double 1.0E+9 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $184, %rsp .cfi_def_cfa_offset 192 leaq 168(%rsp), %rsi movl $1, %edi callq clock_gettime movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_2 # %bb.1: leaq 96(%rsp), %rdi leaq 64(%rsp), %rsi movq %rsp, %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 96(%rsp), %rsi movl 104(%rsp), %edx movq 64(%rsp), %rcx movl 72(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z7Encryptv, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 8(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_2: movaps .LCPI3_0(%rip), %xmm0 # xmm0 = [97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112] movaps %xmm0, 96(%rsp) movabsq $8680537053616894577, %rax # imm = 0x7877767574737271 movq %rax, 112(%rsp) movw $31353, 120(%rsp) # imm = 0x7A79 movabsq $3978425819141910832, %rax # imm = 0x3736353433323130 movq %rax, 64(%rsp) movw $14648, 72(%rsp) # imm = 0x3938 xorps %xmm0, %xmm0 movups %xmm0, 74(%rsp) leaq 40(%rsp), %rdi movl $26, %esi callq hipMalloc movq 40(%rsp), %rdi leaq 96(%rsp), %rsi movl $26, %edx movl $1, %ecx callq hipMemcpy leaq 32(%rsp), %rdi movl $26, %esi callq hipMalloc movq 32(%rsp), %rdi leaq 64(%rsp), %rsi movl $26, %edx movl $1, %ecx callq hipMemcpy movabsq $111669149722, %rdi # imm = 0x1A0000001A movabsq $42949672970, %rdx # imm = 0xA0000000A movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_4 # %bb.3: movq 40(%rsp), %rax movq 32(%rsp), %rcx movq %rax, 160(%rsp) movq %rcx, 152(%rsp) leaq 160(%rsp), %rax movq %rax, (%rsp) leaq 152(%rsp), %rax movq %rax, 8(%rsp) leaq 16(%rsp), %rdi leaq 48(%rsp), %rsi leaq 144(%rsp), %rdx leaq 136(%rsp), %rcx callq __hipPopCallConfiguration movq 16(%rsp), %rsi movl 24(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d movq %rsp, %r9 movl $_Z5crackPcS_, %edi pushq 136(%rsp) .cfi_adjust_cfa_offset 8 pushq 152(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_4: callq hipDeviceSynchronize movq %rsp, %rsi movl $1, %edi callq clock_gettime movq (%rsp), %rax subq 168(%rsp), %rax movq 8(%rsp), %rcx subq 176(%rsp), %rcx leaq 1000000000(%rcx), %rdx movq %rcx, %rsi sarq $63, %rsi addq %rax, %rsi testq %rcx, %rcx cmovnsq %rcx, %rdx imulq $1000000000, %rsi, %rsi # imm = 0x3B9ACA00 addq %rdx, %rsi xorps %xmm0, %xmm0 cvtsi2sd %rsi, %xmm0 divsd .LCPI3_1(%rip), %xmm0 movl $.L.str, %edi movb $1, %al callq printf xorl %eax, %eax addq $184, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7Encryptv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z5crackPcS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $0, 8(%rsp) movl $0, (%rsp) movl $encText, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movl $8, %r9d movq %rbx, %rdi xorl %r8d, %r8d callq __hipRegisterVar movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type encText,@object # @encText .local encText .comm encText,8,8 .type _Z7Encryptv,@object # @_Z7Encryptv .section .rodata,"a",@progbits .globl _Z7Encryptv .p2align 3, 0x0 _Z7Encryptv: .quad _Z22__device_stub__Encryptv .size _Z7Encryptv, 8 .type _Z5crackPcS_,@object # @_Z5crackPcS_ .globl _Z5crackPcS_ .p2align 3, 0x0 _Z5crackPcS_: .quad _Z20__device_stub__crackPcS_ .size _Z5crackPcS_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "run lasted %lldns or %9.5lfs\n" .size .L.str, 30 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z7Encryptv" .size .L__unnamed_1, 12 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z5crackPcS_" .size .L__unnamed_2, 13 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "encText" .size .L__unnamed_3, 8 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__Encryptv .addrsig_sym _Z20__device_stub__crackPcS_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym encText .addrsig_sym _Z7Encryptv .addrsig_sym _Z5crackPcS_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// Assumption is input size == output size #include <stdio.h> #include <stdlib.h> #define mask_width 2 #define block_size o_tile_width + mask_width - 1 #define o_tile_width 2 __global__ void gpu_conv2d(float *d_out, float *d_in, float *d_filter, int height, int width){ __shared__ float sh_din[block_size][block_size]; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*o_tile_width + ty; int col_o = blockIdx.x * o_tile_width + tx; int diff = mask_width - 1; int row_i = row_o - diff; int col_i = col_o - diff; if ((row_i >= 0 && row_i < height) && (col_i >=0 && col_i < width )){ sh_din[ty][tx] = d_in[row_i*width + col_i]; } else{ sh_din[ty][tx] = 0.0; } __syncthreads(); float output = 0.0; if (tx < o_tile_width && ty < o_tile_width){ for (int i=0; i < mask_width; i++){ for (int j =0; j < mask_width; j++){ output += d_filter[i*mask_width + j] * sh_din[ty+i][tx+j]; } } } if (tx < o_tile_width && ty < o_tile_width){ d_out[row_o*width + col_o] = output; } } void init(float *arr, int h, int w, float val){ for (int r=0; r < h; r++){ for (int c=0; c < w; c++){ arr[r*w + c] = val; } } } void host_conv2d(float *h_out, float *h_in, float *h_filter, int height, int width){ for (int r=0; r<height; r++){ for (int c =0; c < width; c++){ float output = 0.0; int r_i = r - mask_width + 1; int c_i = c - mask_width + 1; //printf("r_i: %d , c_i: %d\n", r_i, c_i); for (int mr=0; mr<mask_width; mr++){ for (int mc=0; mc< mask_width; mc++){ if ( ((r_i+mr) >= 0 && (r_i + mr) < height) && ((c_i+mc) >=0 && (c_i+mc) < width) ) output+= h_in[(mr + r_i)*width + (c_i+mc)] * h_filter[mr*mask_width + mc]; } } h_out[r*width + c] = output; } } } int main(){ float *d_in, *d_filter, *d_out; float *h_in, *h_filter, *h_out; // Only for checking. Not needed for functionality int height = 6; int width = 6; size_t size_in = height*width*sizeof(float); size_t size_filter = mask_width*mask_width*sizeof(float); size_t size_out = height*width*sizeof(float); h_in = (float*) malloc (size_in); h_filter = (float*) malloc (size_filter); h_out = (float*) malloc (size_out); cudaMallocManaged(&d_in, size_in); cudaMallocManaged(&d_filter, size_filter); cudaMallocManaged(&d_out, size_out); init(d_in, height, width, 1.0); init(d_filter, mask_width, mask_width, 1.0); init(d_out, height, width, 0.0); dim3 num_threads (block_size, block_size); dim3 num_blocks ((height-1)/(o_tile_width) + 1, (width-1)/(o_tile_width) + 1) ; gpu_conv2d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, height, width); cudaDeviceSynchronize(); init(h_in, height, width, 1.0); init(h_filter, mask_width, mask_width, 1.0); init(h_out, height, width, 0.0); host_conv2d(h_out, h_in, h_filter, height, width); for(int i=0; i<height; i++){ for (int j=0; j<width; j++){ if (d_out[i*width +j] != h_out[i*width +j]){ printf(" h_out[%d][%d]: %f", i, j, h_out[i*width + j]); printf(" d_out[%d][%d]: %f", i, j, d_out[i*width + j]); return 0; } } } /* for (int i =0; i<dout_size; i++) if (d_out[i] != h_out[i]){ printf("Program failed!! Check the idx: %d", i); return 0; } */ printf("Success!!\n"); }
code for sm_80 Function : _Z10gpu_conv2dPfS_S_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e280000002500 */ /*0040*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e680000002600 */ /*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */ /* 0x000e620000002200 */ /*0060*/ IMAD R5, R5, 0x2, R4 ; /* 0x0000000205057824 */ /* 0x001fca00078e0204 */ /*0070*/ ISETP.GT.AND P1, PT, R5.reuse, c[0x0][0x17c], PT ; /* 0x00005f0005007a0c */ /* 0x040fe40003f24270 */ /*0080*/ LEA R0, R0, R7, 0x1 ; /* 0x0000000700007211 */ /* 0x002fe400078e08ff */ /*0090*/ ISETP.GT.AND P1, PT, R5, RZ, !P1 ; /* 0x000000ff0500720c */ /* 0x000fe40004f24270 */ /*00a0*/ ISETP.GT.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */ /* 0x000fc80003f04270 */ /*00b0*/ ISETP.GT.AND P0, PT, R0, RZ, !P0 ; /* 0x000000ff0000720c */ /* 0x000fc80004704270 */ /*00c0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000703070 */ /*00d0*/ @P0 IADD3 R2, R0, -0x1, RZ ; /* 0xffffffff00020810 */ /* 0x000fe40007ffe0ff */ /*00e0*/ @P0 MOV R3, 0x4 ; /* 0x0000000400030802 */ /* 0x000fc60000000f00 */ /*00f0*/ @P0 IMAD R2, R2, c[0x0][0x17c], R5 ; /* 0x00005f0002020a24 */ /* 0x000fca00078e0205 */ /*0100*/ @P0 IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02020810 */ /* 0x000fca0007ffe0ff */ /*0110*/ @P0 IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002020625 */ /* 0x000fcc00078e0203 */ /*0120*/ @P0 LDG.E R2, [R2.64] ; /* 0x0000000402020981 */ /* 0x000ea2000c1e1900 */ /*0130*/ IMAD R15, R7.reuse, 0x3, R4 ; /* 0x00000003070f7824 */ /* 0x040fe200078e0204 */ /*0140*/ ISETP.GT.AND P1, PT, R7, 0x1, PT ; /* 0x000000010700780c */ /* 0x000fc80003f24270 */ /*0150*/ @!P0 STS [R15.X4], RZ ; /* 0x000000ff0f008388 */ /* 0x0001e20000004800 */ /*0160*/ ISETP.GT.OR P1, PT, R4, 0x1, P1 ; /* 0x000000010400780c */ /* 0x000fc60000f24670 */ /*0170*/ @P0 STS [R15.X4], R2 ; /* 0x000000020f000388 */ /* 0x0041e80000004800 */ /*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0190*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*01a0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff027624 */ /* 0x001fe200078e00ff */ /*01b0*/ MOV R3, c[0x0][0x174] ; /* 0x00005d0000037a02 */ /* 0x000fe20000000f00 */ /*01c0*/ LDS R7, [R15.X4] ; /* 0x000000000f077984 */ /* 0x000e280000004800 */ /*01d0*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */ /* 0x000e28000c1e1900 */ /*01e0*/ LDG.E R6, [R2.64+0x4] ; /* 0x0000040402067981 */ /* 0x000ea8000c1e1900 */ /*01f0*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080402087981 */ /* 0x000ee8000c1e1900 */ /*0200*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c04020a7981 */ /* 0x000f22000c1e1900 */ /*0210*/ IMAD R5, R0, c[0x0][0x17c], R5 ; /* 0x00005f0000057a24 */ /* 0x000fc600078e0205 */ /*0220*/ LDS R9, [R15.X4+0x4] ; /* 0x000004000f097984 */ /* 0x000ea80000004800 */ /*0230*/ LDS R11, [R15.X4+0xc] ; /* 0x00000c000f0b7984 */ /* 0x000ee80000004800 */ /*0240*/ LDS R13, [R15.X4+0x10] ; /* 0x000010000f0d7984 */ /* 0x000f220000004800 */ /*0250*/ FFMA R4, R4, R7, RZ ; /* 0x0000000704047223 */ /* 0x001fc800000000ff */ /*0260*/ FFMA R4, R9, R6, R4 ; /* 0x0000000609047223 */ /* 0x004fe40000000004 */ /*0270*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff067424 */ /* 0x000fe400078e00ff */ /*0280*/ FFMA R8, R11, R8, R4 ; /* 0x000000080b087223 */ /* 0x008fe40000000004 */ /*0290*/ IMAD.WIDE R4, R5, R6, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fc800078e0206 */ /*02a0*/ FFMA R13, R13, R10, R8 ; /* 0x0000000a0d0d7223 */ /* 0x010fca0000000008 */ /*02b0*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x000fe2000c101904 */ /*02c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// Assumption is input size == output size #include <stdio.h> #include <stdlib.h> #define mask_width 2 #define block_size o_tile_width + mask_width - 1 #define o_tile_width 2 __global__ void gpu_conv2d(float *d_out, float *d_in, float *d_filter, int height, int width){ __shared__ float sh_din[block_size][block_size]; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*o_tile_width + ty; int col_o = blockIdx.x * o_tile_width + tx; int diff = mask_width - 1; int row_i = row_o - diff; int col_i = col_o - diff; if ((row_i >= 0 && row_i < height) && (col_i >=0 && col_i < width )){ sh_din[ty][tx] = d_in[row_i*width + col_i]; } else{ sh_din[ty][tx] = 0.0; } __syncthreads(); float output = 0.0; if (tx < o_tile_width && ty < o_tile_width){ for (int i=0; i < mask_width; i++){ for (int j =0; j < mask_width; j++){ output += d_filter[i*mask_width + j] * sh_din[ty+i][tx+j]; } } } if (tx < o_tile_width && ty < o_tile_width){ d_out[row_o*width + col_o] = output; } } void init(float *arr, int h, int w, float val){ for (int r=0; r < h; r++){ for (int c=0; c < w; c++){ arr[r*w + c] = val; } } } void host_conv2d(float *h_out, float *h_in, float *h_filter, int height, int width){ for (int r=0; r<height; r++){ for (int c =0; c < width; c++){ float output = 0.0; int r_i = r - mask_width + 1; int c_i = c - mask_width + 1; //printf("r_i: %d , c_i: %d\n", r_i, c_i); for (int mr=0; mr<mask_width; mr++){ for (int mc=0; mc< mask_width; mc++){ if ( ((r_i+mr) >= 0 && (r_i + mr) < height) && ((c_i+mc) >=0 && (c_i+mc) < width) ) output+= h_in[(mr + r_i)*width + (c_i+mc)] * h_filter[mr*mask_width + mc]; } } h_out[r*width + c] = output; } } } int main(){ float *d_in, *d_filter, *d_out; float *h_in, *h_filter, *h_out; // Only for checking. Not needed for functionality int height = 6; int width = 6; size_t size_in = height*width*sizeof(float); size_t size_filter = mask_width*mask_width*sizeof(float); size_t size_out = height*width*sizeof(float); h_in = (float*) malloc (size_in); h_filter = (float*) malloc (size_filter); h_out = (float*) malloc (size_out); cudaMallocManaged(&d_in, size_in); cudaMallocManaged(&d_filter, size_filter); cudaMallocManaged(&d_out, size_out); init(d_in, height, width, 1.0); init(d_filter, mask_width, mask_width, 1.0); init(d_out, height, width, 0.0); dim3 num_threads (block_size, block_size); dim3 num_blocks ((height-1)/(o_tile_width) + 1, (width-1)/(o_tile_width) + 1) ; gpu_conv2d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, height, width); cudaDeviceSynchronize(); init(h_in, height, width, 1.0); init(h_filter, mask_width, mask_width, 1.0); init(h_out, height, width, 0.0); host_conv2d(h_out, h_in, h_filter, height, width); for(int i=0; i<height; i++){ for (int j=0; j<width; j++){ if (d_out[i*width +j] != h_out[i*width +j]){ printf(" h_out[%d][%d]: %f", i, j, h_out[i*width + j]); printf(" d_out[%d][%d]: %f", i, j, d_out[i*width + j]); return 0; } } } /* for (int i =0; i<dout_size; i++) if (d_out[i] != h_out[i]){ printf("Program failed!! Check the idx: %d", i); return 0; } */ printf("Success!!\n"); }
.file "tmpxft_00022e97_00000000-6_conv2d.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z4initPfiif .type _Z4initPfiif, @function _Z4initPfiif: .LFB2057: .cfi_startproc endbr64 testl %esi, %esi jle .L3 movl $0, %r9d movl $0, %r8d movslq %edx, %r10 jmp .L5 .L7: movslq %r9d, %rcx leaq (%rdi,%rcx,4), %rax addq %r10, %rcx leaq (%rdi,%rcx,4), %rcx .L6: movss %xmm0, (%rax) addq $4, %rax cmpq %rcx, %rax jne .L6 .L8: addl $1, %r8d addl %edx, %r9d cmpl %r8d, %esi je .L3 .L5: testl %edx, %edx jg .L7 jmp .L8 .L3: ret .cfi_endproc .LFE2057: .size _Z4initPfiif, .-_Z4initPfiif .globl _Z11host_conv2dPfS_S_ii .type _Z11host_conv2dPfS_S_ii, @function _Z11host_conv2dPfS_S_ii: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 movq %rsi, -16(%rsp) testl %ecx, %ecx jle .L10 movq %rdx, %r15 movl %ecx, %r11d movl %r8d, %r12d negl %r12d leal -1(%rcx), %edx movl $-1, %eax movslq %r8d, %r14 movl %edx, %ecx movq %rdi, %rsi movq %r15, -8(%rsp) jmp .L12 .L24: movl %eax, %esi orl %edi, %esi js .L14 cmpl %eax, %r8d jle .L14 leal (%rax,%rbp), %esi movslq %esi, %rsi movq -16(%rsp), %rcx movss (%rcx,%rsi,4), %xmm1 movslq %edx, %rsi movq -8(%rsp), %rcx mulss (%rcx,%rsi,4), %xmm1 addss %xmm1, %xmm0 jmp .L14 .L16: movl %r15d, %eax movq -24(%rsp), %rsi movss %xmm0, -4(%rsi,%r9,4) leaq 1(%r9), %rsi addl $1, %ebx cmpq %r14, %r9 je .L23 movq %rsi, %r9 .L17: movl %eax, %edi movl %r12d, %ebp movl $0, %r10d pxor %xmm0, %xmm0 movl %eax, %r15d .L13: movl %ebx, %eax movl %r10d, %edx movq %rcx, -32(%rsp) .L15: cmpl %edi, %r11d jg .L24 .L14: addl $1, %edx addl $1, %eax cmpl %r9d, %eax jne .L15 movq -32(%rsp), %rcx addl %r8d, %ebp addl $2, %r10d addl $1, %edi cmpl $4, %r10d jne .L13 jmp .L16 .L23: movq %rcx, %rsi movl %r13d, %ecx .L19: addl $1, %eax addl %r8d, %r12d cmpl %ecx, %eax je .L10 .L12: testl %r8d, %r8d jle .L19 leal (%r12,%r8), %edx movslq %edx, %rdx leaq (%rsi,%rdx,4), %rdx movl $-1, %ebx movl $1, %r9d movq %rdx, -24(%rsp) movl %ecx, %r13d movq %rsi, %rcx jmp .L17 .L10: popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z11host_conv2dPfS_S_ii, .-_Z11host_conv2dPfS_S_ii .globl _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii .type _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii, @function _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L29 .L25: movq 136(%rsp), %rax subq %fs:40, %rax jne .L30 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10gpu_conv2dPfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L25 .L30: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii, .-_Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii .globl _Z10gpu_conv2dPfS_S_ii .type _Z10gpu_conv2dPfS_S_ii, @function _Z10gpu_conv2dPfS_S_ii: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z10gpu_conv2dPfS_S_ii, .-_Z10gpu_conv2dPfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string " h_out[%d][%d]: %f" .LC3: .string " d_out[%d][%d]: %f" .LC4: .string "Success!!\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $72, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $144, %edi call malloc@PLT movq %rax, %rbx movl $16, %edi call malloc@PLT movq %rax, %rbp movl $144, %edi call malloc@PLT movq %rax, %r13 leaq 8(%rsp), %rdi movl $1, %edx movl $144, %esi call cudaMallocManaged@PLT leaq 16(%rsp), %rdi movl $1, %edx movl $16, %esi call cudaMallocManaged@PLT leaq 24(%rsp), %rdi movl $1, %edx movl $144, %esi call cudaMallocManaged@PLT movss .LC1(%rip), %xmm0 movl $6, %edx movl $6, %esi movq 8(%rsp), %rdi call _Z4initPfiif movss .LC1(%rip), %xmm0 movl $2, %edx movl $2, %esi movq 16(%rsp), %rdi call _Z4initPfiif pxor %xmm0, %xmm0 movl $6, %edx movl $6, %esi movq 24(%rsp), %rdi call _Z4initPfiif movl $3, 32(%rsp) movl $3, 36(%rsp) movl $1, 40(%rsp) movl $3, 44(%rsp) movl $3, 48(%rsp) movl $1, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movl $1, %ecx movq 44(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L45 .L34: call cudaDeviceSynchronize@PLT movss .LC1(%rip), %xmm0 movl $6, %edx movl $6, %esi movq %rbx, %rdi call _Z4initPfiif movss .LC1(%rip), %xmm0 movl $2, %edx movl $2, %esi movq %rbp, %rdi call _Z4initPfiif pxor %xmm0, %xmm0 movl $6, %edx movl $6, %esi movq %r13, %rdi call _Z4initPfiif movl $6, %r8d movl $6, %ecx movq %rbp, %rdx movq %rbx, %rsi movq %r13, %rdi call _Z11host_conv2dPfS_S_ii movq 24(%rsp), %rax movl $0, %edx movl $0, %r12d jmp .L35 .L45: movl $6, %r8d movl $6, %ecx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq 24(%rsp), %rdi call _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii jmp .L34 .L42: cvtss2sd %xmm0, %xmm0 movl %ebp, %ecx movl %r12d, %edx leaq .LC2(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 24(%rsp), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax,%rbx), %xmm0 movl %ebp, %ecx movl %r12d, %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT .L38: movq 56(%rsp), %rax subq %fs:40, %rax jne .L46 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L47: .cfi_restore_state addl $1, %r12d addq $24, %rdx cmpl $6, %r12d je .L40 .L35: movq %rdx, %rbx movl $0, %ebp .L39: movss 0(%r13,%rbx), %xmm0 ucomiss (%rax,%rbx), %xmm0 jp .L42 jne .L42 addl $1, %ebp addq $4, %rbx cmpl $6, %ebp jne .L39 jmp .L47 .L40: leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L38 .L46: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC5: .string "_Z10gpu_conv2dPfS_S_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z10gpu_conv2dPfS_S_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC1: .long 1065353216 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// Assumption is input size == output size #include <stdio.h> #include <stdlib.h> #define mask_width 2 #define block_size o_tile_width + mask_width - 1 #define o_tile_width 2 __global__ void gpu_conv2d(float *d_out, float *d_in, float *d_filter, int height, int width){ __shared__ float sh_din[block_size][block_size]; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*o_tile_width + ty; int col_o = blockIdx.x * o_tile_width + tx; int diff = mask_width - 1; int row_i = row_o - diff; int col_i = col_o - diff; if ((row_i >= 0 && row_i < height) && (col_i >=0 && col_i < width )){ sh_din[ty][tx] = d_in[row_i*width + col_i]; } else{ sh_din[ty][tx] = 0.0; } __syncthreads(); float output = 0.0; if (tx < o_tile_width && ty < o_tile_width){ for (int i=0; i < mask_width; i++){ for (int j =0; j < mask_width; j++){ output += d_filter[i*mask_width + j] * sh_din[ty+i][tx+j]; } } } if (tx < o_tile_width && ty < o_tile_width){ d_out[row_o*width + col_o] = output; } } void init(float *arr, int h, int w, float val){ for (int r=0; r < h; r++){ for (int c=0; c < w; c++){ arr[r*w + c] = val; } } } void host_conv2d(float *h_out, float *h_in, float *h_filter, int height, int width){ for (int r=0; r<height; r++){ for (int c =0; c < width; c++){ float output = 0.0; int r_i = r - mask_width + 1; int c_i = c - mask_width + 1; //printf("r_i: %d , c_i: %d\n", r_i, c_i); for (int mr=0; mr<mask_width; mr++){ for (int mc=0; mc< mask_width; mc++){ if ( ((r_i+mr) >= 0 && (r_i + mr) < height) && ((c_i+mc) >=0 && (c_i+mc) < width) ) output+= h_in[(mr + r_i)*width + (c_i+mc)] * h_filter[mr*mask_width + mc]; } } h_out[r*width + c] = output; } } } int main(){ float *d_in, *d_filter, *d_out; float *h_in, *h_filter, *h_out; // Only for checking. Not needed for functionality int height = 6; int width = 6; size_t size_in = height*width*sizeof(float); size_t size_filter = mask_width*mask_width*sizeof(float); size_t size_out = height*width*sizeof(float); h_in = (float*) malloc (size_in); h_filter = (float*) malloc (size_filter); h_out = (float*) malloc (size_out); cudaMallocManaged(&d_in, size_in); cudaMallocManaged(&d_filter, size_filter); cudaMallocManaged(&d_out, size_out); init(d_in, height, width, 1.0); init(d_filter, mask_width, mask_width, 1.0); init(d_out, height, width, 0.0); dim3 num_threads (block_size, block_size); dim3 num_blocks ((height-1)/(o_tile_width) + 1, (width-1)/(o_tile_width) + 1) ; gpu_conv2d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, height, width); cudaDeviceSynchronize(); init(h_in, height, width, 1.0); init(h_filter, mask_width, mask_width, 1.0); init(h_out, height, width, 0.0); host_conv2d(h_out, h_in, h_filter, height, width); for(int i=0; i<height; i++){ for (int j=0; j<width; j++){ if (d_out[i*width +j] != h_out[i*width +j]){ printf(" h_out[%d][%d]: %f", i, j, h_out[i*width + j]); printf(" d_out[%d][%d]: %f", i, j, d_out[i*width + j]); return 0; } } } /* for (int i =0; i<dout_size; i++) if (d_out[i] != h_out[i]){ printf("Program failed!! Check the idx: %d", i); return 0; } */ printf("Success!!\n"); }
// Assumption is input size == output size #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define mask_width 2 #define block_size o_tile_width + mask_width - 1 #define o_tile_width 2 __global__ void gpu_conv2d(float *d_out, float *d_in, float *d_filter, int height, int width){ __shared__ float sh_din[block_size][block_size]; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*o_tile_width + ty; int col_o = blockIdx.x * o_tile_width + tx; int diff = mask_width - 1; int row_i = row_o - diff; int col_i = col_o - diff; if ((row_i >= 0 && row_i < height) && (col_i >=0 && col_i < width )){ sh_din[ty][tx] = d_in[row_i*width + col_i]; } else{ sh_din[ty][tx] = 0.0; } __syncthreads(); float output = 0.0; if (tx < o_tile_width && ty < o_tile_width){ for (int i=0; i < mask_width; i++){ for (int j =0; j < mask_width; j++){ output += d_filter[i*mask_width + j] * sh_din[ty+i][tx+j]; } } } if (tx < o_tile_width && ty < o_tile_width){ d_out[row_o*width + col_o] = output; } } void init(float *arr, int h, int w, float val){ for (int r=0; r < h; r++){ for (int c=0; c < w; c++){ arr[r*w + c] = val; } } } void host_conv2d(float *h_out, float *h_in, float *h_filter, int height, int width){ for (int r=0; r<height; r++){ for (int c =0; c < width; c++){ float output = 0.0; int r_i = r - mask_width + 1; int c_i = c - mask_width + 1; //printf("r_i: %d , c_i: %d\n", r_i, c_i); for (int mr=0; mr<mask_width; mr++){ for (int mc=0; mc< mask_width; mc++){ if ( ((r_i+mr) >= 0 && (r_i + mr) < height) && ((c_i+mc) >=0 && (c_i+mc) < width) ) output+= h_in[(mr + r_i)*width + (c_i+mc)] * h_filter[mr*mask_width + mc]; } } h_out[r*width + c] = output; } } } int main(){ float *d_in, *d_filter, *d_out; float *h_in, *h_filter, *h_out; // Only for checking. Not needed for functionality int height = 6; int width = 6; size_t size_in = height*width*sizeof(float); size_t size_filter = mask_width*mask_width*sizeof(float); size_t size_out = height*width*sizeof(float); h_in = (float*) malloc (size_in); h_filter = (float*) malloc (size_filter); h_out = (float*) malloc (size_out); hipMallocManaged(&d_in, size_in); hipMallocManaged(&d_filter, size_filter); hipMallocManaged(&d_out, size_out); init(d_in, height, width, 1.0); init(d_filter, mask_width, mask_width, 1.0); init(d_out, height, width, 0.0); dim3 num_threads (block_size, block_size); dim3 num_blocks ((height-1)/(o_tile_width) + 1, (width-1)/(o_tile_width) + 1) ; gpu_conv2d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, height, width); hipDeviceSynchronize(); init(h_in, height, width, 1.0); init(h_filter, mask_width, mask_width, 1.0); init(h_out, height, width, 0.0); host_conv2d(h_out, h_in, h_filter, height, width); for(int i=0; i<height; i++){ for (int j=0; j<width; j++){ if (d_out[i*width +j] != h_out[i*width +j]){ printf(" h_out[%d][%d]: %f", i, j, h_out[i*width + j]); printf(" d_out[%d][%d]: %f", i, j, d_out[i*width + j]); return 0; } } } /* for (int i =0; i<dout_size; i++) if (d_out[i] != h_out[i]){ printf("Program failed!! Check the idx: %d", i); return 0; } */ printf("Success!!\n"); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
// Assumption is input size == output size #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define mask_width 2 #define block_size o_tile_width + mask_width - 1 #define o_tile_width 2 __global__ void gpu_conv2d(float *d_out, float *d_in, float *d_filter, int height, int width){ __shared__ float sh_din[block_size][block_size]; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*o_tile_width + ty; int col_o = blockIdx.x * o_tile_width + tx; int diff = mask_width - 1; int row_i = row_o - diff; int col_i = col_o - diff; if ((row_i >= 0 && row_i < height) && (col_i >=0 && col_i < width )){ sh_din[ty][tx] = d_in[row_i*width + col_i]; } else{ sh_din[ty][tx] = 0.0; } __syncthreads(); float output = 0.0; if (tx < o_tile_width && ty < o_tile_width){ for (int i=0; i < mask_width; i++){ for (int j =0; j < mask_width; j++){ output += d_filter[i*mask_width + j] * sh_din[ty+i][tx+j]; } } } if (tx < o_tile_width && ty < o_tile_width){ d_out[row_o*width + col_o] = output; } } void init(float *arr, int h, int w, float val){ for (int r=0; r < h; r++){ for (int c=0; c < w; c++){ arr[r*w + c] = val; } } } void host_conv2d(float *h_out, float *h_in, float *h_filter, int height, int width){ for (int r=0; r<height; r++){ for (int c =0; c < width; c++){ float output = 0.0; int r_i = r - mask_width + 1; int c_i = c - mask_width + 1; //printf("r_i: %d , c_i: %d\n", r_i, c_i); for (int mr=0; mr<mask_width; mr++){ for (int mc=0; mc< mask_width; mc++){ if ( ((r_i+mr) >= 0 && (r_i + mr) < height) && ((c_i+mc) >=0 && (c_i+mc) < width) ) output+= h_in[(mr + r_i)*width + (c_i+mc)] * h_filter[mr*mask_width + mc]; } } h_out[r*width + c] = output; } } } int main(){ float *d_in, *d_filter, *d_out; float *h_in, *h_filter, *h_out; // Only for checking. Not needed for functionality int height = 6; int width = 6; size_t size_in = height*width*sizeof(float); size_t size_filter = mask_width*mask_width*sizeof(float); size_t size_out = height*width*sizeof(float); h_in = (float*) malloc (size_in); h_filter = (float*) malloc (size_filter); h_out = (float*) malloc (size_out); hipMallocManaged(&d_in, size_in); hipMallocManaged(&d_filter, size_filter); hipMallocManaged(&d_out, size_out); init(d_in, height, width, 1.0); init(d_filter, mask_width, mask_width, 1.0); init(d_out, height, width, 0.0); dim3 num_threads (block_size, block_size); dim3 num_blocks ((height-1)/(o_tile_width) + 1, (width-1)/(o_tile_width) + 1) ; gpu_conv2d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, height, width); hipDeviceSynchronize(); init(h_in, height, width, 1.0); init(h_filter, mask_width, mask_width, 1.0); init(h_out, height, width, 0.0); host_conv2d(h_out, h_in, h_filter, height, width); for(int i=0; i<height; i++){ for (int j=0; j<width; j++){ if (d_out[i*width +j] != h_out[i*width +j]){ printf(" h_out[%d][%d]: %f", i, j, h_out[i*width + j]); printf(" d_out[%d][%d]: %f", i, j, d_out[i*width + j]); return 0; } } } /* for (int i =0; i<dout_size; i++) if (d_out[i] != h_out[i]){ printf("Program failed!! Check the idx: %d", i); return 0; } */ printf("Success!!\n"); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10gpu_conv2dPfS_S_ii .globl _Z10gpu_conv2dPfS_S_ii .p2align 8 .type _Z10gpu_conv2dPfS_S_ii,@function _Z10gpu_conv2dPfS_S_ii: s_load_b32 s6, s[0:1], 0x1c v_bfe_u32 v3, v0, 10, 10 v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v4, 0x3ff, v0 v_mov_b32_e32 v5, 0 s_mov_b32 s4, exec_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshl_add_u32 v2, s15, 1, v3 v_lshl_add_u32 v0, s14, 1, v4 s_delay_alu instid0(VALU_DEP_2) v_cmpx_lt_i32_e32 0, v2 s_cbranch_execz .LBB0_4 s_load_b32 s2, s[0:1], 0x18 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_cmp_lt_i32_e64 s3, 0, v0 v_mov_b32_e32 v5, 0 s_waitcnt lgkmcnt(0) v_cmp_ge_i32_e32 vcc_lo, s2, v2 v_cmp_ge_i32_e64 s2, s6, v0 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s3, s2, s3 s_and_saveexec_b32 s2, s3 s_cbranch_execz .LBB0_3 v_add_nc_u32_e32 v5, -1, v2 s_load_b64 s[8:9], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v5, v5, s6 v_add3_u32 v5, v0, v5, -1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v6, 31, v5 v_lshlrev_b64 v[5:6], 2, v[5:6] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v5, vcc_lo, s8, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s9, v6, vcc_lo global_load_b32 v5, v[5:6], off .LBB0_3: s_or_b32 exec_lo, exec_lo, s2 .LBB0_4: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) s_or_b32 exec_lo, exec_lo, s4 v_lshlrev_b32_e32 v6, 2, v4 v_or_b32_e32 v4, v4, v3 v_mad_u32_u24 v3, v3, 12, v6 s_delay_alu instid0(VALU_DEP_2) v_cmp_gt_u32_e32 vcc_lo, 2, v4 s_waitcnt vmcnt(0) ds_store_b32 v3, v5 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_and_saveexec_b32 s7, vcc_lo s_cbranch_execz .LBB0_10 s_load_b64 s[2:3], s[0:1], 0x10 v_mov_b32_e32 v1, 0 s_mov_b32 s8, 0 .p2align 6 .LBB0_6: s_waitcnt lgkmcnt(0) s_mov_b64 s[4:5], s[2:3] s_mov_b32 s9, 0 .LBB0_7: s_delay_alu instid0(SALU_CYCLE_1) v_add_nc_u32_e32 v4, s9, v3 s_load_b32 s10, s[4:5], 0x0 s_add_i32 s9, s9, 4 s_add_u32 s4, s4, 4 s_addc_u32 s5, s5, 0 ds_load_b32 v4, v4 s_cmp_lg_u32 s9, 4 s_waitcnt lgkmcnt(0) v_fmac_f32_e32 v1, s10, v4 s_cbranch_scc0 .LBB0_7 s_add_i32 s4, s8, 1 v_add_nc_u32_e32 v3, 12, v3 s_add_u32 s2, s2, 8 s_addc_u32 s3, s3, 0 s_cmp_lg_u32 s8, 0 s_cbranch_scc1 .LBB0_10 s_mov_b32 s8, s4 s_branch .LBB0_6 .LBB0_10: s_or_b32 exec_lo, exec_lo, s7 s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_12 s_load_b64 s[0:1], s[0:1], 0x0 v_mad_u64_u32 v[3:4], null, v2, s6, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[2:3], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_store_b32 v[2:3], v1, off .LBB0_12: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10gpu_conv2dPfS_S_ii .amdhsa_group_segment_fixed_size 36 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 32 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10gpu_conv2dPfS_S_ii, .Lfunc_end0-_Z10gpu_conv2dPfS_S_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value .group_segment_fixed_size: 36 .kernarg_segment_align: 8 .kernarg_segment_size: 32 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10gpu_conv2dPfS_S_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10gpu_conv2dPfS_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// Assumption is input size == output size #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define mask_width 2 #define block_size o_tile_width + mask_width - 1 #define o_tile_width 2 __global__ void gpu_conv2d(float *d_out, float *d_in, float *d_filter, int height, int width){ __shared__ float sh_din[block_size][block_size]; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*o_tile_width + ty; int col_o = blockIdx.x * o_tile_width + tx; int diff = mask_width - 1; int row_i = row_o - diff; int col_i = col_o - diff; if ((row_i >= 0 && row_i < height) && (col_i >=0 && col_i < width )){ sh_din[ty][tx] = d_in[row_i*width + col_i]; } else{ sh_din[ty][tx] = 0.0; } __syncthreads(); float output = 0.0; if (tx < o_tile_width && ty < o_tile_width){ for (int i=0; i < mask_width; i++){ for (int j =0; j < mask_width; j++){ output += d_filter[i*mask_width + j] * sh_din[ty+i][tx+j]; } } } if (tx < o_tile_width && ty < o_tile_width){ d_out[row_o*width + col_o] = output; } } void init(float *arr, int h, int w, float val){ for (int r=0; r < h; r++){ for (int c=0; c < w; c++){ arr[r*w + c] = val; } } } void host_conv2d(float *h_out, float *h_in, float *h_filter, int height, int width){ for (int r=0; r<height; r++){ for (int c =0; c < width; c++){ float output = 0.0; int r_i = r - mask_width + 1; int c_i = c - mask_width + 1; //printf("r_i: %d , c_i: %d\n", r_i, c_i); for (int mr=0; mr<mask_width; mr++){ for (int mc=0; mc< mask_width; mc++){ if ( ((r_i+mr) >= 0 && (r_i + mr) < height) && ((c_i+mc) >=0 && (c_i+mc) < width) ) output+= h_in[(mr + r_i)*width + (c_i+mc)] * h_filter[mr*mask_width + mc]; } } h_out[r*width + c] = output; } } } int main(){ float *d_in, *d_filter, *d_out; float *h_in, *h_filter, *h_out; // Only for checking. Not needed for functionality int height = 6; int width = 6; size_t size_in = height*width*sizeof(float); size_t size_filter = mask_width*mask_width*sizeof(float); size_t size_out = height*width*sizeof(float); h_in = (float*) malloc (size_in); h_filter = (float*) malloc (size_filter); h_out = (float*) malloc (size_out); hipMallocManaged(&d_in, size_in); hipMallocManaged(&d_filter, size_filter); hipMallocManaged(&d_out, size_out); init(d_in, height, width, 1.0); init(d_filter, mask_width, mask_width, 1.0); init(d_out, height, width, 0.0); dim3 num_threads (block_size, block_size); dim3 num_blocks ((height-1)/(o_tile_width) + 1, (width-1)/(o_tile_width) + 1) ; gpu_conv2d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, height, width); hipDeviceSynchronize(); init(h_in, height, width, 1.0); init(h_filter, mask_width, mask_width, 1.0); init(h_out, height, width, 0.0); host_conv2d(h_out, h_in, h_filter, height, width); for(int i=0; i<height; i++){ for (int j=0; j<width; j++){ if (d_out[i*width +j] != h_out[i*width +j]){ printf(" h_out[%d][%d]: %f", i, j, h_out[i*width + j]); printf(" d_out[%d][%d]: %f", i, j, d_out[i*width + j]); return 0; } } } /* for (int i =0; i<dout_size; i++) if (d_out[i] != h_out[i]){ printf("Program failed!! Check the idx: %d", i); return 0; } */ printf("Success!!\n"); }
.text .file "conv2d.hip" .globl _Z25__device_stub__gpu_conv2dPfS_S_ii # -- Begin function _Z25__device_stub__gpu_conv2dPfS_S_ii .p2align 4, 0x90 .type _Z25__device_stub__gpu_conv2dPfS_S_ii,@function _Z25__device_stub__gpu_conv2dPfS_S_ii: # @_Z25__device_stub__gpu_conv2dPfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z10gpu_conv2dPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z25__device_stub__gpu_conv2dPfS_S_ii, .Lfunc_end0-_Z25__device_stub__gpu_conv2dPfS_S_ii .cfi_endproc # -- End function .globl _Z4initPfiif # -- Begin function _Z4initPfiif .p2align 4, 0x90 .type _Z4initPfiif,@function _Z4initPfiif: # @_Z4initPfiif .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB1_6 # %bb.1: # %.preheader.lr.ph movl %esi, %eax movl %edx, %ecx xorl %esi, %esi xorl %r8d, %r8d jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 incq %r8 addl %edx, %esi cmpq %rax, %r8 je .LBB1_6 .LBB1_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 testl %edx, %edx jle .LBB1_5 # %bb.3: # %.lr.ph # in Loop: Header=BB1_2 Depth=1 movl %esi, %r9d leaq (%rdi,%r9,4), %r9 xorl %r10d, %r10d .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 movss %xmm0, (%r9,%r10,4) incq %r10 cmpq %r10, %rcx jne .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge14 retq .Lfunc_end1: .size _Z4initPfiif, .Lfunc_end1-_Z4initPfiif .cfi_endproc # -- End function .globl _Z11host_conv2dPfS_S_ii # -- Begin function _Z11host_conv2dPfS_S_ii .p2align 4, 0x90 .type _Z11host_conv2dPfS_S_ii,@function _Z11host_conv2dPfS_S_ii: # @_Z11host_conv2dPfS_S_ii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, -40(%rsp) # 8-byte Spill movq %rdi, -16(%rsp) # 8-byte Spill testl %ecx, %ecx jle .LBB2_13 # %bb.1: # %.preheader45.lr.ph movq %rdx, %rax movslq %r8d, %rdx movl %ecx, %esi movq %rsi, -8(%rsp) # 8-byte Spill movl %edx, %r10d movq %rdx, -24(%rsp) # 8-byte Spill leaq (,%rdx,4), %r11 subq %r11, -40(%rsp) # 8-byte Folded Spill xorl %edx, %edx movq %rdx, -32(%rsp) # 8-byte Spill jmp .LBB2_2 .p2align 4, 0x90 .LBB2_12: # %._crit_edge # in Loop: Header=BB2_2 Depth=1 movq -32(%rsp), %rsi # 8-byte Reload incq %rsi addq %r11, -40(%rsp) # 8-byte Folded Spill movq %rsi, %rdx movq %rsi, -32(%rsp) # 8-byte Spill cmpq -8(%rsp), %rsi # 8-byte Folded Reload je .LBB2_13 .LBB2_2: # %.preheader45 # =>This Loop Header: Depth=1 # Child Loop BB2_4 Depth 2 # Child Loop BB2_5 Depth 3 # Child Loop BB2_6 Depth 4 testl %r8d, %r8d jle .LBB2_12 # %bb.3: # %.lr.ph # in Loop: Header=BB2_2 Depth=1 movq -32(%rsp), %rdx # 8-byte Reload leaq -1(%rdx), %r14 imulq -24(%rsp), %rdx # 8-byte Folded Reload movq -16(%rsp), %rsi # 8-byte Reload leaq (%rsi,%rdx,4), %r15 movq -40(%rsp), %r12 # 8-byte Reload xorl %r13d, %r13d jmp .LBB2_4 .p2align 4, 0x90 .LBB2_11: # in Loop: Header=BB2_4 Depth=2 movss %xmm0, (%r15,%r13,4) incq %r13 addq $4, %r12 cmpq %r10, %r13 je .LBB2_12 .LBB2_4: # Parent Loop BB2_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_5 Depth 3 # Child Loop BB2_6 Depth 4 xorps %xmm0, %xmm0 movq %r12, %rbp movq %rax, %rdx xorl %edi, %edi jmp .LBB2_5 .p2align 4, 0x90 .LBB2_10: # in Loop: Header=BB2_5 Depth=3 leaq 1(%rdi), %r9 addq $8, %rdx addq %r11, %rbp testq %rdi, %rdi movq %r9, %rdi jne .LBB2_11 .LBB2_5: # %.preheader # Parent Loop BB2_2 Depth=1 # Parent Loop BB2_4 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB2_6 Depth 4 leal (%r14,%rdi), %r9d movq $-1, %rbx jmp .LBB2_6 .p2align 4, 0x90 .LBB2_9: # in Loop: Header=BB2_6 Depth=4 incq %rbx jne .LBB2_10 .LBB2_6: # Parent Loop BB2_2 Depth=1 # Parent Loop BB2_4 Depth=2 # Parent Loop BB2_5 Depth=3 # => This Inner Loop Header: Depth=4 cmpl %ecx, %r9d jae .LBB2_9 # %bb.7: # in Loop: Header=BB2_6 Depth=4 leal (%rbx,%r13), %esi cmpl %r8d, %esi jae .LBB2_9 # %bb.8: # in Loop: Header=BB2_6 Depth=4 movss (%rbp,%rbx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss 4(%rdx,%rbx,4), %xmm1 addss %xmm1, %xmm0 jmp .LBB2_9 .LBB2_13: # %._crit_edge52 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z11host_conv2dPfS_S_ii, .Lfunc_end2-_Z11host_conv2dPfS_S_ii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $144, %edi callq malloc movq %rax, %r14 movq %rax, %r12 movl $16, %edi callq malloc movq %rax, %r15 movl $144, %edi callq malloc movq %rax, %rbx leaq 32(%rsp), %rdi movl $144, %esi movl $1, %edx callq hipMallocManaged leaq 24(%rsp), %rdi movl $16, %esi movl $1, %edx callq hipMallocManaged leaq 8(%rsp), %rdi movl $144, %esi movl $1, %edx callq hipMallocManaged xorl %eax, %eax movq 32(%rsp), %rcx .p2align 4, 0x90 .LBB3_1: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB3_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB3_2: # Parent Loop BB3_1 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000 incq %rdx cmpq $6, %rdx jne .LBB3_2 # %bb.3: # %._crit_edge.i # in Loop: Header=BB3_1 Depth=1 incq %rax addq $24, %rcx cmpq $6, %rax jne .LBB3_1 # %bb.4: # %_Z4initPfiif.exit movq 24(%rsp), %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB3_5: # %.preheader.i70 # =>This Loop Header: Depth=1 # Child Loop BB3_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB3_6: # Parent Loop BB3_5 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rax,%rdx,4) # imm = 0x3F800000 incq %rdx cmpq $2, %rdx jne .LBB3_6 # %bb.7: # %._crit_edge.i75 # in Loop: Header=BB3_5 Depth=1 incq %rcx addq $8, %rax cmpq $2, %rcx jne .LBB3_5 # %bb.8: # %_Z4initPfiif.exit78 movq 8(%rsp), %rax xorps %xmm0, %xmm0 movups %xmm0, 128(%rax) movups %xmm0, 112(%rax) movups %xmm0, 96(%rax) movups %xmm0, 80(%rax) movups %xmm0, 64(%rax) movups %xmm0, 48(%rax) movups %xmm0, 32(%rax) movups %xmm0, 16(%rax) movups %xmm0, (%rax) movabsq $12884901891, %rdi # imm = 0x300000003 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_10 # %bb.9: movq 8(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $6, 20(%rsp) movl $6, 16(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z10gpu_conv2dPfS_S_ii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_10: callq hipDeviceSynchronize xorl %eax, %eax .p2align 4, 0x90 .LBB3_11: # %.preheader.i88 # =>This Loop Header: Depth=1 # Child Loop BB3_12 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB3_12: # Parent Loop BB3_11 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%r12,%rcx,4) # imm = 0x3F800000 incq %rcx cmpq $6, %rcx jne .LBB3_12 # %bb.13: # %._crit_edge.i93 # in Loop: Header=BB3_11 Depth=1 incq %rax addq $24, %r12 cmpq $6, %rax jne .LBB3_11 # %bb.14: # %.preheader.i97.preheader xorl %eax, %eax movq %r15, %rcx .p2align 4, 0x90 .LBB3_15: # %.preheader.i97 # =>This Loop Header: Depth=1 # Child Loop BB3_16 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB3_16: # Parent Loop BB3_15 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000 incq %rdx cmpq $2, %rdx jne .LBB3_16 # %bb.17: # %._crit_edge.i102 # in Loop: Header=BB3_15 Depth=1 incq %rax addq $8, %rcx cmpq $2, %rax jne .LBB3_15 # %bb.18: # %.preheader.i106.preheader xorps %xmm0, %xmm0 movups %xmm0, 128(%rbx) movups %xmm0, 112(%rbx) movups %xmm0, 96(%rbx) movups %xmm0, 80(%rbx) movups %xmm0, 64(%rbx) movups %xmm0, 48(%rbx) movups %xmm0, 32(%rbx) movups %xmm0, 16(%rbx) movups %xmm0, (%rbx) addq $-28, %r14 xorl %eax, %eax jmp .LBB3_19 .p2align 4, 0x90 .LBB3_28: # %._crit_edge.i119 # in Loop: Header=BB3_19 Depth=1 incq %rax addq $24, %r14 cmpq $6, %rax je .LBB3_29 .LBB3_19: # %.preheader45.i # =>This Loop Header: Depth=1 # Child Loop BB3_20 Depth 2 # Child Loop BB3_21 Depth 3 # Child Loop BB3_22 Depth 4 leaq -1(%rax), %rcx leaq (%rax,%rax,2), %rdx leaq (%rbx,%rdx,8), %rdx movq %r14, %rsi xorl %edi, %edi jmp .LBB3_20 .p2align 4, 0x90 .LBB3_27: # in Loop: Header=BB3_20 Depth=2 movss %xmm0, (%rdx,%rdi,4) incq %rdi addq $4, %rsi cmpq $6, %rdi je .LBB3_28 .LBB3_20: # Parent Loop BB3_19 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB3_21 Depth 3 # Child Loop BB3_22 Depth 4 xorps %xmm0, %xmm0 movq %rsi, %r8 movq %r15, %r9 xorl %r10d, %r10d jmp .LBB3_21 .p2align 4, 0x90 .LBB3_26: # in Loop: Header=BB3_21 Depth=3 leaq 1(%r10), %r11 addq $8, %r9 addq $24, %r8 testq %r10, %r10 movq %r11, %r10 jne .LBB3_27 .LBB3_21: # %.preheader.i115 # Parent Loop BB3_19 Depth=1 # Parent Loop BB3_20 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB3_22 Depth 4 leal (%rcx,%r10), %r11d movq $-1, %r12 jmp .LBB3_22 .p2align 4, 0x90 .LBB3_25: # in Loop: Header=BB3_22 Depth=4 incq %r12 jne .LBB3_26 .LBB3_22: # Parent Loop BB3_19 Depth=1 # Parent Loop BB3_20 Depth=2 # Parent Loop BB3_21 Depth=3 # => This Inner Loop Header: Depth=4 cmpl $5, %r11d ja .LBB3_25 # %bb.23: # in Loop: Header=BB3_22 Depth=4 leal (%rdi,%r12), %ebp cmpl $5, %ebp ja .LBB3_25 # %bb.24: # in Loop: Header=BB3_22 Depth=4 movss 4(%r8,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss 4(%r9,%r12,4), %xmm1 addss %xmm1, %xmm0 jmp .LBB3_25 .LBB3_29: # %_Z11host_conv2dPfS_S_ii.exit.preheader movq 8(%rsp), %rax xorl %ecx, %ecx xorl %r14d, %r14d xorl %r12d, %r12d .LBB3_30: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_31 Depth 2 movq %rcx, %r13 xorl %r15d, %r15d .p2align 4, 0x90 .LBB3_31: # Parent Loop BB3_30 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rax,%r13), %xmm1 # xmm1 = mem[0],zero,zero,zero movss (%rbx,%r13), %xmm0 # xmm0 = mem[0],zero,zero,zero ucomiss %xmm0, %xmm1 jne .LBB3_32 jp .LBB3_32 # %bb.33: # in Loop: Header=BB3_31 Depth=2 decq %r15 addq $4, %r13 cmpq $-6, %r15 jne .LBB3_31 # %bb.34: # %.critedge # in Loop: Header=BB3_30 Depth=1 cmpq $5, %r14 leaq 1(%r14), %rdx setae %r12b addq $24, %rcx movq %rdx, %r14 cmpq $6, %rdx jne .LBB3_30 jmp .LBB3_35 .LBB3_32: negl %r15d cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movl %r14d, %esi movl %r15d, %edx movb $1, %al callq printf movq 8(%rsp), %rax movss (%rax,%r13), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %edi movl %r14d, %esi movl %r15d, %edx movb $1, %al callq printf testb $1, %r12b je .LBB3_36 .LBB3_35: # %.critedge69 movl $.Lstr, %edi callq puts@PLT .LBB3_36: xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10gpu_conv2dPfS_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z10gpu_conv2dPfS_S_ii,@object # @_Z10gpu_conv2dPfS_S_ii .section .rodata,"a",@progbits .globl _Z10gpu_conv2dPfS_S_ii .p2align 3, 0x0 _Z10gpu_conv2dPfS_S_ii: .quad _Z25__device_stub__gpu_conv2dPfS_S_ii .size _Z10gpu_conv2dPfS_S_ii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " h_out[%d][%d]: %f" .size .L.str, 19 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " d_out[%d][%d]: %f" .size .L.str.1, 19 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10gpu_conv2dPfS_S_ii" .size .L__unnamed_1, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Success!!" .size .Lstr, 10 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__gpu_conv2dPfS_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10gpu_conv2dPfS_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z10gpu_conv2dPfS_S_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*0030*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e280000002500 */ /*0040*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e680000002600 */ /*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */ /* 0x000e620000002200 */ /*0060*/ IMAD R5, R5, 0x2, R4 ; /* 0x0000000205057824 */ /* 0x001fca00078e0204 */ /*0070*/ ISETP.GT.AND P1, PT, R5.reuse, c[0x0][0x17c], PT ; /* 0x00005f0005007a0c */ /* 0x040fe40003f24270 */ /*0080*/ LEA R0, R0, R7, 0x1 ; /* 0x0000000700007211 */ /* 0x002fe400078e08ff */ /*0090*/ ISETP.GT.AND P1, PT, R5, RZ, !P1 ; /* 0x000000ff0500720c */ /* 0x000fe40004f24270 */ /*00a0*/ ISETP.GT.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */ /* 0x000fc80003f04270 */ /*00b0*/ ISETP.GT.AND P0, PT, R0, RZ, !P0 ; /* 0x000000ff0000720c */ /* 0x000fc80004704270 */ /*00c0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000703070 */ /*00d0*/ @P0 IADD3 R2, R0, -0x1, RZ ; /* 0xffffffff00020810 */ /* 0x000fe40007ffe0ff */ /*00e0*/ @P0 MOV R3, 0x4 ; /* 0x0000000400030802 */ /* 0x000fc60000000f00 */ /*00f0*/ @P0 IMAD R2, R2, c[0x0][0x17c], R5 ; /* 0x00005f0002020a24 */ /* 0x000fca00078e0205 */ /*0100*/ @P0 IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02020810 */ /* 0x000fca0007ffe0ff */ /*0110*/ @P0 IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002020625 */ /* 0x000fcc00078e0203 */ /*0120*/ @P0 LDG.E R2, [R2.64] ; /* 0x0000000402020981 */ /* 0x000ea2000c1e1900 */ /*0130*/ IMAD R15, R7.reuse, 0x3, R4 ; /* 0x00000003070f7824 */ /* 0x040fe200078e0204 */ /*0140*/ ISETP.GT.AND P1, PT, R7, 0x1, PT ; /* 0x000000010700780c */ /* 0x000fc80003f24270 */ /*0150*/ @!P0 STS [R15.X4], RZ ; /* 0x000000ff0f008388 */ /* 0x0001e20000004800 */ /*0160*/ ISETP.GT.OR P1, PT, R4, 0x1, P1 ; /* 0x000000010400780c */ /* 0x000fc60000f24670 */ /*0170*/ @P0 STS [R15.X4], R2 ; /* 0x000000020f000388 */ /* 0x0041e80000004800 */ /*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0190*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*01a0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff027624 */ /* 0x001fe200078e00ff */ /*01b0*/ MOV R3, c[0x0][0x174] ; /* 0x00005d0000037a02 */ /* 0x000fe20000000f00 */ /*01c0*/ LDS R7, [R15.X4] ; /* 0x000000000f077984 */ /* 0x000e280000004800 */ /*01d0*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */ /* 0x000e28000c1e1900 */ /*01e0*/ LDG.E R6, [R2.64+0x4] ; /* 0x0000040402067981 */ /* 0x000ea8000c1e1900 */ /*01f0*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080402087981 */ /* 0x000ee8000c1e1900 */ /*0200*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c04020a7981 */ /* 0x000f22000c1e1900 */ /*0210*/ IMAD R5, R0, c[0x0][0x17c], R5 ; /* 0x00005f0000057a24 */ /* 0x000fc600078e0205 */ /*0220*/ LDS R9, [R15.X4+0x4] ; /* 0x000004000f097984 */ /* 0x000ea80000004800 */ /*0230*/ LDS R11, [R15.X4+0xc] ; /* 0x00000c000f0b7984 */ /* 0x000ee80000004800 */ /*0240*/ LDS R13, [R15.X4+0x10] ; /* 0x000010000f0d7984 */ /* 0x000f220000004800 */ /*0250*/ FFMA R4, R4, R7, RZ ; /* 0x0000000704047223 */ /* 0x001fc800000000ff */ /*0260*/ FFMA R4, R9, R6, R4 ; /* 0x0000000609047223 */ /* 0x004fe40000000004 */ /*0270*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff067424 */ /* 0x000fe400078e00ff */ /*0280*/ FFMA R8, R11, R8, R4 ; /* 0x000000080b087223 */ /* 0x008fe40000000004 */ /*0290*/ IMAD.WIDE R4, R5, R6, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fc800078e0206 */ /*02a0*/ FFMA R13, R13, R10, R8 ; /* 0x0000000a0d0d7223 */ /* 0x010fca0000000008 */ /*02b0*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */ /* 0x000fe2000c101904 */ /*02c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0300*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0310*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0320*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10gpu_conv2dPfS_S_ii .globl _Z10gpu_conv2dPfS_S_ii .p2align 8 .type _Z10gpu_conv2dPfS_S_ii,@function _Z10gpu_conv2dPfS_S_ii: s_load_b32 s6, s[0:1], 0x1c v_bfe_u32 v3, v0, 10, 10 v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v4, 0x3ff, v0 v_mov_b32_e32 v5, 0 s_mov_b32 s4, exec_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshl_add_u32 v2, s15, 1, v3 v_lshl_add_u32 v0, s14, 1, v4 s_delay_alu instid0(VALU_DEP_2) v_cmpx_lt_i32_e32 0, v2 s_cbranch_execz .LBB0_4 s_load_b32 s2, s[0:1], 0x18 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) v_cmp_lt_i32_e64 s3, 0, v0 v_mov_b32_e32 v5, 0 s_waitcnt lgkmcnt(0) v_cmp_ge_i32_e32 vcc_lo, s2, v2 v_cmp_ge_i32_e64 s2, s6, v0 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s3, s2, s3 s_and_saveexec_b32 s2, s3 s_cbranch_execz .LBB0_3 v_add_nc_u32_e32 v5, -1, v2 s_load_b64 s[8:9], s[0:1], 0x8 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v5, v5, s6 v_add3_u32 v5, v0, v5, -1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v6, 31, v5 v_lshlrev_b64 v[5:6], 2, v[5:6] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v5, vcc_lo, s8, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s9, v6, vcc_lo global_load_b32 v5, v[5:6], off .LBB0_3: s_or_b32 exec_lo, exec_lo, s2 .LBB0_4: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) s_or_b32 exec_lo, exec_lo, s4 v_lshlrev_b32_e32 v6, 2, v4 v_or_b32_e32 v4, v4, v3 v_mad_u32_u24 v3, v3, 12, v6 s_delay_alu instid0(VALU_DEP_2) v_cmp_gt_u32_e32 vcc_lo, 2, v4 s_waitcnt vmcnt(0) ds_store_b32 v3, v5 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_and_saveexec_b32 s7, vcc_lo s_cbranch_execz .LBB0_10 s_load_b64 s[2:3], s[0:1], 0x10 v_mov_b32_e32 v1, 0 s_mov_b32 s8, 0 .p2align 6 .LBB0_6: s_waitcnt lgkmcnt(0) s_mov_b64 s[4:5], s[2:3] s_mov_b32 s9, 0 .LBB0_7: s_delay_alu instid0(SALU_CYCLE_1) v_add_nc_u32_e32 v4, s9, v3 s_load_b32 s10, s[4:5], 0x0 s_add_i32 s9, s9, 4 s_add_u32 s4, s4, 4 s_addc_u32 s5, s5, 0 ds_load_b32 v4, v4 s_cmp_lg_u32 s9, 4 s_waitcnt lgkmcnt(0) v_fmac_f32_e32 v1, s10, v4 s_cbranch_scc0 .LBB0_7 s_add_i32 s4, s8, 1 v_add_nc_u32_e32 v3, 12, v3 s_add_u32 s2, s2, 8 s_addc_u32 s3, s3, 0 s_cmp_lg_u32 s8, 0 s_cbranch_scc1 .LBB0_10 s_mov_b32 s8, s4 s_branch .LBB0_6 .LBB0_10: s_or_b32 exec_lo, exec_lo, s7 s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_12 s_load_b64 s[0:1], s[0:1], 0x0 v_mad_u64_u32 v[3:4], null, v2, s6, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[2:3], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_store_b32 v[2:3], v1, off .LBB0_12: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10gpu_conv2dPfS_S_ii .amdhsa_group_segment_fixed_size 36 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 32 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10gpu_conv2dPfS_S_ii, .Lfunc_end0-_Z10gpu_conv2dPfS_S_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value .group_segment_fixed_size: 36 .kernarg_segment_align: 8 .kernarg_segment_size: 32 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10gpu_conv2dPfS_S_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10gpu_conv2dPfS_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00022e97_00000000-6_conv2d.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z4initPfiif .type _Z4initPfiif, @function _Z4initPfiif: .LFB2057: .cfi_startproc endbr64 testl %esi, %esi jle .L3 movl $0, %r9d movl $0, %r8d movslq %edx, %r10 jmp .L5 .L7: movslq %r9d, %rcx leaq (%rdi,%rcx,4), %rax addq %r10, %rcx leaq (%rdi,%rcx,4), %rcx .L6: movss %xmm0, (%rax) addq $4, %rax cmpq %rcx, %rax jne .L6 .L8: addl $1, %r8d addl %edx, %r9d cmpl %r8d, %esi je .L3 .L5: testl %edx, %edx jg .L7 jmp .L8 .L3: ret .cfi_endproc .LFE2057: .size _Z4initPfiif, .-_Z4initPfiif .globl _Z11host_conv2dPfS_S_ii .type _Z11host_conv2dPfS_S_ii, @function _Z11host_conv2dPfS_S_ii: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 movq %rsi, -16(%rsp) testl %ecx, %ecx jle .L10 movq %rdx, %r15 movl %ecx, %r11d movl %r8d, %r12d negl %r12d leal -1(%rcx), %edx movl $-1, %eax movslq %r8d, %r14 movl %edx, %ecx movq %rdi, %rsi movq %r15, -8(%rsp) jmp .L12 .L24: movl %eax, %esi orl %edi, %esi js .L14 cmpl %eax, %r8d jle .L14 leal (%rax,%rbp), %esi movslq %esi, %rsi movq -16(%rsp), %rcx movss (%rcx,%rsi,4), %xmm1 movslq %edx, %rsi movq -8(%rsp), %rcx mulss (%rcx,%rsi,4), %xmm1 addss %xmm1, %xmm0 jmp .L14 .L16: movl %r15d, %eax movq -24(%rsp), %rsi movss %xmm0, -4(%rsi,%r9,4) leaq 1(%r9), %rsi addl $1, %ebx cmpq %r14, %r9 je .L23 movq %rsi, %r9 .L17: movl %eax, %edi movl %r12d, %ebp movl $0, %r10d pxor %xmm0, %xmm0 movl %eax, %r15d .L13: movl %ebx, %eax movl %r10d, %edx movq %rcx, -32(%rsp) .L15: cmpl %edi, %r11d jg .L24 .L14: addl $1, %edx addl $1, %eax cmpl %r9d, %eax jne .L15 movq -32(%rsp), %rcx addl %r8d, %ebp addl $2, %r10d addl $1, %edi cmpl $4, %r10d jne .L13 jmp .L16 .L23: movq %rcx, %rsi movl %r13d, %ecx .L19: addl $1, %eax addl %r8d, %r12d cmpl %ecx, %eax je .L10 .L12: testl %r8d, %r8d jle .L19 leal (%r12,%r8), %edx movslq %edx, %rdx leaq (%rsi,%rdx,4), %rdx movl $-1, %ebx movl $1, %r9d movq %rdx, -24(%rsp) movl %ecx, %r13d movq %rsi, %rcx jmp .L17 .L10: popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z11host_conv2dPfS_S_ii, .-_Z11host_conv2dPfS_S_ii .globl _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii .type _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii, @function _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii: .LFB2084: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L29 .L25: movq 136(%rsp), %rax subq %fs:40, %rax jne .L30 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z10gpu_conv2dPfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L25 .L30: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii, .-_Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii .globl _Z10gpu_conv2dPfS_S_ii .type _Z10gpu_conv2dPfS_S_ii, @function _Z10gpu_conv2dPfS_S_ii: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _Z10gpu_conv2dPfS_S_ii, .-_Z10gpu_conv2dPfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string " h_out[%d][%d]: %f" .LC3: .string " d_out[%d][%d]: %f" .LC4: .string "Success!!\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $72, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $144, %edi call malloc@PLT movq %rax, %rbx movl $16, %edi call malloc@PLT movq %rax, %rbp movl $144, %edi call malloc@PLT movq %rax, %r13 leaq 8(%rsp), %rdi movl $1, %edx movl $144, %esi call cudaMallocManaged@PLT leaq 16(%rsp), %rdi movl $1, %edx movl $16, %esi call cudaMallocManaged@PLT leaq 24(%rsp), %rdi movl $1, %edx movl $144, %esi call cudaMallocManaged@PLT movss .LC1(%rip), %xmm0 movl $6, %edx movl $6, %esi movq 8(%rsp), %rdi call _Z4initPfiif movss .LC1(%rip), %xmm0 movl $2, %edx movl $2, %esi movq 16(%rsp), %rdi call _Z4initPfiif pxor %xmm0, %xmm0 movl $6, %edx movl $6, %esi movq 24(%rsp), %rdi call _Z4initPfiif movl $3, 32(%rsp) movl $3, 36(%rsp) movl $1, 40(%rsp) movl $3, 44(%rsp) movl $3, 48(%rsp) movl $1, 52(%rsp) movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movl $1, %ecx movq 44(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L45 .L34: call cudaDeviceSynchronize@PLT movss .LC1(%rip), %xmm0 movl $6, %edx movl $6, %esi movq %rbx, %rdi call _Z4initPfiif movss .LC1(%rip), %xmm0 movl $2, %edx movl $2, %esi movq %rbp, %rdi call _Z4initPfiif pxor %xmm0, %xmm0 movl $6, %edx movl $6, %esi movq %r13, %rdi call _Z4initPfiif movl $6, %r8d movl $6, %ecx movq %rbp, %rdx movq %rbx, %rsi movq %r13, %rdi call _Z11host_conv2dPfS_S_ii movq 24(%rsp), %rax movl $0, %edx movl $0, %r12d jmp .L35 .L45: movl $6, %r8d movl $6, %ecx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq 24(%rsp), %rdi call _Z36__device_stub__Z10gpu_conv2dPfS_S_iiPfS_S_ii jmp .L34 .L42: cvtss2sd %xmm0, %xmm0 movl %ebp, %ecx movl %r12d, %edx leaq .LC2(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 24(%rsp), %rax pxor %xmm0, %xmm0 cvtss2sd (%rax,%rbx), %xmm0 movl %ebp, %ecx movl %r12d, %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT .L38: movq 56(%rsp), %rax subq %fs:40, %rax jne .L46 movl $0, %eax addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L47: .cfi_restore_state addl $1, %r12d addq $24, %rdx cmpl $6, %r12d je .L40 .L35: movq %rdx, %rbx movl $0, %ebp .L39: movss 0(%r13,%rbx), %xmm0 ucomiss (%rax,%rbx), %xmm0 jp .L42 jne .L42 addl $1, %ebp addq $4, %rbx cmpl $6, %ebp jne .L39 jmp .L47 .L40: leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L38 .L46: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC5: .string "_Z10gpu_conv2dPfS_S_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z10gpu_conv2dPfS_S_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC1: .long 1065353216 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "conv2d.hip" .globl _Z25__device_stub__gpu_conv2dPfS_S_ii # -- Begin function _Z25__device_stub__gpu_conv2dPfS_S_ii .p2align 4, 0x90 .type _Z25__device_stub__gpu_conv2dPfS_S_ii,@function _Z25__device_stub__gpu_conv2dPfS_S_ii: # @_Z25__device_stub__gpu_conv2dPfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z10gpu_conv2dPfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z25__device_stub__gpu_conv2dPfS_S_ii, .Lfunc_end0-_Z25__device_stub__gpu_conv2dPfS_S_ii .cfi_endproc # -- End function .globl _Z4initPfiif # -- Begin function _Z4initPfiif .p2align 4, 0x90 .type _Z4initPfiif,@function _Z4initPfiif: # @_Z4initPfiif .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB1_6 # %bb.1: # %.preheader.lr.ph movl %esi, %eax movl %edx, %ecx xorl %esi, %esi xorl %r8d, %r8d jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 incq %r8 addl %edx, %esi cmpq %rax, %r8 je .LBB1_6 .LBB1_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 testl %edx, %edx jle .LBB1_5 # %bb.3: # %.lr.ph # in Loop: Header=BB1_2 Depth=1 movl %esi, %r9d leaq (%rdi,%r9,4), %r9 xorl %r10d, %r10d .p2align 4, 0x90 .LBB1_4: # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 movss %xmm0, (%r9,%r10,4) incq %r10 cmpq %r10, %rcx jne .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge14 retq .Lfunc_end1: .size _Z4initPfiif, .Lfunc_end1-_Z4initPfiif .cfi_endproc # -- End function .globl _Z11host_conv2dPfS_S_ii # -- Begin function _Z11host_conv2dPfS_S_ii .p2align 4, 0x90 .type _Z11host_conv2dPfS_S_ii,@function _Z11host_conv2dPfS_S_ii: # @_Z11host_conv2dPfS_S_ii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq %rsi, -40(%rsp) # 8-byte Spill movq %rdi, -16(%rsp) # 8-byte Spill testl %ecx, %ecx jle .LBB2_13 # %bb.1: # %.preheader45.lr.ph movq %rdx, %rax movslq %r8d, %rdx movl %ecx, %esi movq %rsi, -8(%rsp) # 8-byte Spill movl %edx, %r10d movq %rdx, -24(%rsp) # 8-byte Spill leaq (,%rdx,4), %r11 subq %r11, -40(%rsp) # 8-byte Folded Spill xorl %edx, %edx movq %rdx, -32(%rsp) # 8-byte Spill jmp .LBB2_2 .p2align 4, 0x90 .LBB2_12: # %._crit_edge # in Loop: Header=BB2_2 Depth=1 movq -32(%rsp), %rsi # 8-byte Reload incq %rsi addq %r11, -40(%rsp) # 8-byte Folded Spill movq %rsi, %rdx movq %rsi, -32(%rsp) # 8-byte Spill cmpq -8(%rsp), %rsi # 8-byte Folded Reload je .LBB2_13 .LBB2_2: # %.preheader45 # =>This Loop Header: Depth=1 # Child Loop BB2_4 Depth 2 # Child Loop BB2_5 Depth 3 # Child Loop BB2_6 Depth 4 testl %r8d, %r8d jle .LBB2_12 # %bb.3: # %.lr.ph # in Loop: Header=BB2_2 Depth=1 movq -32(%rsp), %rdx # 8-byte Reload leaq -1(%rdx), %r14 imulq -24(%rsp), %rdx # 8-byte Folded Reload movq -16(%rsp), %rsi # 8-byte Reload leaq (%rsi,%rdx,4), %r15 movq -40(%rsp), %r12 # 8-byte Reload xorl %r13d, %r13d jmp .LBB2_4 .p2align 4, 0x90 .LBB2_11: # in Loop: Header=BB2_4 Depth=2 movss %xmm0, (%r15,%r13,4) incq %r13 addq $4, %r12 cmpq %r10, %r13 je .LBB2_12 .LBB2_4: # Parent Loop BB2_2 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_5 Depth 3 # Child Loop BB2_6 Depth 4 xorps %xmm0, %xmm0 movq %r12, %rbp movq %rax, %rdx xorl %edi, %edi jmp .LBB2_5 .p2align 4, 0x90 .LBB2_10: # in Loop: Header=BB2_5 Depth=3 leaq 1(%rdi), %r9 addq $8, %rdx addq %r11, %rbp testq %rdi, %rdi movq %r9, %rdi jne .LBB2_11 .LBB2_5: # %.preheader # Parent Loop BB2_2 Depth=1 # Parent Loop BB2_4 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB2_6 Depth 4 leal (%r14,%rdi), %r9d movq $-1, %rbx jmp .LBB2_6 .p2align 4, 0x90 .LBB2_9: # in Loop: Header=BB2_6 Depth=4 incq %rbx jne .LBB2_10 .LBB2_6: # Parent Loop BB2_2 Depth=1 # Parent Loop BB2_4 Depth=2 # Parent Loop BB2_5 Depth=3 # => This Inner Loop Header: Depth=4 cmpl %ecx, %r9d jae .LBB2_9 # %bb.7: # in Loop: Header=BB2_6 Depth=4 leal (%rbx,%r13), %esi cmpl %r8d, %esi jae .LBB2_9 # %bb.8: # in Loop: Header=BB2_6 Depth=4 movss (%rbp,%rbx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss 4(%rdx,%rbx,4), %xmm1 addss %xmm1, %xmm0 jmp .LBB2_9 .LBB2_13: # %._crit_edge52 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _Z11host_conv2dPfS_S_ii, .Lfunc_end2-_Z11host_conv2dPfS_S_ii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $152, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $144, %edi callq malloc movq %rax, %r14 movq %rax, %r12 movl $16, %edi callq malloc movq %rax, %r15 movl $144, %edi callq malloc movq %rax, %rbx leaq 32(%rsp), %rdi movl $144, %esi movl $1, %edx callq hipMallocManaged leaq 24(%rsp), %rdi movl $16, %esi movl $1, %edx callq hipMallocManaged leaq 8(%rsp), %rdi movl $144, %esi movl $1, %edx callq hipMallocManaged xorl %eax, %eax movq 32(%rsp), %rcx .p2align 4, 0x90 .LBB3_1: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB3_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB3_2: # Parent Loop BB3_1 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000 incq %rdx cmpq $6, %rdx jne .LBB3_2 # %bb.3: # %._crit_edge.i # in Loop: Header=BB3_1 Depth=1 incq %rax addq $24, %rcx cmpq $6, %rax jne .LBB3_1 # %bb.4: # %_Z4initPfiif.exit movq 24(%rsp), %rax xorl %ecx, %ecx .p2align 4, 0x90 .LBB3_5: # %.preheader.i70 # =>This Loop Header: Depth=1 # Child Loop BB3_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB3_6: # Parent Loop BB3_5 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rax,%rdx,4) # imm = 0x3F800000 incq %rdx cmpq $2, %rdx jne .LBB3_6 # %bb.7: # %._crit_edge.i75 # in Loop: Header=BB3_5 Depth=1 incq %rcx addq $8, %rax cmpq $2, %rcx jne .LBB3_5 # %bb.8: # %_Z4initPfiif.exit78 movq 8(%rsp), %rax xorps %xmm0, %xmm0 movups %xmm0, 128(%rax) movups %xmm0, 112(%rax) movups %xmm0, 96(%rax) movups %xmm0, 80(%rax) movups %xmm0, 64(%rax) movups %xmm0, 48(%rax) movups %xmm0, 32(%rax) movups %xmm0, 16(%rax) movups %xmm0, (%rax) movabsq $12884901891, %rdi # imm = 0x300000003 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_10 # %bb.9: movq 8(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq %rax, 104(%rsp) movq %rcx, 96(%rsp) movq %rdx, 88(%rsp) movl $6, 20(%rsp) movl $6, 16(%rsp) leaq 104(%rsp), %rax movq %rax, 112(%rsp) leaq 96(%rsp), %rax movq %rax, 120(%rsp) leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 112(%rsp), %r9 movl $_Z10gpu_conv2dPfS_S_ii, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB3_10: callq hipDeviceSynchronize xorl %eax, %eax .p2align 4, 0x90 .LBB3_11: # %.preheader.i88 # =>This Loop Header: Depth=1 # Child Loop BB3_12 Depth 2 xorl %ecx, %ecx .p2align 4, 0x90 .LBB3_12: # Parent Loop BB3_11 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%r12,%rcx,4) # imm = 0x3F800000 incq %rcx cmpq $6, %rcx jne .LBB3_12 # %bb.13: # %._crit_edge.i93 # in Loop: Header=BB3_11 Depth=1 incq %rax addq $24, %r12 cmpq $6, %rax jne .LBB3_11 # %bb.14: # %.preheader.i97.preheader xorl %eax, %eax movq %r15, %rcx .p2align 4, 0x90 .LBB3_15: # %.preheader.i97 # =>This Loop Header: Depth=1 # Child Loop BB3_16 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB3_16: # Parent Loop BB3_15 Depth=1 # => This Inner Loop Header: Depth=2 movl $1065353216, (%rcx,%rdx,4) # imm = 0x3F800000 incq %rdx cmpq $2, %rdx jne .LBB3_16 # %bb.17: # %._crit_edge.i102 # in Loop: Header=BB3_15 Depth=1 incq %rax addq $8, %rcx cmpq $2, %rax jne .LBB3_15 # %bb.18: # %.preheader.i106.preheader xorps %xmm0, %xmm0 movups %xmm0, 128(%rbx) movups %xmm0, 112(%rbx) movups %xmm0, 96(%rbx) movups %xmm0, 80(%rbx) movups %xmm0, 64(%rbx) movups %xmm0, 48(%rbx) movups %xmm0, 32(%rbx) movups %xmm0, 16(%rbx) movups %xmm0, (%rbx) addq $-28, %r14 xorl %eax, %eax jmp .LBB3_19 .p2align 4, 0x90 .LBB3_28: # %._crit_edge.i119 # in Loop: Header=BB3_19 Depth=1 incq %rax addq $24, %r14 cmpq $6, %rax je .LBB3_29 .LBB3_19: # %.preheader45.i # =>This Loop Header: Depth=1 # Child Loop BB3_20 Depth 2 # Child Loop BB3_21 Depth 3 # Child Loop BB3_22 Depth 4 leaq -1(%rax), %rcx leaq (%rax,%rax,2), %rdx leaq (%rbx,%rdx,8), %rdx movq %r14, %rsi xorl %edi, %edi jmp .LBB3_20 .p2align 4, 0x90 .LBB3_27: # in Loop: Header=BB3_20 Depth=2 movss %xmm0, (%rdx,%rdi,4) incq %rdi addq $4, %rsi cmpq $6, %rdi je .LBB3_28 .LBB3_20: # Parent Loop BB3_19 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB3_21 Depth 3 # Child Loop BB3_22 Depth 4 xorps %xmm0, %xmm0 movq %rsi, %r8 movq %r15, %r9 xorl %r10d, %r10d jmp .LBB3_21 .p2align 4, 0x90 .LBB3_26: # in Loop: Header=BB3_21 Depth=3 leaq 1(%r10), %r11 addq $8, %r9 addq $24, %r8 testq %r10, %r10 movq %r11, %r10 jne .LBB3_27 .LBB3_21: # %.preheader.i115 # Parent Loop BB3_19 Depth=1 # Parent Loop BB3_20 Depth=2 # => This Loop Header: Depth=3 # Child Loop BB3_22 Depth 4 leal (%rcx,%r10), %r11d movq $-1, %r12 jmp .LBB3_22 .p2align 4, 0x90 .LBB3_25: # in Loop: Header=BB3_22 Depth=4 incq %r12 jne .LBB3_26 .LBB3_22: # Parent Loop BB3_19 Depth=1 # Parent Loop BB3_20 Depth=2 # Parent Loop BB3_21 Depth=3 # => This Inner Loop Header: Depth=4 cmpl $5, %r11d ja .LBB3_25 # %bb.23: # in Loop: Header=BB3_22 Depth=4 leal (%rdi,%r12), %ebp cmpl $5, %ebp ja .LBB3_25 # %bb.24: # in Loop: Header=BB3_22 Depth=4 movss 4(%r8,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss 4(%r9,%r12,4), %xmm1 addss %xmm1, %xmm0 jmp .LBB3_25 .LBB3_29: # %_Z11host_conv2dPfS_S_ii.exit.preheader movq 8(%rsp), %rax xorl %ecx, %ecx xorl %r14d, %r14d xorl %r12d, %r12d .LBB3_30: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_31 Depth 2 movq %rcx, %r13 xorl %r15d, %r15d .p2align 4, 0x90 .LBB3_31: # Parent Loop BB3_30 Depth=1 # => This Inner Loop Header: Depth=2 movss (%rax,%r13), %xmm1 # xmm1 = mem[0],zero,zero,zero movss (%rbx,%r13), %xmm0 # xmm0 = mem[0],zero,zero,zero ucomiss %xmm0, %xmm1 jne .LBB3_32 jp .LBB3_32 # %bb.33: # in Loop: Header=BB3_31 Depth=2 decq %r15 addq $4, %r13 cmpq $-6, %r15 jne .LBB3_31 # %bb.34: # %.critedge # in Loop: Header=BB3_30 Depth=1 cmpq $5, %r14 leaq 1(%r14), %rdx setae %r12b addq $24, %rcx movq %rdx, %r14 cmpq $6, %rdx jne .LBB3_30 jmp .LBB3_35 .LBB3_32: negl %r15d cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movl %r14d, %esi movl %r15d, %edx movb $1, %al callq printf movq 8(%rsp), %rax movss (%rax,%r13), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.1, %edi movl %r14d, %esi movl %r15d, %edx movb $1, %al callq printf testb $1, %r12b je .LBB3_36 .LBB3_35: # %.critedge69 movl $.Lstr, %edi callq puts@PLT .LBB3_36: xorl %eax, %eax addq $152, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB4_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB4_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10gpu_conv2dPfS_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end4: .size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB5_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB5_2: retq .Lfunc_end5: .size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor .cfi_endproc # -- End function .type _Z10gpu_conv2dPfS_S_ii,@object # @_Z10gpu_conv2dPfS_S_ii .section .rodata,"a",@progbits .globl _Z10gpu_conv2dPfS_S_ii .p2align 3, 0x0 _Z10gpu_conv2dPfS_S_ii: .quad _Z25__device_stub__gpu_conv2dPfS_S_ii .size _Z10gpu_conv2dPfS_S_ii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz " h_out[%d][%d]: %f" .size .L.str, 19 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " d_out[%d][%d]: %f" .size .L.str.1, 19 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z10gpu_conv2dPfS_S_ii" .size .L__unnamed_1, 23 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Success!!" .size .Lstr, 10 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__gpu_conv2dPfS_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10gpu_conv2dPfS_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* * EyLeftUpdater.cpp * * Created on: 01 февр. 2016 г. * Author: aleksandr */ #include "EyLeftUpdater.h" #include "SmartIndex.h" /* * indx должен пренадлежать участку от [0, sizeY-1] */ __device__ void EyLeftUpdater::operator() (const int indx) { int n = indx; Ey(0, n) = coeff[0]*(Ey(2, n) + EyLeft(0, 1, n)) + coeff[1] * (EyLeft(0, 0, n) + EyLeft(2, 0, n) - Ey(1, n) - EyLeft(1, 1, n)) + coeff[2] * EyLeft(1, 0, n) - EyLeft(2, 1, n); for (int m = 0; m < 3; m++) { EyLeft(m, 1, n) = EyLeft(m, 0, n); EyLeft(m, 0, n) = Ey(m, n); } }
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* * EyLeftUpdater.cpp * * Created on: 01 февр. 2016 г. * Author: aleksandr */ #include "EyLeftUpdater.h" #include "SmartIndex.h" /* * indx должен пренадлежать участку от [0, sizeY-1] */ __device__ void EyLeftUpdater::operator() (const int indx) { int n = indx; Ey(0, n) = coeff[0]*(Ey(2, n) + EyLeft(0, 1, n)) + coeff[1] * (EyLeft(0, 0, n) + EyLeft(2, 0, n) - Ey(1, n) - EyLeft(1, 1, n)) + coeff[2] * EyLeft(1, 0, n) - EyLeft(2, 1, n); for (int m = 0; m < 3; m++) { EyLeft(m, 1, n) = EyLeft(m, 0, n); EyLeft(m, 0, n) = Ey(m, n); } }
/* * EyLeftUpdater.cpp * * Created on: 01 февр. 2016 г. * Author: aleksandr */ #include <hip/hip_runtime.h> #include "EyLeftUpdater.h" #include "SmartIndex.h" /* * indx должен пренадлежать участку от [0, sizeY-1] */ __device__ void EyLeftUpdater::operator() (const int indx) { int n = indx; Ey(0, n) = coeff[0]*(Ey(2, n) + EyLeft(0, 1, n)) + coeff[1] * (EyLeft(0, 0, n) + EyLeft(2, 0, n) - Ey(1, n) - EyLeft(1, 1, n)) + coeff[2] * EyLeft(1, 0, n) - EyLeft(2, 1, n); for (int m = 0; m < 3; m++) { EyLeft(m, 1, n) = EyLeft(m, 0, n); EyLeft(m, 0, n) = Ey(m, n); } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* * EyLeftUpdater.cpp * * Created on: 01 февр. 2016 г. * Author: aleksandr */ #include <hip/hip_runtime.h> #include "EyLeftUpdater.h" #include "SmartIndex.h" /* * indx должен пренадлежать участку от [0, sizeY-1] */ __device__ void EyLeftUpdater::operator() (const int indx) { int n = indx; Ey(0, n) = coeff[0]*(Ey(2, n) + EyLeft(0, 1, n)) + coeff[1] * (EyLeft(0, 0, n) + EyLeft(2, 0, n) - Ey(1, n) - EyLeft(1, 1, n)) + coeff[2] * EyLeft(1, 0, n) - EyLeft(2, 1, n); for (int m = 0; m < 3; m++) { EyLeft(m, 1, n) = EyLeft(m, 0, n); EyLeft(m, 0, n) = Ey(m, n); } }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* * EyLeftUpdater.cpp * * Created on: 01 февр. 2016 г. * Author: aleksandr */ #include <hip/hip_runtime.h> #include "EyLeftUpdater.h" #include "SmartIndex.h" /* * indx должен пренадлежать участку от [0, sizeY-1] */ __device__ void EyLeftUpdater::operator() (const int indx) { int n = indx; Ey(0, n) = coeff[0]*(Ey(2, n) + EyLeft(0, 1, n)) + coeff[1] * (EyLeft(0, 0, n) + EyLeft(2, 0, n) - Ey(1, n) - EyLeft(1, 1, n)) + coeff[2] * EyLeft(1, 0, n) - EyLeft(2, 1, n); for (int m = 0; m < 3; m++) { EyLeft(m, 1, n) = EyLeft(m, 0, n); EyLeft(m, 0, n) = Ey(m, n); } }
.text .file "EyLeftUpdater.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } }
.file "tmpxft_000fe9b5_00000000-6___stratifycounts.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj .type _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj, @function _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z16__stratifycountsPdiS_Pj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj, .-_Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj .globl _Z16__stratifycountsPdiS_Pj .type _Z16__stratifycountsPdiS_Pj, @function _Z16__stratifycountsPdiS_Pj: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z16__stratifycountsPdiS_Pj, .-_Z16__stratifycountsPdiS_Pj .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z16__stratifycountsPdiS_Pj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z16__stratifycountsPdiS_Pj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z16__stratifycountsPdiS_Pj .globl _Z16__stratifycountsPdiS_Pj .p2align 8 .type _Z16__stratifycountsPdiS_Pj,@function _Z16__stratifycountsPdiS_Pj: s_clause 0x1 s_load_b32 s10, s[0:1], 0x8 s_load_b32 s6, s[0:1], 0x20 s_mov_b32 s7, 0 s_delay_alu instid0(SALU_CYCLE_1) s_mov_b32 s8, s7 s_waitcnt lgkmcnt(0) s_ashr_i32 s11, s10, 31 s_mul_hi_u32 s2, s10, s15 s_mul_i32 s3, s11, s15 s_mul_i32 s12, s10, s15 s_add_i32 s9, s2, s3 s_add_u32 s4, s0, 32 s_addc_u32 s5, s1, 0 s_cmp_lg_u64 s[8:9], 0 s_mov_b64 s[2:3], s[6:7] s_cbranch_scc0 .LBB0_19 v_cvt_f32_ubyte0_e32 v1, 0 v_cvt_f32_u32_e32 v2, s2 s_sub_u32 s13, 0, s2 s_subb_u32 s14, 0, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmamk_f32 v1, v1, 0x4f800000, v2 v_rcp_f32_e32 v1, v1 s_waitcnt_depctr 0xfff v_mul_f32_e32 v1, 0x5f7ffffc, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v2, 0x2f800000, v1 v_trunc_f32_e32 v2, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_fmamk_f32 v1, v2, 0xcf800000, v1 v_cvt_u32_f32_e32 v2, v2 v_cvt_u32_f32_e32 v1, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_readfirstlane_b32 s6, v2 v_readfirstlane_b32 s8, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) s_mul_i32 s16, s13, s6 s_mul_hi_u32 s18, s13, s8 s_mul_i32 s17, s14, s8 s_add_i32 s16, s18, s16 s_mul_i32 s19, s13, s8 s_add_i32 s16, s16, s17 s_mul_hi_u32 s18, s8, s19 s_mul_hi_u32 s20, s6, s19 s_mul_i32 s17, s6, s19 s_mul_hi_u32 s19, s8, s16 s_mul_i32 s8, s8, s16 s_mul_hi_u32 s21, s6, s16 s_add_u32 s8, s18, s8 s_addc_u32 s18, 0, s19 s_add_u32 s8, s8, s17 s_mul_i32 s16, s6, s16 s_addc_u32 s8, s18, s20 s_addc_u32 s17, s21, 0 s_add_u32 s8, s8, s16 s_addc_u32 s16, 0, s17 v_add_co_u32 v1, s8, v1, s8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) s_cmp_lg_u32 s8, 0 s_addc_u32 s6, s6, s16 v_readfirstlane_b32 s8, v1 s_mul_i32 s16, s13, s6 s_delay_alu instid0(VALU_DEP_1) s_mul_hi_u32 s17, s13, s8 s_mul_i32 s14, s14, s8 s_add_i32 s16, s17, s16 s_mul_i32 s13, s13, s8 s_add_i32 s16, s16, s14 s_mul_hi_u32 s17, s6, s13 s_mul_i32 s18, s6, s13 s_mul_hi_u32 s13, s8, s13 s_mul_hi_u32 s19, s8, s16 s_mul_i32 s8, s8, s16 s_mul_hi_u32 s14, s6, s16 s_add_u32 s8, s13, s8 s_addc_u32 s13, 0, s19 s_add_u32 s8, s8, s18 s_mul_i32 s16, s6, s16 s_addc_u32 s8, s13, s17 s_addc_u32 s13, s14, 0 s_add_u32 s8, s8, s16 s_addc_u32 s13, 0, s13 v_add_co_u32 v1, s8, v1, s8 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) s_cmp_lg_u32 s8, 0 s_addc_u32 s6, s6, s13 s_ashr_i32 s16, s9, 31 v_readfirstlane_b32 s13, v1 s_add_u32 s8, s12, s16 s_mov_b32 s17, s16 s_addc_u32 s9, s9, s16 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_xor_b64 s[8:9], s[8:9], s[16:17] s_mul_i32 s18, s8, s6 s_mul_hi_u32 s19, s8, s13 s_mul_hi_u32 s14, s8, s6 s_mul_hi_u32 s21, s9, s13 s_mul_i32 s13, s9, s13 s_add_u32 s18, s19, s18 s_addc_u32 s14, 0, s14 s_mul_hi_u32 s20, s9, s6 s_add_u32 s13, s18, s13 s_mul_i32 s6, s9, s6 s_addc_u32 s13, s14, s21 s_addc_u32 s14, s20, 0 s_add_u32 s6, s13, s6 s_addc_u32 s13, 0, s14 s_mul_i32 s18, s2, s6 s_add_u32 s14, s6, 1 v_sub_co_u32 v1, s8, s8, s18 s_mul_hi_u32 s18, s2, s6 s_addc_u32 s19, s13, 0 s_mul_i32 s20, s2, s13 s_delay_alu instid0(VALU_DEP_1) v_sub_co_u32 v2, s21, v1, s2 s_add_u32 s22, s6, 2 s_addc_u32 s23, s13, 0 s_add_i32 s18, s18, s20 s_cmp_lg_u32 s8, 0 v_readfirstlane_b32 s8, v2 s_subb_u32 s9, s9, s18 s_cmp_lg_u32 s21, 0 s_subb_u32 s18, s9, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) s_cmp_ge_u32 s8, s2 s_cselect_b32 s8, -1, 0 s_cmp_eq_u32 s18, 0 v_readfirstlane_b32 s18, v1 s_cselect_b32 s8, s8, -1 s_cmp_lg_u32 s8, 0 s_cselect_b32 s8, s22, s14 s_cselect_b32 s14, s23, s19 s_cmp_ge_u32 s18, s2 s_cselect_b32 s18, -1, 0 s_cmp_eq_u32 s9, 0 s_cselect_b32 s9, s18, -1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) s_cmp_lg_u32 s9, 0 s_cselect_b32 s9, s14, s13 s_cselect_b32 s8, s8, s6 s_xor_b64 s[8:9], s[8:9], s[16:17] s_delay_alu instid0(SALU_CYCLE_1) s_sub_u32 s8, s8, s16 s_and_not1_b32 vcc_lo, exec_lo, s7 s_cbranch_vccnz .LBB0_3 .LBB0_2: v_cvt_f32_u32_e32 v1, s2 s_sub_i32 s7, 0, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v1, v1 s_waitcnt_depctr 0xfff v_mul_f32_e32 v1, 0x4f7ffffe, v1 v_cvt_u32_f32_e32 v1, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_readfirstlane_b32 s6, v1 s_mul_i32 s7, s7, s6 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_hi_u32 s7, s6, s7 s_add_i32 s6, s6, s7 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_hi_u32 s6, s12, s6 s_mul_i32 s7, s6, s2 s_add_i32 s8, s6, 1 s_sub_i32 s7, s12, s7 s_delay_alu instid0(SALU_CYCLE_1) s_sub_i32 s9, s7, s2 s_cmp_ge_u32 s7, s2 s_cselect_b32 s6, s8, s6 s_cselect_b32 s7, s9, s7 s_add_i32 s8, s6, 1 s_cmp_ge_u32 s7, s2 s_cselect_b32 s8, s8, s6 .LBB0_3: s_add_i32 s9, s15, 1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) s_mul_i32 s6, s11, s9 s_mul_hi_u32 s7, s10, s9 s_mul_i32 s9, s10, s9 s_add_i32 s7, s7, s6 s_mov_b32 s6, 0 s_cmp_lg_u64 s[6:7], 0 s_cbranch_scc0 .LBB0_20 v_cvt_f32_ubyte0_e32 v1, 0 v_cvt_f32_u32_e32 v2, s2 s_sub_u32 s12, 0, s2 s_subb_u32 s3, 0, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmamk_f32 v1, v1, 0x4f800000, v2 v_rcp_f32_e32 v1, v1 s_waitcnt_depctr 0xfff v_mul_f32_e32 v1, 0x5f7ffffc, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v2, 0x2f800000, v1 v_trunc_f32_e32 v2, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_fmamk_f32 v1, v2, 0xcf800000, v1 v_cvt_u32_f32_e32 v2, v2 v_cvt_u32_f32_e32 v1, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_readfirstlane_b32 s10, v2 v_readfirstlane_b32 s11, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) s_mul_i32 s13, s12, s10 s_mul_hi_u32 s16, s12, s11 s_mul_i32 s14, s3, s11 s_add_i32 s13, s16, s13 s_mul_i32 s17, s12, s11 s_add_i32 s13, s13, s14 s_mul_hi_u32 s16, s11, s17 s_mul_hi_u32 s18, s10, s17 s_mul_i32 s14, s10, s17 s_mul_hi_u32 s17, s11, s13 s_mul_i32 s11, s11, s13 s_mul_hi_u32 s19, s10, s13 s_add_u32 s11, s16, s11 s_addc_u32 s16, 0, s17 s_add_u32 s11, s11, s14 s_mul_i32 s13, s10, s13 s_addc_u32 s11, s16, s18 s_addc_u32 s14, s19, 0 s_add_u32 s11, s11, s13 s_addc_u32 s13, 0, s14 v_add_co_u32 v1, s11, v1, s11 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) s_cmp_lg_u32 s11, 0 s_addc_u32 s10, s10, s13 v_readfirstlane_b32 s11, v1 s_mul_i32 s13, s12, s10 s_delay_alu instid0(VALU_DEP_1) s_mul_hi_u32 s14, s12, s11 s_mul_i32 s3, s3, s11 s_add_i32 s13, s14, s13 s_mul_i32 s12, s12, s11 s_add_i32 s13, s13, s3 s_mul_hi_u32 s3, s11, s12 s_mul_hi_u32 s17, s11, s13 s_mul_i32 s11, s11, s13 s_mul_i32 s16, s10, s12 s_add_u32 s3, s3, s11 s_mul_hi_u32 s14, s10, s12 s_addc_u32 s11, 0, s17 s_mul_hi_u32 s12, s10, s13 s_add_u32 s3, s3, s16 s_mul_i32 s13, s10, s13 s_addc_u32 s3, s11, s14 s_addc_u32 s11, s12, 0 s_add_u32 s3, s3, s13 s_addc_u32 s11, 0, s11 v_add_co_u32 v1, s3, v1, s3 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) s_cmp_lg_u32 s3, 0 s_addc_u32 s3, s10, s11 s_ashr_i32 s10, s7, 31 s_add_u32 s12, s9, s10 s_addc_u32 s13, s7, s10 v_readfirstlane_b32 s7, v1 s_mov_b32 s11, s10 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_xor_b64 s[12:13], s[12:13], s[10:11] s_mul_i32 s14, s12, s3 s_delay_alu instid0(VALU_DEP_1) s_mul_hi_u32 s16, s12, s7 s_mul_hi_u32 s11, s12, s3 s_mul_hi_u32 s18, s13, s7 s_mul_i32 s7, s13, s7 s_add_u32 s14, s16, s14 s_addc_u32 s11, 0, s11 s_mul_hi_u32 s17, s13, s3 s_add_u32 s7, s14, s7 s_mul_i32 s3, s13, s3 s_addc_u32 s7, s11, s18 s_addc_u32 s11, s17, 0 s_add_u32 s3, s7, s3 s_addc_u32 s7, 0, s11 s_mul_i32 s16, s2, s3 s_mul_hi_u32 s14, s2, s3 v_sub_co_u32 v1, s12, s12, s16 s_mul_i32 s7, s2, s7 s_add_u32 s11, s3, 1 s_add_i32 s14, s14, s7 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) v_sub_co_u32 v2, s7, v1, s2 s_add_u32 s16, s3, 2 s_cmp_lg_u32 s12, 0 v_mov_b32_e32 v3, s16 v_cmp_le_u32_e32 vcc_lo, s2, v2 s_subb_u32 s12, s13, s14 s_cmp_lg_u32 s7, 0 s_subb_u32 s7, s12, 0 v_cndmask_b32_e64 v2, 0, -1, vcc_lo v_cmp_le_u32_e32 vcc_lo, s2, v1 s_cmp_eq_u32 s7, 0 v_cndmask_b32_e64 v1, 0, -1, vcc_lo s_cselect_b32 vcc_lo, -1, 0 s_cmp_eq_u32 s12, 0 v_cndmask_b32_e32 v2, -1, v2, vcc_lo s_cselect_b32 vcc_lo, -1, 0 v_cndmask_b32_e32 v1, -1, v1, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_cmp_ne_u32_e32 vcc_lo, 0, v2 v_cndmask_b32_e32 v2, s11, v3, vcc_lo v_cmp_ne_u32_e32 vcc_lo, 0, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v1, s3, v2, vcc_lo v_xor_b32_e32 v1, s10, v1 s_delay_alu instid0(VALU_DEP_1) v_sub_co_u32 v1, vcc_lo, v1, s10 s_and_not1_b32 vcc_lo, exec_lo, s6 s_cbranch_vccnz .LBB0_6 .LBB0_5: v_cvt_f32_u32_e32 v1, s2 s_sub_i32 s3, 0, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v1, v1 s_waitcnt_depctr 0xfff v_mul_f32_e32 v1, 0x4f7ffffe, v1 v_cvt_u32_f32_e32 v1, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v2, s3, v1 v_mul_hi_u32 v2, v1, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v1, v1, v2 v_mul_hi_u32 v1, s9, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mul_lo_u32 v2, v1, s2 v_add_nc_u32_e32 v3, 1, v1 v_sub_nc_u32_e32 v2, s9, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v4, s2, v2 v_cmp_le_u32_e32 vcc_lo, s2, v2 v_dual_cndmask_b32 v2, v2, v4 :: v_dual_cndmask_b32 v1, v1, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_le_u32_e32 vcc_lo, s2, v2 v_add_nc_u32_e32 v3, 1, v1 s_delay_alu instid0(VALU_DEP_1) v_cndmask_b32_e32 v1, v1, v3, vcc_lo .LBB0_6: v_bfe_u32 v2, v0, 10, 10 v_and_b32_e32 v0, 0x3ff, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_eq_u32_e64 s2, 0, v2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_8 s_load_b64 s[6:7], s[0:1], 0x0 v_lshlrev_b32_e32 v5, 3, v0 s_waitcnt lgkmcnt(0) global_load_b64 v[3:4], v5, s[6:7] s_waitcnt vmcnt(0) ds_store_b64 v5, v[3:4] offset:4096 .LBB0_8: s_or_b32 exec_lo, exec_lo, s3 v_cmp_ge_i32_e32 vcc_lo, s8, v1 s_cbranch_vccnz .LBB0_18 s_load_b32 s3, s[4:5], 0xc s_load_b128 s[4:7], s[0:1], 0x10 s_ashr_i32 s9, s8, 31 v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v10, 4, v0 s_lshr_b32 s1, s9, 20 v_cmp_gt_u32_e64 s0, 4, v2 s_add_i32 s1, s8, s1 v_lshlrev_b32_e32 v11, 2, v2 s_ashr_i32 s1, s1, 12 v_lshl_add_u32 v12, v2, 2, v10 v_or_b32_e32 v14, 4, v10 v_or_b32_e32 v15, 8, v10 v_or_b32_e32 v16, 12, v10 s_add_i32 s1, s1, s15 v_mov_b32_e32 v17, 0x7fff0000 s_waitcnt lgkmcnt(0) s_and_b32 s3, 0xffff, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u32_u24 v13, v2, s3, v0 s_lshl_b32 s3, s1, 8 v_add_nc_u32_e32 v4, s8, v13 s_branch .LBB0_11 .LBB0_10: s_or_b32 exec_lo, exec_lo, s1 s_addk_i32 s8, 0x1000 v_add_nc_u32_e32 v4, 0x1000, v4 v_cmp_ge_i32_e32 vcc_lo, s8, v1 s_addk_i32 s3, 0x100 s_cbranch_vccnz .LBB0_18 .LBB0_11: s_waitcnt_vscnt null, 0x0 s_barrier buffer_gl0_inv s_and_saveexec_b32 s1, s0 s_cbranch_execz .LBB0_13 ds_store_b32 v12, v3 .LBB0_13: s_or_b32 exec_lo, exec_lo, s1 v_add_nc_u32_e32 v2, s8, v13 s_mov_b32 s9, exec_lo s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv v_cmpx_lt_i32_e64 v2, v1 s_cbranch_execz .LBB0_16 ds_load_b64 v[6:7], v3 offset:4096 v_ashrrev_i32_e32 v5, 31, v4 v_add_nc_u32_e32 v18, 0x1000, v2 s_mov_b32 s10, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[8:9], 3, v[4:5] v_min_i32_e32 v5, v1, v18 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v8, vcc_lo, s4, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s5, v9, vcc_lo .LBB0_15: global_load_b64 v[18:19], v[8:9], off v_add_nc_u32_e32 v2, 0x400, v2 v_add_co_u32 v8, s1, v8, 0x2000 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v9, s1, 0, v9, s1 s_waitcnt vmcnt(0) lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[6:7] v_cndmask_b32_e64 v22, 1, 2, vcc_lo v_lshlrev_b32_e32 v20, 3, v22 v_lshlrev_b32_e32 v22, 1, v22 ds_load_b64 v[20:21], v20 offset:4096 v_add_nc_u32_e32 v23, 2, v22 v_or_b32_e32 v22, 1, v22 s_waitcnt lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[20:21] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v22, v22, v23, vcc_lo v_lshlrev_b32_e32 v20, 3, v22 v_lshlrev_b32_e32 v22, 1, v22 ds_load_b64 v[20:21], v20 offset:4096 v_add_nc_u32_e32 v23, 2, v22 v_or_b32_e32 v22, 1, v22 s_waitcnt lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[20:21] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v22, v22, v23, vcc_lo v_lshlrev_b32_e32 v20, 3, v22 v_lshlrev_b32_e32 v22, 1, v22 ds_load_b64 v[20:21], v20 offset:4096 v_add_nc_u32_e32 v23, 2, v22 v_or_b32_e32 v22, 1, v22 s_waitcnt lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[20:21] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v22, v22, v23, vcc_lo v_lshlrev_b32_e32 v20, 3, v22 v_lshlrev_b32_e32 v22, 1, v22 ds_load_b64 v[20:21], v20 offset:4096 v_add_nc_u32_e32 v23, 2, v22 v_or_b32_e32 v22, 1, v22 s_waitcnt lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[20:21] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v22, v22, v23, vcc_lo v_lshlrev_b32_e32 v20, 3, v22 v_lshlrev_b32_e32 v22, 1, v22 ds_load_b64 v[20:21], v20 offset:4096 v_add_nc_u32_e32 v23, 2, v22 v_or_b32_e32 v22, 1, v22 s_waitcnt lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[20:21] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v22, v22, v23, vcc_lo v_lshlrev_b32_e32 v20, 3, v22 v_lshlrev_b32_e32 v22, 1, v22 ds_load_b64 v[20:21], v20 offset:4096 v_add_nc_u32_e32 v23, 2, v22 v_or_b32_e32 v22, 1, v22 s_waitcnt lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[20:21] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v22, v22, v23, vcc_lo v_lshlrev_b32_e32 v20, 3, v22 v_lshlrev_b32_e32 v22, 1, v22 ds_load_b64 v[20:21], v20 offset:4096 v_add_nc_u32_e32 v23, 2, v22 v_or_b32_e32 v22, 1, v22 s_waitcnt lgkmcnt(0) v_cmp_gt_f64_e32 vcc_lo, v[18:19], v[20:21] s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v18, v22, v23, vcc_lo v_cmp_ge_i32_e32 vcc_lo, v2, v5 v_lshlrev_b32_e32 v18, 4, v18 s_or_b32 s10, vcc_lo, s10 s_delay_alu instid0(VALU_DEP_1) v_add3_u32 v18, v18, v11, 0xfffff010 ds_inc_u32 v18, v17 s_and_not1_b32 exec_lo, exec_lo, s10 s_cbranch_execnz .LBB0_15 .LBB0_16: s_or_b32 exec_lo, exec_lo, s9 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_and_saveexec_b32 s1, s2 s_cbranch_execz .LBB0_10 ds_load_b32 v5, v10 ds_load_b32 v6, v14 ds_load_b32 v7, v15 ds_load_b32 v8, v16 v_add_nc_u32_e32 v2, s3, v0 s_waitcnt lgkmcnt(2) v_add_nc_u32_e32 v9, v6, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[5:6], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add3_u32 v2, v9, v7, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v5, vcc_lo, s6, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo global_store_b32 v[5:6], v2, off s_branch .LBB0_10 .LBB0_18: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .LBB0_19: s_branch .LBB0_2 .LBB0_20: s_branch .LBB0_5 .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16__stratifycountsPdiS_Pj .amdhsa_group_segment_fixed_size 6144 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 24 .amdhsa_next_free_sgpr 24 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z16__stratifycountsPdiS_Pj, .Lfunc_end0-_Z16__stratifycountsPdiS_Pj .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 6144 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z16__stratifycountsPdiS_Pj .private_segment_fixed_size: 0 .sgpr_count: 26 .sgpr_spill_count: 0 .symbol: _Z16__stratifycountsPdiS_Pj.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 24 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } }
.text .file "__stratifycounts.hip" .globl _Z31__device_stub____stratifycountsPdiS_Pj # -- Begin function _Z31__device_stub____stratifycountsPdiS_Pj .p2align 4, 0x90 .type _Z31__device_stub____stratifycountsPdiS_Pj,@function _Z31__device_stub____stratifycountsPdiS_Pj: # @_Z31__device_stub____stratifycountsPdiS_Pj .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 4(%rsp) movq %rdx, 64(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16__stratifycountsPdiS_Pj, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z31__device_stub____stratifycountsPdiS_Pj, .Lfunc_end0-_Z31__device_stub____stratifycountsPdiS_Pj .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16__stratifycountsPdiS_Pj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z16__stratifycountsPdiS_Pj,@object # @_Z16__stratifycountsPdiS_Pj .section .rodata,"a",@progbits .globl _Z16__stratifycountsPdiS_Pj .p2align 3, 0x0 _Z16__stratifycountsPdiS_Pj: .quad _Z31__device_stub____stratifycountsPdiS_Pj .size _Z16__stratifycountsPdiS_Pj, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z16__stratifycountsPdiS_Pj" .size .L__unnamed_1, 28 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z31__device_stub____stratifycountsPdiS_Pj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z16__stratifycountsPdiS_Pj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000fe9b5_00000000-6___stratifycounts.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj .type _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj, @function _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z16__stratifycountsPdiS_Pj(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj, .-_Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj .globl _Z16__stratifycountsPdiS_Pj .type _Z16__stratifycountsPdiS_Pj, @function _Z16__stratifycountsPdiS_Pj: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z41__device_stub__Z16__stratifycountsPdiS_PjPdiS_Pj addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z16__stratifycountsPdiS_Pj, .-_Z16__stratifycountsPdiS_Pj .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z16__stratifycountsPdiS_Pj" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z16__stratifycountsPdiS_Pj(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "__stratifycounts.hip" .globl _Z31__device_stub____stratifycountsPdiS_Pj # -- Begin function _Z31__device_stub____stratifycountsPdiS_Pj .p2align 4, 0x90 .type _Z31__device_stub____stratifycountsPdiS_Pj,@function _Z31__device_stub____stratifycountsPdiS_Pj: # @_Z31__device_stub____stratifycountsPdiS_Pj .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 4(%rsp) movq %rdx, 64(%rsp) movq %rcx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 56(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16__stratifycountsPdiS_Pj, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z31__device_stub____stratifycountsPdiS_Pj, .Lfunc_end0-_Z31__device_stub____stratifycountsPdiS_Pj .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16__stratifycountsPdiS_Pj, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z16__stratifycountsPdiS_Pj,@object # @_Z16__stratifycountsPdiS_Pj .section .rodata,"a",@progbits .globl _Z16__stratifycountsPdiS_Pj .p2align 3, 0x0 _Z16__stratifycountsPdiS_Pj: .quad _Z31__device_stub____stratifycountsPdiS_Pj .size _Z16__stratifycountsPdiS_Pj, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z16__stratifycountsPdiS_Pj" .size .L__unnamed_1, 28 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z31__device_stub____stratifycountsPdiS_Pj .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z16__stratifycountsPdiS_Pj .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "ops-builder.hh" #include <stdexcept> #include "graph.hh" #include "add.hh" #include "adam-update.hh" #include "argmax-accuracy.hh" #include "input.hh" #include "leaky-relu-grad.hh" #include "log-softmax.hh" #include "mat-mat-mul.hh" #include "mat-mul-add.hh" #include "mat-rvect-add.hh" #include "mat-sum.hh" #include "moment-update.hh" #include "mse.hh" #include "mse-grad.hh" #include "relu-grad.hh" #include "seq.hh" #include "sigmoid-cross-entropy.hh" #include "sigmoid-cross-entropy-grad.hh" #include "sigmoid-grad.hh" #include "softmax.hh" #include "softmax-cross-entropy.hh" #include "softmax-cross-entropy-grad.hh" #include "tanh-grad.hh" #include "update.hh" #include "variable.hh" #include "vect-sigmoid.hh" #include "conv2d.hh" #include "conv2d-bias-add.hh" #include "conv2d-input-grad.hh" #include "conv2d-kernel-grad.hh" #include "conv2d-bias-add-grad.hh" #include "conv2d-transpose.hh" #include "conv2d-transpose-input-grad.hh" #include "conv2d-transpose-kernel-grad.hh" #include "vect-relu.hh" #include "vect-relu-leaky.hh" #include "vect-tanh.hh" #include "reshape.hh" namespace ops { OpsBuilder& OpsBuilder::instance() { static OpsBuilder builder; return builder; } OpsBuilder::OpsBuilder() : graph_(Graph::instance()) {} Add* OpsBuilder::add(Op* left, Op* right) { if (left->shape_get() != right->shape_get()) throw std::runtime_error {"add: left and right must have the same shape"}; auto res = new Add(left, right); graph_.add(res); return res; } AdamUpdate* OpsBuilder::adam_update(Variable* var, Op* m, Op* v, dbl_t learning_rate, dbl_t beta1, dbl_t beta2, dbl_t eps) { if (var->shape_get() != m->shape_get()) throw std::runtime_error {"var and m must have the same shape"}; if (var->shape_get() != v->shape_get()) throw std::runtime_error {"var and v must have the same shape"}; auto res = new AdamUpdate(var, m, v, learning_rate, beta1, beta2, eps); graph_.add(res); return res; } ArgmaxAccuracy* OpsBuilder::argmax_accuracy(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"y and y_hat must have the same shape"}; auto res = new ArgmaxAccuracy(y, y_hat); graph_.add(res); return res; } Conv2D* OpsBuilder::conv2d(Op* input, Op* kernel, const int* strides) { if (input->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:input must be a 4D tensor"}; if (kernel->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:kernel must be a 4D tensor"}; auto res = new Conv2D(input, kernel, strides); graph_.add(res); return res; } Conv2DBiasAdd* OpsBuilder::conv2d_bias_add(Op* z, Op* bias) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAdd:z must be a 4D tensor"}; if (bias->shape_get().ndims() != 1) throw std::runtime_error {"Conv2DBiasAdd:bias must be a 1D array"}; if (z->shape_get()[3] != bias->shape_get()[0]) throw std::runtime_error {"Conv2DBiasAdd:z and bias shape are not corresponding"}; auto res = new Conv2DBiasAdd(z, bias); graph_.add(res); return res; } Conv2DBiasAddGrad* OpsBuilder::conv2d_bias_add_grad(Op* z) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAddGrad:z must be a 4D tensor"}; auto res = new Conv2DBiasAddGrad(z); graph_.add(res); return res; } Conv2DInputGrad* OpsBuilder::conv2d_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DKernelGrad* OpsBuilder::conv2d_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size, const int* padded_size) { auto res = new Conv2DKernelGrad(y, input, strides, kernel_size, padded_size); graph_.add(res); return res; } Conv2DTranspose* OpsBuilder::conv2d_transpose(Op* input, Op* kernel, const int* out_size, const int* strides) { auto res = new Conv2DTranspose(input, kernel, out_size, strides); graph_.add(res); return res; } Conv2DTransposeInputGrad* OpsBuilder::conv2d_transpose_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DTransposeInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DTransposeKernelGrad* OpsBuilder::conv2d_transpose_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size) { auto res = new Conv2DTransposeKernelGrad(y, input, strides, kernel_size); graph_.add(res); return res; } Input* OpsBuilder::input(const Shape& shape) { auto res = new Input(shape); graph_.add(res); return res; } LeakyReluGrad* OpsBuilder::leaky_relu_grad(Op* z, Op* dout, dbl_t alpha) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"LeakyReluGrad: z and dout must have the same shape"}; auto res = new LeakyReluGrad(z, dout, alpha); graph_.add(res); return res; } LogSoftmax* OpsBuilder::log_softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"log softmax input must be a matrix"}; auto res = new LogSoftmax(arg); graph_.add(res); return res; } MatMatMul* OpsBuilder::mat_mat_mul(Op* left, Op* right, bool left_tr, bool right_tr) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 2) throw std::runtime_error{"right operand must be a matrix"}; if (left->shape_get()[!left_tr] != right->shape_get()[right_tr]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatMatMul(left, right, left_tr, right_tr); graph_.add(res); return res; } MatMulAdd* OpsBuilder::mat_mul_add(Op* x, Op* w, Op* b) { if (x->shape_get().ndims() != 2) throw std::runtime_error{"x must be a matrix"}; if (w->shape_get().ndims() != 2) throw std::runtime_error{"w must be a matrix"}; if (b->shape_get().ndims() != 1) throw std::runtime_error{"b must be a vector"}; if (x->shape_get()[1] != w->shape_get()[0]) throw std::runtime_error{"x[1] and w[0] differ"}; if (w->shape_get()[1] != b->shape_get()[0]) throw std::runtime_error{"w[1] and b[0] differ"}; auto res = new MatMulAdd(x, w, b); graph_.add(res); return res; } MatRvectAdd* OpsBuilder::mat_rvect_add(Op* left, Op* right) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 1) throw std::runtime_error{"right operand must be a vector"}; if (left->shape_get()[1] != right->shape_get()[0]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatRvectAdd(left, right); graph_.add(res); return res; } MatSum* OpsBuilder::mat_sum(Op* arg, std::size_t axis) { if (arg->shape_get().ndims() != 2) throw std::runtime_error {"arg must be a matrix"}; if (axis >= 2) throw std::runtime_error {"axis must be 0 or 1"}; auto res = new MatSum(arg, axis); graph_.add(res); return res; } MomentUpdate* OpsBuilder::moment_update(Variable* var, Op* dt, dbl_t coeff1, dbl_t coeff2, bool sq_update) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; auto res = new MomentUpdate(var, dt, coeff1, coeff2, sq_update); graph_.add(res); return res; } MSE* OpsBuilder::mse(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSE: y and y_hat must have the same shape"}; auto res = new MSE(y, y_hat); graph_.add(res); return res; } Reshape* OpsBuilder::reshape(Op* arg, const Shape& shape) { auto& arg_shape = arg->shape_get(); if (shape.defined() && shape.total() != arg_shape.total()) throw std::runtime_error {"Reshape:"}; // if (! shape.defined() && (arg_shape.total() % (- shape.total()) != 0)) // throw std::runtime_error {"Reshape:"}; // nb -1 = max 1 ?? has to be checked auto res = new Reshape(arg, shape); graph_.add(res); return res; } MSEGrad* OpsBuilder::mse_grad(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSEGrad: y and y_hat must have the same shape"}; auto res = new MSEGrad(y, y_hat); graph_.add(res); return res; } ReluGrad* OpsBuilder::relu_grad(Op* z, Op* dout) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"ReluGrad: z and dout must have the same shape"}; auto res = new ReluGrad(z, dout); graph_.add(res); return res; } Seq* OpsBuilder::seq(const std::vector<Op*>& ops) { if (ops.empty()) throw std::runtime_error {"seq: ops can't be empty"}; auto res = new Seq(ops); graph_.add(res); return res; } SigmoidCrossEntropy* OpsBuilder::sigmoid_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropy(y, logits); graph_.add(res); return res; } SigmoidCrossEntropyGrad* OpsBuilder::sigmoid_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropyGrad(y, logits); graph_.add(res); return res; } SigmoidGrad* OpsBuilder::sigmoid_grad(Op* sig_out, Op* dout) { if (sig_out->shape_get() != dout->shape_get()) throw std::runtime_error {"SigmoidGrad: sig_out and dout must have the same shape"}; auto res = new SigmoidGrad(sig_out, dout); graph_.add(res); return res; } Softmax* OpsBuilder::softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"softmax input must be a matrix"}; auto res = new Softmax(arg); graph_.add(res); return res; } SoftmaxCrossEntropy* OpsBuilder::softmax_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropy(y, logits); graph_.add(res); return res; } SoftmaxCrossEntropyGrad* OpsBuilder::softmax_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropyGrad(y, logits); graph_.add(res); return res; } TanhGrad* OpsBuilder::tanh_grad(Op* tanh_out, Op* dout) { if (tanh_out->shape_get() != dout->shape_get()) throw std::runtime_error {"TanhGrad: tanh_out and dout must have the same shape"}; auto res = new TanhGrad(tanh_out, dout); graph_.add(res); return res; } Update* OpsBuilder::update(Variable* var, Op* dt, Op* coeff) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; if (coeff->shape_get().ndims()) throw std::runtime_error {"coeff must be a scalar"}; auto res = new Update(var, dt, coeff); graph_.add(res); return res; } Variable* OpsBuilder::variable(const Shape& shape, bool trainable) { if (!shape.defined()) throw std::runtime_error{"shape not fully defined"}; auto res = new Variable(shape, trainable); graph_.add_var(res); return res; } VectSigmoid* OpsBuilder::vect_sigmoid(Op* arg) { auto res = new VectSigmoid(arg); graph_.add(res); return res; } VectRelu* OpsBuilder::vect_relu(Op* arg) { auto res = new VectRelu(arg); graph_.add(res); return res; } VectReluLeaky* OpsBuilder::vect_relu_leaky(Op* arg, const dbl_t alpha) { auto res = new VectReluLeaky(arg, alpha); graph_.add(res); return res; } VectTanh* OpsBuilder::vect_tanh(Op* arg) { auto res = new VectTanh(arg); graph_.add(res); return res; } }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "ops-builder.hh" #include <stdexcept> #include "graph.hh" #include "add.hh" #include "adam-update.hh" #include "argmax-accuracy.hh" #include "input.hh" #include "leaky-relu-grad.hh" #include "log-softmax.hh" #include "mat-mat-mul.hh" #include "mat-mul-add.hh" #include "mat-rvect-add.hh" #include "mat-sum.hh" #include "moment-update.hh" #include "mse.hh" #include "mse-grad.hh" #include "relu-grad.hh" #include "seq.hh" #include "sigmoid-cross-entropy.hh" #include "sigmoid-cross-entropy-grad.hh" #include "sigmoid-grad.hh" #include "softmax.hh" #include "softmax-cross-entropy.hh" #include "softmax-cross-entropy-grad.hh" #include "tanh-grad.hh" #include "update.hh" #include "variable.hh" #include "vect-sigmoid.hh" #include "conv2d.hh" #include "conv2d-bias-add.hh" #include "conv2d-input-grad.hh" #include "conv2d-kernel-grad.hh" #include "conv2d-bias-add-grad.hh" #include "conv2d-transpose.hh" #include "conv2d-transpose-input-grad.hh" #include "conv2d-transpose-kernel-grad.hh" #include "vect-relu.hh" #include "vect-relu-leaky.hh" #include "vect-tanh.hh" #include "reshape.hh" namespace ops { OpsBuilder& OpsBuilder::instance() { static OpsBuilder builder; return builder; } OpsBuilder::OpsBuilder() : graph_(Graph::instance()) {} Add* OpsBuilder::add(Op* left, Op* right) { if (left->shape_get() != right->shape_get()) throw std::runtime_error {"add: left and right must have the same shape"}; auto res = new Add(left, right); graph_.add(res); return res; } AdamUpdate* OpsBuilder::adam_update(Variable* var, Op* m, Op* v, dbl_t learning_rate, dbl_t beta1, dbl_t beta2, dbl_t eps) { if (var->shape_get() != m->shape_get()) throw std::runtime_error {"var and m must have the same shape"}; if (var->shape_get() != v->shape_get()) throw std::runtime_error {"var and v must have the same shape"}; auto res = new AdamUpdate(var, m, v, learning_rate, beta1, beta2, eps); graph_.add(res); return res; } ArgmaxAccuracy* OpsBuilder::argmax_accuracy(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"y and y_hat must have the same shape"}; auto res = new ArgmaxAccuracy(y, y_hat); graph_.add(res); return res; } Conv2D* OpsBuilder::conv2d(Op* input, Op* kernel, const int* strides) { if (input->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:input must be a 4D tensor"}; if (kernel->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:kernel must be a 4D tensor"}; auto res = new Conv2D(input, kernel, strides); graph_.add(res); return res; } Conv2DBiasAdd* OpsBuilder::conv2d_bias_add(Op* z, Op* bias) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAdd:z must be a 4D tensor"}; if (bias->shape_get().ndims() != 1) throw std::runtime_error {"Conv2DBiasAdd:bias must be a 1D array"}; if (z->shape_get()[3] != bias->shape_get()[0]) throw std::runtime_error {"Conv2DBiasAdd:z and bias shape are not corresponding"}; auto res = new Conv2DBiasAdd(z, bias); graph_.add(res); return res; } Conv2DBiasAddGrad* OpsBuilder::conv2d_bias_add_grad(Op* z) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAddGrad:z must be a 4D tensor"}; auto res = new Conv2DBiasAddGrad(z); graph_.add(res); return res; } Conv2DInputGrad* OpsBuilder::conv2d_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DKernelGrad* OpsBuilder::conv2d_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size, const int* padded_size) { auto res = new Conv2DKernelGrad(y, input, strides, kernel_size, padded_size); graph_.add(res); return res; } Conv2DTranspose* OpsBuilder::conv2d_transpose(Op* input, Op* kernel, const int* out_size, const int* strides) { auto res = new Conv2DTranspose(input, kernel, out_size, strides); graph_.add(res); return res; } Conv2DTransposeInputGrad* OpsBuilder::conv2d_transpose_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DTransposeInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DTransposeKernelGrad* OpsBuilder::conv2d_transpose_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size) { auto res = new Conv2DTransposeKernelGrad(y, input, strides, kernel_size); graph_.add(res); return res; } Input* OpsBuilder::input(const Shape& shape) { auto res = new Input(shape); graph_.add(res); return res; } LeakyReluGrad* OpsBuilder::leaky_relu_grad(Op* z, Op* dout, dbl_t alpha) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"LeakyReluGrad: z and dout must have the same shape"}; auto res = new LeakyReluGrad(z, dout, alpha); graph_.add(res); return res; } LogSoftmax* OpsBuilder::log_softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"log softmax input must be a matrix"}; auto res = new LogSoftmax(arg); graph_.add(res); return res; } MatMatMul* OpsBuilder::mat_mat_mul(Op* left, Op* right, bool left_tr, bool right_tr) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 2) throw std::runtime_error{"right operand must be a matrix"}; if (left->shape_get()[!left_tr] != right->shape_get()[right_tr]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatMatMul(left, right, left_tr, right_tr); graph_.add(res); return res; } MatMulAdd* OpsBuilder::mat_mul_add(Op* x, Op* w, Op* b) { if (x->shape_get().ndims() != 2) throw std::runtime_error{"x must be a matrix"}; if (w->shape_get().ndims() != 2) throw std::runtime_error{"w must be a matrix"}; if (b->shape_get().ndims() != 1) throw std::runtime_error{"b must be a vector"}; if (x->shape_get()[1] != w->shape_get()[0]) throw std::runtime_error{"x[1] and w[0] differ"}; if (w->shape_get()[1] != b->shape_get()[0]) throw std::runtime_error{"w[1] and b[0] differ"}; auto res = new MatMulAdd(x, w, b); graph_.add(res); return res; } MatRvectAdd* OpsBuilder::mat_rvect_add(Op* left, Op* right) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 1) throw std::runtime_error{"right operand must be a vector"}; if (left->shape_get()[1] != right->shape_get()[0]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatRvectAdd(left, right); graph_.add(res); return res; } MatSum* OpsBuilder::mat_sum(Op* arg, std::size_t axis) { if (arg->shape_get().ndims() != 2) throw std::runtime_error {"arg must be a matrix"}; if (axis >= 2) throw std::runtime_error {"axis must be 0 or 1"}; auto res = new MatSum(arg, axis); graph_.add(res); return res; } MomentUpdate* OpsBuilder::moment_update(Variable* var, Op* dt, dbl_t coeff1, dbl_t coeff2, bool sq_update) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; auto res = new MomentUpdate(var, dt, coeff1, coeff2, sq_update); graph_.add(res); return res; } MSE* OpsBuilder::mse(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSE: y and y_hat must have the same shape"}; auto res = new MSE(y, y_hat); graph_.add(res); return res; } Reshape* OpsBuilder::reshape(Op* arg, const Shape& shape) { auto& arg_shape = arg->shape_get(); if (shape.defined() && shape.total() != arg_shape.total()) throw std::runtime_error {"Reshape:"}; // if (! shape.defined() && (arg_shape.total() % (- shape.total()) != 0)) // throw std::runtime_error {"Reshape:"}; // nb -1 = max 1 ?? has to be checked auto res = new Reshape(arg, shape); graph_.add(res); return res; } MSEGrad* OpsBuilder::mse_grad(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSEGrad: y and y_hat must have the same shape"}; auto res = new MSEGrad(y, y_hat); graph_.add(res); return res; } ReluGrad* OpsBuilder::relu_grad(Op* z, Op* dout) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"ReluGrad: z and dout must have the same shape"}; auto res = new ReluGrad(z, dout); graph_.add(res); return res; } Seq* OpsBuilder::seq(const std::vector<Op*>& ops) { if (ops.empty()) throw std::runtime_error {"seq: ops can't be empty"}; auto res = new Seq(ops); graph_.add(res); return res; } SigmoidCrossEntropy* OpsBuilder::sigmoid_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropy(y, logits); graph_.add(res); return res; } SigmoidCrossEntropyGrad* OpsBuilder::sigmoid_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropyGrad(y, logits); graph_.add(res); return res; } SigmoidGrad* OpsBuilder::sigmoid_grad(Op* sig_out, Op* dout) { if (sig_out->shape_get() != dout->shape_get()) throw std::runtime_error {"SigmoidGrad: sig_out and dout must have the same shape"}; auto res = new SigmoidGrad(sig_out, dout); graph_.add(res); return res; } Softmax* OpsBuilder::softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"softmax input must be a matrix"}; auto res = new Softmax(arg); graph_.add(res); return res; } SoftmaxCrossEntropy* OpsBuilder::softmax_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropy(y, logits); graph_.add(res); return res; } SoftmaxCrossEntropyGrad* OpsBuilder::softmax_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropyGrad(y, logits); graph_.add(res); return res; } TanhGrad* OpsBuilder::tanh_grad(Op* tanh_out, Op* dout) { if (tanh_out->shape_get() != dout->shape_get()) throw std::runtime_error {"TanhGrad: tanh_out and dout must have the same shape"}; auto res = new TanhGrad(tanh_out, dout); graph_.add(res); return res; } Update* OpsBuilder::update(Variable* var, Op* dt, Op* coeff) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; if (coeff->shape_get().ndims()) throw std::runtime_error {"coeff must be a scalar"}; auto res = new Update(var, dt, coeff); graph_.add(res); return res; } Variable* OpsBuilder::variable(const Shape& shape, bool trainable) { if (!shape.defined()) throw std::runtime_error{"shape not fully defined"}; auto res = new Variable(shape, trainable); graph_.add_var(res); return res; } VectSigmoid* OpsBuilder::vect_sigmoid(Op* arg) { auto res = new VectSigmoid(arg); graph_.add(res); return res; } VectRelu* OpsBuilder::vect_relu(Op* arg) { auto res = new VectRelu(arg); graph_.add(res); return res; } VectReluLeaky* OpsBuilder::vect_relu_leaky(Op* arg, const dbl_t alpha) { auto res = new VectReluLeaky(arg, alpha); graph_.add(res); return res; } VectTanh* OpsBuilder::vect_tanh(Op* arg) { auto res = new VectTanh(arg); graph_.add(res); return res; } }
#include <hip/hip_runtime.h> #include "ops-builder.hh" #include <stdexcept> #include "graph.hh" #include "add.hh" #include "adam-update.hh" #include "argmax-accuracy.hh" #include "input.hh" #include "leaky-relu-grad.hh" #include "log-softmax.hh" #include "mat-mat-mul.hh" #include "mat-mul-add.hh" #include "mat-rvect-add.hh" #include "mat-sum.hh" #include "moment-update.hh" #include "mse.hh" #include "mse-grad.hh" #include "relu-grad.hh" #include "seq.hh" #include "sigmoid-cross-entropy.hh" #include "sigmoid-cross-entropy-grad.hh" #include "sigmoid-grad.hh" #include "softmax.hh" #include "softmax-cross-entropy.hh" #include "softmax-cross-entropy-grad.hh" #include "tanh-grad.hh" #include "update.hh" #include "variable.hh" #include "vect-sigmoid.hh" #include "conv2d.hh" #include "conv2d-bias-add.hh" #include "conv2d-input-grad.hh" #include "conv2d-kernel-grad.hh" #include "conv2d-bias-add-grad.hh" #include "conv2d-transpose.hh" #include "conv2d-transpose-input-grad.hh" #include "conv2d-transpose-kernel-grad.hh" #include "vect-relu.hh" #include "vect-relu-leaky.hh" #include "vect-tanh.hh" #include "reshape.hh" namespace ops { OpsBuilder& OpsBuilder::instance() { static OpsBuilder builder; return builder; } OpsBuilder::OpsBuilder() : graph_(Graph::instance()) {} Add* OpsBuilder::add(Op* left, Op* right) { if (left->shape_get() != right->shape_get()) throw std::runtime_error {"add: left and right must have the same shape"}; auto res = new Add(left, right); graph_.add(res); return res; } AdamUpdate* OpsBuilder::adam_update(Variable* var, Op* m, Op* v, dbl_t learning_rate, dbl_t beta1, dbl_t beta2, dbl_t eps) { if (var->shape_get() != m->shape_get()) throw std::runtime_error {"var and m must have the same shape"}; if (var->shape_get() != v->shape_get()) throw std::runtime_error {"var and v must have the same shape"}; auto res = new AdamUpdate(var, m, v, learning_rate, beta1, beta2, eps); graph_.add(res); return res; } ArgmaxAccuracy* OpsBuilder::argmax_accuracy(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"y and y_hat must have the same shape"}; auto res = new ArgmaxAccuracy(y, y_hat); graph_.add(res); return res; } Conv2D* OpsBuilder::conv2d(Op* input, Op* kernel, const int* strides) { if (input->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:input must be a 4D tensor"}; if (kernel->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:kernel must be a 4D tensor"}; auto res = new Conv2D(input, kernel, strides); graph_.add(res); return res; } Conv2DBiasAdd* OpsBuilder::conv2d_bias_add(Op* z, Op* bias) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAdd:z must be a 4D tensor"}; if (bias->shape_get().ndims() != 1) throw std::runtime_error {"Conv2DBiasAdd:bias must be a 1D array"}; if (z->shape_get()[3] != bias->shape_get()[0]) throw std::runtime_error {"Conv2DBiasAdd:z and bias shape are not corresponding"}; auto res = new Conv2DBiasAdd(z, bias); graph_.add(res); return res; } Conv2DBiasAddGrad* OpsBuilder::conv2d_bias_add_grad(Op* z) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAddGrad:z must be a 4D tensor"}; auto res = new Conv2DBiasAddGrad(z); graph_.add(res); return res; } Conv2DInputGrad* OpsBuilder::conv2d_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DKernelGrad* OpsBuilder::conv2d_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size, const int* padded_size) { auto res = new Conv2DKernelGrad(y, input, strides, kernel_size, padded_size); graph_.add(res); return res; } Conv2DTranspose* OpsBuilder::conv2d_transpose(Op* input, Op* kernel, const int* out_size, const int* strides) { auto res = new Conv2DTranspose(input, kernel, out_size, strides); graph_.add(res); return res; } Conv2DTransposeInputGrad* OpsBuilder::conv2d_transpose_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DTransposeInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DTransposeKernelGrad* OpsBuilder::conv2d_transpose_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size) { auto res = new Conv2DTransposeKernelGrad(y, input, strides, kernel_size); graph_.add(res); return res; } Input* OpsBuilder::input(const Shape& shape) { auto res = new Input(shape); graph_.add(res); return res; } LeakyReluGrad* OpsBuilder::leaky_relu_grad(Op* z, Op* dout, dbl_t alpha) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"LeakyReluGrad: z and dout must have the same shape"}; auto res = new LeakyReluGrad(z, dout, alpha); graph_.add(res); return res; } LogSoftmax* OpsBuilder::log_softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"log softmax input must be a matrix"}; auto res = new LogSoftmax(arg); graph_.add(res); return res; } MatMatMul* OpsBuilder::mat_mat_mul(Op* left, Op* right, bool left_tr, bool right_tr) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 2) throw std::runtime_error{"right operand must be a matrix"}; if (left->shape_get()[!left_tr] != right->shape_get()[right_tr]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatMatMul(left, right, left_tr, right_tr); graph_.add(res); return res; } MatMulAdd* OpsBuilder::mat_mul_add(Op* x, Op* w, Op* b) { if (x->shape_get().ndims() != 2) throw std::runtime_error{"x must be a matrix"}; if (w->shape_get().ndims() != 2) throw std::runtime_error{"w must be a matrix"}; if (b->shape_get().ndims() != 1) throw std::runtime_error{"b must be a vector"}; if (x->shape_get()[1] != w->shape_get()[0]) throw std::runtime_error{"x[1] and w[0] differ"}; if (w->shape_get()[1] != b->shape_get()[0]) throw std::runtime_error{"w[1] and b[0] differ"}; auto res = new MatMulAdd(x, w, b); graph_.add(res); return res; } MatRvectAdd* OpsBuilder::mat_rvect_add(Op* left, Op* right) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 1) throw std::runtime_error{"right operand must be a vector"}; if (left->shape_get()[1] != right->shape_get()[0]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatRvectAdd(left, right); graph_.add(res); return res; } MatSum* OpsBuilder::mat_sum(Op* arg, std::size_t axis) { if (arg->shape_get().ndims() != 2) throw std::runtime_error {"arg must be a matrix"}; if (axis >= 2) throw std::runtime_error {"axis must be 0 or 1"}; auto res = new MatSum(arg, axis); graph_.add(res); return res; } MomentUpdate* OpsBuilder::moment_update(Variable* var, Op* dt, dbl_t coeff1, dbl_t coeff2, bool sq_update) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; auto res = new MomentUpdate(var, dt, coeff1, coeff2, sq_update); graph_.add(res); return res; } MSE* OpsBuilder::mse(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSE: y and y_hat must have the same shape"}; auto res = new MSE(y, y_hat); graph_.add(res); return res; } Reshape* OpsBuilder::reshape(Op* arg, const Shape& shape) { auto& arg_shape = arg->shape_get(); if (shape.defined() && shape.total() != arg_shape.total()) throw std::runtime_error {"Reshape:"}; // if (! shape.defined() && (arg_shape.total() % (- shape.total()) != 0)) // throw std::runtime_error {"Reshape:"}; // nb -1 = max 1 ?? has to be checked auto res = new Reshape(arg, shape); graph_.add(res); return res; } MSEGrad* OpsBuilder::mse_grad(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSEGrad: y and y_hat must have the same shape"}; auto res = new MSEGrad(y, y_hat); graph_.add(res); return res; } ReluGrad* OpsBuilder::relu_grad(Op* z, Op* dout) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"ReluGrad: z and dout must have the same shape"}; auto res = new ReluGrad(z, dout); graph_.add(res); return res; } Seq* OpsBuilder::seq(const std::vector<Op*>& ops) { if (ops.empty()) throw std::runtime_error {"seq: ops can't be empty"}; auto res = new Seq(ops); graph_.add(res); return res; } SigmoidCrossEntropy* OpsBuilder::sigmoid_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropy(y, logits); graph_.add(res); return res; } SigmoidCrossEntropyGrad* OpsBuilder::sigmoid_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropyGrad(y, logits); graph_.add(res); return res; } SigmoidGrad* OpsBuilder::sigmoid_grad(Op* sig_out, Op* dout) { if (sig_out->shape_get() != dout->shape_get()) throw std::runtime_error {"SigmoidGrad: sig_out and dout must have the same shape"}; auto res = new SigmoidGrad(sig_out, dout); graph_.add(res); return res; } Softmax* OpsBuilder::softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"softmax input must be a matrix"}; auto res = new Softmax(arg); graph_.add(res); return res; } SoftmaxCrossEntropy* OpsBuilder::softmax_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropy(y, logits); graph_.add(res); return res; } SoftmaxCrossEntropyGrad* OpsBuilder::softmax_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropyGrad(y, logits); graph_.add(res); return res; } TanhGrad* OpsBuilder::tanh_grad(Op* tanh_out, Op* dout) { if (tanh_out->shape_get() != dout->shape_get()) throw std::runtime_error {"TanhGrad: tanh_out and dout must have the same shape"}; auto res = new TanhGrad(tanh_out, dout); graph_.add(res); return res; } Update* OpsBuilder::update(Variable* var, Op* dt, Op* coeff) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; if (coeff->shape_get().ndims()) throw std::runtime_error {"coeff must be a scalar"}; auto res = new Update(var, dt, coeff); graph_.add(res); return res; } Variable* OpsBuilder::variable(const Shape& shape, bool trainable) { if (!shape.defined()) throw std::runtime_error{"shape not fully defined"}; auto res = new Variable(shape, trainable); graph_.add_var(res); return res; } VectSigmoid* OpsBuilder::vect_sigmoid(Op* arg) { auto res = new VectSigmoid(arg); graph_.add(res); return res; } VectRelu* OpsBuilder::vect_relu(Op* arg) { auto res = new VectRelu(arg); graph_.add(res); return res; } VectReluLeaky* OpsBuilder::vect_relu_leaky(Op* arg, const dbl_t alpha) { auto res = new VectReluLeaky(arg, alpha); graph_.add(res); return res; } VectTanh* OpsBuilder::vect_tanh(Op* arg) { auto res = new VectTanh(arg); graph_.add(res); return res; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "ops-builder.hh" #include <stdexcept> #include "graph.hh" #include "add.hh" #include "adam-update.hh" #include "argmax-accuracy.hh" #include "input.hh" #include "leaky-relu-grad.hh" #include "log-softmax.hh" #include "mat-mat-mul.hh" #include "mat-mul-add.hh" #include "mat-rvect-add.hh" #include "mat-sum.hh" #include "moment-update.hh" #include "mse.hh" #include "mse-grad.hh" #include "relu-grad.hh" #include "seq.hh" #include "sigmoid-cross-entropy.hh" #include "sigmoid-cross-entropy-grad.hh" #include "sigmoid-grad.hh" #include "softmax.hh" #include "softmax-cross-entropy.hh" #include "softmax-cross-entropy-grad.hh" #include "tanh-grad.hh" #include "update.hh" #include "variable.hh" #include "vect-sigmoid.hh" #include "conv2d.hh" #include "conv2d-bias-add.hh" #include "conv2d-input-grad.hh" #include "conv2d-kernel-grad.hh" #include "conv2d-bias-add-grad.hh" #include "conv2d-transpose.hh" #include "conv2d-transpose-input-grad.hh" #include "conv2d-transpose-kernel-grad.hh" #include "vect-relu.hh" #include "vect-relu-leaky.hh" #include "vect-tanh.hh" #include "reshape.hh" namespace ops { OpsBuilder& OpsBuilder::instance() { static OpsBuilder builder; return builder; } OpsBuilder::OpsBuilder() : graph_(Graph::instance()) {} Add* OpsBuilder::add(Op* left, Op* right) { if (left->shape_get() != right->shape_get()) throw std::runtime_error {"add: left and right must have the same shape"}; auto res = new Add(left, right); graph_.add(res); return res; } AdamUpdate* OpsBuilder::adam_update(Variable* var, Op* m, Op* v, dbl_t learning_rate, dbl_t beta1, dbl_t beta2, dbl_t eps) { if (var->shape_get() != m->shape_get()) throw std::runtime_error {"var and m must have the same shape"}; if (var->shape_get() != v->shape_get()) throw std::runtime_error {"var and v must have the same shape"}; auto res = new AdamUpdate(var, m, v, learning_rate, beta1, beta2, eps); graph_.add(res); return res; } ArgmaxAccuracy* OpsBuilder::argmax_accuracy(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"y and y_hat must have the same shape"}; auto res = new ArgmaxAccuracy(y, y_hat); graph_.add(res); return res; } Conv2D* OpsBuilder::conv2d(Op* input, Op* kernel, const int* strides) { if (input->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:input must be a 4D tensor"}; if (kernel->shape_get().ndims() != 4) throw std::runtime_error {"Conv2D:kernel must be a 4D tensor"}; auto res = new Conv2D(input, kernel, strides); graph_.add(res); return res; } Conv2DBiasAdd* OpsBuilder::conv2d_bias_add(Op* z, Op* bias) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAdd:z must be a 4D tensor"}; if (bias->shape_get().ndims() != 1) throw std::runtime_error {"Conv2DBiasAdd:bias must be a 1D array"}; if (z->shape_get()[3] != bias->shape_get()[0]) throw std::runtime_error {"Conv2DBiasAdd:z and bias shape are not corresponding"}; auto res = new Conv2DBiasAdd(z, bias); graph_.add(res); return res; } Conv2DBiasAddGrad* OpsBuilder::conv2d_bias_add_grad(Op* z) { if (z->shape_get().ndims() != 4) throw std::runtime_error {"Conv2DBiasAddGrad:z must be a 4D tensor"}; auto res = new Conv2DBiasAddGrad(z); graph_.add(res); return res; } Conv2DInputGrad* OpsBuilder::conv2d_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DKernelGrad* OpsBuilder::conv2d_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size, const int* padded_size) { auto res = new Conv2DKernelGrad(y, input, strides, kernel_size, padded_size); graph_.add(res); return res; } Conv2DTranspose* OpsBuilder::conv2d_transpose(Op* input, Op* kernel, const int* out_size, const int* strides) { auto res = new Conv2DTranspose(input, kernel, out_size, strides); graph_.add(res); return res; } Conv2DTransposeInputGrad* OpsBuilder::conv2d_transpose_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size) { auto res = new Conv2DTransposeInputGrad(y, kernel, strides, input_size); graph_.add(res); return res; } Conv2DTransposeKernelGrad* OpsBuilder::conv2d_transpose_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size) { auto res = new Conv2DTransposeKernelGrad(y, input, strides, kernel_size); graph_.add(res); return res; } Input* OpsBuilder::input(const Shape& shape) { auto res = new Input(shape); graph_.add(res); return res; } LeakyReluGrad* OpsBuilder::leaky_relu_grad(Op* z, Op* dout, dbl_t alpha) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"LeakyReluGrad: z and dout must have the same shape"}; auto res = new LeakyReluGrad(z, dout, alpha); graph_.add(res); return res; } LogSoftmax* OpsBuilder::log_softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"log softmax input must be a matrix"}; auto res = new LogSoftmax(arg); graph_.add(res); return res; } MatMatMul* OpsBuilder::mat_mat_mul(Op* left, Op* right, bool left_tr, bool right_tr) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 2) throw std::runtime_error{"right operand must be a matrix"}; if (left->shape_get()[!left_tr] != right->shape_get()[right_tr]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatMatMul(left, right, left_tr, right_tr); graph_.add(res); return res; } MatMulAdd* OpsBuilder::mat_mul_add(Op* x, Op* w, Op* b) { if (x->shape_get().ndims() != 2) throw std::runtime_error{"x must be a matrix"}; if (w->shape_get().ndims() != 2) throw std::runtime_error{"w must be a matrix"}; if (b->shape_get().ndims() != 1) throw std::runtime_error{"b must be a vector"}; if (x->shape_get()[1] != w->shape_get()[0]) throw std::runtime_error{"x[1] and w[0] differ"}; if (w->shape_get()[1] != b->shape_get()[0]) throw std::runtime_error{"w[1] and b[0] differ"}; auto res = new MatMulAdd(x, w, b); graph_.add(res); return res; } MatRvectAdd* OpsBuilder::mat_rvect_add(Op* left, Op* right) { if (left->shape_get().ndims() != 2) throw std::runtime_error{"left operand must be a matrix"}; if (right->shape_get().ndims() != 1) throw std::runtime_error{"right operand must be a vector"}; if (left->shape_get()[1] != right->shape_get()[0]) throw std::runtime_error{"left[1] and right[0] differ"}; auto res = new MatRvectAdd(left, right); graph_.add(res); return res; } MatSum* OpsBuilder::mat_sum(Op* arg, std::size_t axis) { if (arg->shape_get().ndims() != 2) throw std::runtime_error {"arg must be a matrix"}; if (axis >= 2) throw std::runtime_error {"axis must be 0 or 1"}; auto res = new MatSum(arg, axis); graph_.add(res); return res; } MomentUpdate* OpsBuilder::moment_update(Variable* var, Op* dt, dbl_t coeff1, dbl_t coeff2, bool sq_update) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; auto res = new MomentUpdate(var, dt, coeff1, coeff2, sq_update); graph_.add(res); return res; } MSE* OpsBuilder::mse(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSE:y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSE: y and y_hat must have the same shape"}; auto res = new MSE(y, y_hat); graph_.add(res); return res; } Reshape* OpsBuilder::reshape(Op* arg, const Shape& shape) { auto& arg_shape = arg->shape_get(); if (shape.defined() && shape.total() != arg_shape.total()) throw std::runtime_error {"Reshape:"}; // if (! shape.defined() && (arg_shape.total() % (- shape.total()) != 0)) // throw std::runtime_error {"Reshape:"}; // nb -1 = max 1 ?? has to be checked auto res = new Reshape(arg, shape); graph_.add(res); return res; } MSEGrad* OpsBuilder::mse_grad(Op* y, Op* y_hat) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y must be a matrix"}; if (y_hat->shape_get().ndims() != 2) throw std::runtime_error {"MSEGrad: y_hat must be a matrix"}; if (y->shape_get() != y_hat->shape_get()) throw std::runtime_error {"MSEGrad: y and y_hat must have the same shape"}; auto res = new MSEGrad(y, y_hat); graph_.add(res); return res; } ReluGrad* OpsBuilder::relu_grad(Op* z, Op* dout) { if (z->shape_get() != dout->shape_get()) throw std::runtime_error {"ReluGrad: z and dout must have the same shape"}; auto res = new ReluGrad(z, dout); graph_.add(res); return res; } Seq* OpsBuilder::seq(const std::vector<Op*>& ops) { if (ops.empty()) throw std::runtime_error {"seq: ops can't be empty"}; auto res = new Seq(ops); graph_.add(res); return res; } SigmoidCrossEntropy* OpsBuilder::sigmoid_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropy(y, logits); graph_.add(res); return res; } SigmoidCrossEntropyGrad* OpsBuilder::sigmoid_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SigmoidCrossEntropyGrad(y, logits); graph_.add(res); return res; } SigmoidGrad* OpsBuilder::sigmoid_grad(Op* sig_out, Op* dout) { if (sig_out->shape_get() != dout->shape_get()) throw std::runtime_error {"SigmoidGrad: sig_out and dout must have the same shape"}; auto res = new SigmoidGrad(sig_out, dout); graph_.add(res); return res; } Softmax* OpsBuilder::softmax(Op* arg) { if (arg->shape_get().ndims() != 2) throw std::runtime_error{"softmax input must be a matrix"}; auto res = new Softmax(arg); graph_.add(res); return res; } SoftmaxCrossEntropy* OpsBuilder::softmax_cross_entropy(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropy(y, logits); graph_.add(res); return res; } SoftmaxCrossEntropyGrad* OpsBuilder::softmax_cross_entropy_grad(Op* y, Op* logits) { if (y->shape_get().ndims() != 2) throw std::runtime_error {"y must be a matrix"}; if (logits->shape_get().ndims() != 2) throw std::runtime_error {"logits must be a matrix"}; if (y->shape_get() != logits->shape_get()) throw std::runtime_error {"y and logits must have the same shape"}; auto res = new SoftmaxCrossEntropyGrad(y, logits); graph_.add(res); return res; } TanhGrad* OpsBuilder::tanh_grad(Op* tanh_out, Op* dout) { if (tanh_out->shape_get() != dout->shape_get()) throw std::runtime_error {"TanhGrad: tanh_out and dout must have the same shape"}; auto res = new TanhGrad(tanh_out, dout); graph_.add(res); return res; } Update* OpsBuilder::update(Variable* var, Op* dt, Op* coeff) { if (var->shape_get() != dt->shape_get()) throw std::runtime_error {"var and dt must have the same shape"}; if (coeff->shape_get().ndims()) throw std::runtime_error {"coeff must be a scalar"}; auto res = new Update(var, dt, coeff); graph_.add(res); return res; } Variable* OpsBuilder::variable(const Shape& shape, bool trainable) { if (!shape.defined()) throw std::runtime_error{"shape not fully defined"}; auto res = new Variable(shape, trainable); graph_.add_var(res); return res; } VectSigmoid* OpsBuilder::vect_sigmoid(Op* arg) { auto res = new VectSigmoid(arg); graph_.add(res); return res; } VectRelu* OpsBuilder::vect_relu(Op* arg) { auto res = new VectRelu(arg); graph_.add(res); return res; } VectReluLeaky* OpsBuilder::vect_relu_leaky(Op* arg, const dbl_t alpha) { auto res = new VectReluLeaky(arg, alpha); graph_.add(res); return res; } VectTanh* OpsBuilder::vect_tanh(Op* arg) { auto res = new VectTanh(arg); graph_.add(res); return res; } }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cuda.h> //You can change the dimension, program will produce two matrices. #define M 600 #define N 800 #define CUDA_CALL(x) {if((x) != cudaSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void matrixAdd(int d_x[][N], int d_y[][N], int d_z[][N]) { int idx = threadIdx.x; int idy = threadIdx.y; if (idx < M && idy < N) { d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy]; } } int main() { int size = (M * N) * sizeof(int); int h_x[M][N], h_y[M][N], h_z[M][N]; int(*d_x)[N], (*d_y)[N], (*d_z)[N]; int i = 0; int j = 0; //Initialize matrix for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_x[i][j] = M; h_y[i][j] = N; h_z[i][j] = 0; } } cudaEvent_t startC, stopC; float elapsed_time_msC; cudaEventCreate( &startC ); cudaEventCreate( &stopC ); cudaEventRecord( startC, 0 ); for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_z[i][j] =h_x[i][j] + h_y[i][j] ; } } cudaEventRecord( stopC, 0 ); cudaEventSynchronize( stopC ); cudaEventElapsedTime( &elapsed_time_msC, startC, stopC ); printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC); CUDA_CALL(cudaMalloc(&d_x, size)); CUDA_CALL(cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&d_y, size)); CUDA_CALL(cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&d_z, size)); dim3 dimGrid(1, 1); dim3 dimBlock(M, N); cudaEvent_t start, stop; float elapsed_time_ms; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); matrixAdd <<< dimGrid, dimBlock >>> (d_x, d_y, d_z); CUDA_CALL(cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost)); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsed_time_ms, start, stop ); printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms); cudaFree(d_x); cudaFree(d_y); cudaFree(d_z); printf("Output of Summation\n"); // for (i = 0; i<M; i++) { // for (j = 0; j<N; j++) { // printf("%d\t", h_z[i][j]); // } // printf("\n"); // } printf("\n"); }
code for sm_80 Function : _Z9matrixAddPA800_iS0_S0_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */ /* 0x000e280000002200 */ /*0020*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e620000002100 */ /*0030*/ ISETP.GT.AND P0, PT, R9, 0x31f, PT ; /* 0x0000031f0900780c */ /* 0x001fc80003f04270 */ /*0040*/ ISETP.GT.OR P0, PT, R6, 0x257, P0 ; /* 0x000002570600780c */ /* 0x002fda0000704670 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 0.000274658203125 ; /* 0x00000c80ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*00a0*/ IMAD.WIDE R2, R9, 0x4, R2 ; /* 0x0000000409027825 */ /* 0x000fc800078e0202 */ /*00b0*/ IMAD.WIDE R4, R9, 0x4, R4 ; /* 0x0000000409047825 */ /* 0x000fe400078e0204 */ /*00c0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea8000c1e1900 */ /*00d0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fcc00078e0207 */ /*00f0*/ IMAD.WIDE R6, R9, 0x4, R6 ; /* 0x0000000409067825 */ /* 0x000fe200078e0206 */ /*0100*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cuda.h> //You can change the dimension, program will produce two matrices. #define M 600 #define N 800 #define CUDA_CALL(x) {if((x) != cudaSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void matrixAdd(int d_x[][N], int d_y[][N], int d_z[][N]) { int idx = threadIdx.x; int idy = threadIdx.y; if (idx < M && idy < N) { d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy]; } } int main() { int size = (M * N) * sizeof(int); int h_x[M][N], h_y[M][N], h_z[M][N]; int(*d_x)[N], (*d_y)[N], (*d_z)[N]; int i = 0; int j = 0; //Initialize matrix for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_x[i][j] = M; h_y[i][j] = N; h_z[i][j] = 0; } } cudaEvent_t startC, stopC; float elapsed_time_msC; cudaEventCreate( &startC ); cudaEventCreate( &stopC ); cudaEventRecord( startC, 0 ); for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_z[i][j] =h_x[i][j] + h_y[i][j] ; } } cudaEventRecord( stopC, 0 ); cudaEventSynchronize( stopC ); cudaEventElapsedTime( &elapsed_time_msC, startC, stopC ); printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC); CUDA_CALL(cudaMalloc(&d_x, size)); CUDA_CALL(cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&d_y, size)); CUDA_CALL(cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&d_z, size)); dim3 dimGrid(1, 1); dim3 dimBlock(M, N); cudaEvent_t start, stop; float elapsed_time_ms; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); matrixAdd <<< dimGrid, dimBlock >>> (d_x, d_y, d_z); CUDA_CALL(cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost)); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsed_time_ms, start, stop ); printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms); cudaFree(d_x); cudaFree(d_y); cudaFree(d_z); printf("Output of Summation\n"); // for (i = 0; i<M; i++) { // for (j = 0; j<N; j++) { // printf("%d\t", h_z[i][j]); // } // printf("\n"); // } printf("\n"); }
.file "tmpxft_00010e9b_00000000-6_matrix_sum.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ .type _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_, @function _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9matrixAddPA800_iS0_S0_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_, .-_Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ .globl _Z9matrixAddPA800_iS0_S0_ .type _Z9matrixAddPA800_iS0_S0_, @function _Z9matrixAddPA800_iS0_S0_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9matrixAddPA800_iS0_S0_, .-_Z9matrixAddPA800_iS0_S0_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Time to calculate results(CPU Time): %f ms.\n" .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/blgnksy/CudaLabExercises/master/LabAssign1/matrix_sum.cu" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "CUDA error at %s:%d\n" .LC3: .string " %s\n" .section .rodata.str1.8 .align 8 .LC4: .string "Time to calculate results(GPU Time): %f ms.\n" .section .rodata.str1.1 .LC5: .string "Output of Summation\n" .LC6: .string "\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 leaq -5758976(%rsp), %r11 .cfi_def_cfa 11, 5759016 .LPSRL0: subq $4096, %rsp orq $0, (%rsp) cmpq %r11, %rsp jne .LPSRL0 .cfi_def_cfa_register 7 subq $1144, %rsp .cfi_def_cfa_offset 5760160 movq %fs:40, %rax movq %rax, 5760104(%rsp) xorl %eax, %eax leaq 96(%rsp), %rbx leaq 1920096(%rsp), %rbp leaq 3840096(%rsp), %r12 movq %rbp, %r13 movq %r12, %rsi movq %rbp, %rcx movq %rbx, %rdx .L12: movl $0, %eax .L13: movl $600, (%rdx,%rax) movl $800, (%rcx,%rax) movl $0, (%rsi,%rax) addq $4, %rax cmpq $3200, %rax jne .L13 addq $3200, %rdx addq $3200, %rcx addq $3200, %rsi cmpq %r13, %rdx jne .L12 leaq 40(%rsp), %rdi call cudaEventCreate@PLT leaq 48(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT .L15: movl $0, %eax .L16: movl 0(%rbp,%rax), %edx addl (%rbx,%rax), %edx movl %edx, (%r12,%rax) addq $4, %rax cmpq $3200, %rax jne .L16 addq $3200, %rbx addq $3200, %rbp addq $3200, %r12 cmpq %r13, %rbx jne .L15 movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movq 48(%rsp), %rdi call cudaEventSynchronize@PLT leaq 8(%rsp), %rdi movq 48(%rsp), %rdx movq 40(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 8(%rsp), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq 16(%rsp), %rdi movl $1920000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L29 leaq 96(%rsp), %rsi movl $1, %ecx movl $1920000, %edx movq 16(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L30 leaq 24(%rsp), %rdi movl $1920000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L31 leaq 1920096(%rsp), %rsi movl $1, %ecx movl $1920000, %edx movq 24(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L32 leaq 32(%rsp), %rdi movl $1920000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L33 movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $600, 84(%rsp) movl $800, 88(%rsp) movl $1, 92(%rsp) leaq 56(%rsp), %rdi call cudaEventCreate@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movl 92(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movq 72(%rsp), %rdi movl 80(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L34 .L23: leaq 3840096(%rsp), %rdi movl $2, %ecx movl $1920000, %edx movq 32(%rsp), %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L35 movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movq 64(%rsp), %rdi call cudaEventSynchronize@PLT leaq 12(%rsp), %rdi movq 64(%rsp), %rdx movq 56(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 leaq .LC4(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 5760104(%rsp), %rax subq %fs:40, %rax jne .L36 movl $0, %eax addq $5760120, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state movl $57, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L30: movl $58, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L31: movl $60, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L32: movl $61, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L33: movl $63, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L34: movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ jmp .L23 .L35: movl $76, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC7: .string "_Z9matrixAddPA800_iS0_S0_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z9matrixAddPA800_iS0_S0_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cuda.h> //You can change the dimension, program will produce two matrices. #define M 600 #define N 800 #define CUDA_CALL(x) {if((x) != cudaSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void matrixAdd(int d_x[][N], int d_y[][N], int d_z[][N]) { int idx = threadIdx.x; int idy = threadIdx.y; if (idx < M && idy < N) { d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy]; } } int main() { int size = (M * N) * sizeof(int); int h_x[M][N], h_y[M][N], h_z[M][N]; int(*d_x)[N], (*d_y)[N], (*d_z)[N]; int i = 0; int j = 0; //Initialize matrix for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_x[i][j] = M; h_y[i][j] = N; h_z[i][j] = 0; } } cudaEvent_t startC, stopC; float elapsed_time_msC; cudaEventCreate( &startC ); cudaEventCreate( &stopC ); cudaEventRecord( startC, 0 ); for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_z[i][j] =h_x[i][j] + h_y[i][j] ; } } cudaEventRecord( stopC, 0 ); cudaEventSynchronize( stopC ); cudaEventElapsedTime( &elapsed_time_msC, startC, stopC ); printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC); CUDA_CALL(cudaMalloc(&d_x, size)); CUDA_CALL(cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&d_y, size)); CUDA_CALL(cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&d_z, size)); dim3 dimGrid(1, 1); dim3 dimBlock(M, N); cudaEvent_t start, stop; float elapsed_time_ms; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); matrixAdd <<< dimGrid, dimBlock >>> (d_x, d_y, d_z); CUDA_CALL(cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost)); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsed_time_ms, start, stop ); printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms); cudaFree(d_x); cudaFree(d_y); cudaFree(d_z); printf("Output of Summation\n"); // for (i = 0; i<M; i++) { // for (j = 0; j<N; j++) { // printf("%d\t", h_z[i][j]); // } // printf("\n"); // } printf("\n"); }
#include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> //You can change the dimension, program will produce two matrices. #define M 600 #define N 800 #define CUDA_CALL(x) {if((x) != hipSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", hipGetErrorString(hipGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void matrixAdd(int d_x[][N], int d_y[][N], int d_z[][N]) { int idx = threadIdx.x; int idy = threadIdx.y; if (idx < M && idy < N) { d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy]; } } int main() { int size = (M * N) * sizeof(int); int h_x[M][N], h_y[M][N], h_z[M][N]; int(*d_x)[N], (*d_y)[N], (*d_z)[N]; int i = 0; int j = 0; //Initialize matrix for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_x[i][j] = M; h_y[i][j] = N; h_z[i][j] = 0; } } hipEvent_t startC, stopC; float elapsed_time_msC; hipEventCreate( &startC ); hipEventCreate( &stopC ); hipEventRecord( startC, 0 ); for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_z[i][j] =h_x[i][j] + h_y[i][j] ; } } hipEventRecord( stopC, 0 ); hipEventSynchronize( stopC ); hipEventElapsedTime( &elapsed_time_msC, startC, stopC ); printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC); CUDA_CALL(hipMalloc(&d_x, size)); CUDA_CALL(hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&d_y, size)); CUDA_CALL(hipMemcpy(d_y, h_y, size, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&d_z, size)); dim3 dimGrid(1, 1); dim3 dimBlock(M, N); hipEvent_t start, stop; float elapsed_time_ms; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); matrixAdd <<< dimGrid, dimBlock >>> (d_x, d_y, d_z); CUDA_CALL(hipMemcpy(h_z, d_z, size, hipMemcpyDeviceToHost)); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsed_time_ms, start, stop ); printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms); hipFree(d_x); hipFree(d_y); hipFree(d_z); printf("Output of Summation\n"); // for (i = 0; i<M; i++) { // for (j = 0; j<N; j++) { // printf("%d\t", h_z[i][j]); // } // printf("\n"); // } printf("\n"); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> //You can change the dimension, program will produce two matrices. #define M 600 #define N 800 #define CUDA_CALL(x) {if((x) != hipSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", hipGetErrorString(hipGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void matrixAdd(int d_x[][N], int d_y[][N], int d_z[][N]) { int idx = threadIdx.x; int idy = threadIdx.y; if (idx < M && idy < N) { d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy]; } } int main() { int size = (M * N) * sizeof(int); int h_x[M][N], h_y[M][N], h_z[M][N]; int(*d_x)[N], (*d_y)[N], (*d_z)[N]; int i = 0; int j = 0; //Initialize matrix for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_x[i][j] = M; h_y[i][j] = N; h_z[i][j] = 0; } } hipEvent_t startC, stopC; float elapsed_time_msC; hipEventCreate( &startC ); hipEventCreate( &stopC ); hipEventRecord( startC, 0 ); for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_z[i][j] =h_x[i][j] + h_y[i][j] ; } } hipEventRecord( stopC, 0 ); hipEventSynchronize( stopC ); hipEventElapsedTime( &elapsed_time_msC, startC, stopC ); printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC); CUDA_CALL(hipMalloc(&d_x, size)); CUDA_CALL(hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&d_y, size)); CUDA_CALL(hipMemcpy(d_y, h_y, size, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&d_z, size)); dim3 dimGrid(1, 1); dim3 dimBlock(M, N); hipEvent_t start, stop; float elapsed_time_ms; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); matrixAdd <<< dimGrid, dimBlock >>> (d_x, d_y, d_z); CUDA_CALL(hipMemcpy(h_z, d_z, size, hipMemcpyDeviceToHost)); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsed_time_ms, start, stop ); printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms); hipFree(d_x); hipFree(d_y); hipFree(d_z); printf("Output of Summation\n"); // for (i = 0; i<M; i++) { // for (j = 0; j<N; j++) { // printf("%d\t", h_z[i][j]); // } // printf("\n"); // } printf("\n"); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9matrixAddPA800_iS0_S0_ .globl _Z9matrixAddPA800_iS0_S0_ .p2align 8 .type _Z9matrixAddPA800_iS0_S0_,@function _Z9matrixAddPA800_iS0_S0_: v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_u32_e32 vcc_lo, 0x258, v1 v_cmp_gt_u32_e64 s2, 0x320, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_mul_u32_u24_e32 v4, 0xc80, v1 v_mul_hi_u32_u24_e32 v5, 0xc80, v1 v_lshlrev_b32_e32 v6, 2, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo v_add_co_u32 v2, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v0, vcc_lo, v0, v6 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v2, vcc_lo, v2, v6 v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo global_load_b32 v0, v[0:1], off global_load_b32 v1, v[2:3], off v_add_co_u32 v2, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v5, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v4, v1, v0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v0, vcc_lo, v2, v6 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo global_store_b32 v[0:1], v4, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9matrixAddPA800_iS0_S0_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 8 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9matrixAddPA800_iS0_S0_, .Lfunc_end0-_Z9matrixAddPA800_iS0_S0_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9matrixAddPA800_iS0_S0_ .private_segment_fixed_size: 0 .sgpr_count: 10 .sgpr_spill_count: 0 .symbol: _Z9matrixAddPA800_iS0_S0_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> //You can change the dimension, program will produce two matrices. #define M 600 #define N 800 #define CUDA_CALL(x) {if((x) != hipSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", hipGetErrorString(hipGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void matrixAdd(int d_x[][N], int d_y[][N], int d_z[][N]) { int idx = threadIdx.x; int idy = threadIdx.y; if (idx < M && idy < N) { d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy]; } } int main() { int size = (M * N) * sizeof(int); int h_x[M][N], h_y[M][N], h_z[M][N]; int(*d_x)[N], (*d_y)[N], (*d_z)[N]; int i = 0; int j = 0; //Initialize matrix for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_x[i][j] = M; h_y[i][j] = N; h_z[i][j] = 0; } } hipEvent_t startC, stopC; float elapsed_time_msC; hipEventCreate( &startC ); hipEventCreate( &stopC ); hipEventRecord( startC, 0 ); for (i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_z[i][j] =h_x[i][j] + h_y[i][j] ; } } hipEventRecord( stopC, 0 ); hipEventSynchronize( stopC ); hipEventElapsedTime( &elapsed_time_msC, startC, stopC ); printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC); CUDA_CALL(hipMalloc(&d_x, size)); CUDA_CALL(hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&d_y, size)); CUDA_CALL(hipMemcpy(d_y, h_y, size, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&d_z, size)); dim3 dimGrid(1, 1); dim3 dimBlock(M, N); hipEvent_t start, stop; float elapsed_time_ms; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); matrixAdd <<< dimGrid, dimBlock >>> (d_x, d_y, d_z); CUDA_CALL(hipMemcpy(h_z, d_z, size, hipMemcpyDeviceToHost)); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsed_time_ms, start, stop ); printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms); hipFree(d_x); hipFree(d_y); hipFree(d_z); printf("Output of Summation\n"); // for (i = 0; i<M; i++) { // for (j = 0; j<N; j++) { // printf("%d\t", h_z[i][j]); // } // printf("\n"); // } printf("\n"); }
.text .file "matrix_sum.hip" .globl _Z24__device_stub__matrixAddPA800_iS0_S0_ # -- Begin function _Z24__device_stub__matrixAddPA800_iS0_S0_ .p2align 4, 0x90 .type _Z24__device_stub__matrixAddPA800_iS0_S0_,@function _Z24__device_stub__matrixAddPA800_iS0_S0_: # @_Z24__device_stub__matrixAddPA800_iS0_S0_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9matrixAddPA800_iS0_S0_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__matrixAddPA800_iS0_S0_, .Lfunc_end0-_Z24__device_stub__matrixAddPA800_iS0_S0_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $5760168, %rsp # imm = 0x57E4A8 .cfi_def_cfa_offset 5760192 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 leaq 3840160(%rsp), %rbx xorl %r14d, %r14d movl $1920000, %edx # imm = 0x1D4C00 movq %rbx, %rdi xorl %esi, %esi callq memset@PLT leaq 1920160(%rsp), %rax leaq 160(%rsp), %rcx .p2align 4, 0x90 .LBB1_1: # %.preheader43 # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 movl $600, (%rax,%rdx,4) # imm = 0x258 movl $800, (%rcx,%rdx,4) # imm = 0x320 incq %rdx cmpq $800, %rdx # imm = 0x320 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 Depth=1 incq %r14 addq $3200, %rax # imm = 0xC80 addq $3200, %rcx # imm = 0xC80 cmpq $600, %r14 # imm = 0x258 jne .LBB1_1 # %bb.4: leaq 56(%rsp), %rdi callq hipEventCreate leaq 8(%rsp), %rdi callq hipEventCreate movq 56(%rsp), %rdi xorl %r14d, %r14d xorl %esi, %esi callq hipEventRecord leaq 1920160(%rsp), %rax leaq 160(%rsp), %rcx .p2align 4, 0x90 .LBB1_5: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_6: # Parent Loop BB1_5 Depth=1 # => This Inner Loop Header: Depth=2 movl (%rcx,%rdx,4), %esi addl (%rax,%rdx,4), %esi movl %esi, (%rbx,%rdx,4) incq %rdx cmpq $800, %rdx # imm = 0x320 jne .LBB1_6 # %bb.7: # in Loop: Header=BB1_5 Depth=1 incq %r14 addq $3200, %rax # imm = 0xC80 addq $3200, %rcx # imm = 0xC80 addq $3200, %rbx # imm = 0xC80 cmpq $600, %r14 # imm = 0x258 jne .LBB1_5 # %bb.8: movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 56(%rsp), %rsi movq 8(%rsp), %rdx leaq 44(%rsp), %rdi callq hipEventElapsedTime movss 44(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf leaq 32(%rsp), %rdi movl $1920000, %esi # imm = 0x1D4C00 callq hipMalloc testl %eax, %eax jne .LBB1_9 # %bb.11: movq 32(%rsp), %rdi leaq 1920160(%rsp), %rsi movl $1920000, %edx # imm = 0x1D4C00 movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB1_12 # %bb.13: leaq 24(%rsp), %rdi movl $1920000, %esi # imm = 0x1D4C00 callq hipMalloc testl %eax, %eax jne .LBB1_14 # %bb.15: movq 24(%rsp), %rdi leaq 160(%rsp), %rsi movl $1920000, %edx # imm = 0x1D4C00 movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB1_16 # %bb.17: leaq 16(%rsp), %rdi movl $1920000, %esi # imm = 0x1D4C00 callq hipMalloc testl %eax, %eax jne .LBB1_18 # %bb.19: leaq 48(%rsp), %rdi callq hipEventCreate movq %rsp, %rdi callq hipEventCreate movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $3435973837400, %rdx # imm = 0x32000000258 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_21 # %bb.20: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) leaq 152(%rsp), %rax movq %rax, 64(%rsp) leaq 144(%rsp), %rax movq %rax, 72(%rsp) leaq 136(%rsp), %rax movq %rax, 80(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z9matrixAddPA800_iS0_S0_, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_21: movq 16(%rsp), %rsi leaq 3840160(%rsp), %rdi movl $1920000, %edx # imm = 0x1D4C00 movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB1_22 # %bb.23: movq (%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq (%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq (%rsp), %rdx leaq 64(%rsp), %rdi callq hipEventElapsedTime movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.4, %edi movb $1, %al callq printf movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movl $.Lstr, %edi callq puts@PLT movl $10, %edi callq putchar@PLT xorl %eax, %eax addq $5760168, %rsp # imm = 0x57E4A8 .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB1_9: .cfi_def_cfa_offset 5760192 movl $.L.str.1, %edi movl $.L.str.2, %esi movl $57, %edx jmp .LBB1_10 .LBB1_12: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $58, %edx jmp .LBB1_10 .LBB1_14: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $60, %edx jmp .LBB1_10 .LBB1_16: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $61, %edx jmp .LBB1_10 .LBB1_18: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $63, %edx jmp .LBB1_10 .LBB1_22: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $76, %edx .LBB1_10: xorl %eax, %eax callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.3, %edi movq %rax, %rsi xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9matrixAddPA800_iS0_S0_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9matrixAddPA800_iS0_S0_,@object # @_Z9matrixAddPA800_iS0_S0_ .section .rodata,"a",@progbits .globl _Z9matrixAddPA800_iS0_S0_ .p2align 3, 0x0 _Z9matrixAddPA800_iS0_S0_: .quad _Z24__device_stub__matrixAddPA800_iS0_S0_ .size _Z9matrixAddPA800_iS0_S0_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Time to calculate results(CPU Time): %f ms.\n" .size .L.str, 45 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "CUDA error at %s:%d\n" .size .L.str.1, 21 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/blgnksy/CudaLabExercises/master/LabAssign1/matrix_sum.hip" .size .L.str.2, 115 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " %s\n" .size .L.str.3, 6 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Time to calculate results(GPU Time): %f ms.\n" .size .L.str.4, 45 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9matrixAddPA800_iS0_S0_" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Output of Summation" .size .Lstr, 20 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__matrixAddPA800_iS0_S0_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9matrixAddPA800_iS0_S0_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9matrixAddPA800_iS0_S0_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */ /* 0x000e280000002200 */ /*0020*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */ /* 0x000e620000002100 */ /*0030*/ ISETP.GT.AND P0, PT, R9, 0x31f, PT ; /* 0x0000031f0900780c */ /* 0x001fc80003f04270 */ /*0040*/ ISETP.GT.OR P0, PT, R6, 0x257, P0 ; /* 0x000002570600780c */ /* 0x002fda0000704670 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 0.000274658203125 ; /* 0x00000c80ff077435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*0090*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x000fc800078e0207 */ /*00a0*/ IMAD.WIDE R2, R9, 0x4, R2 ; /* 0x0000000409027825 */ /* 0x000fc800078e0202 */ /*00b0*/ IMAD.WIDE R4, R9, 0x4, R4 ; /* 0x0000000409047825 */ /* 0x000fe400078e0204 */ /*00c0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea8000c1e1900 */ /*00d0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*00e0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fcc00078e0207 */ /*00f0*/ IMAD.WIDE R6, R9, 0x4, R6 ; /* 0x0000000409067825 */ /* 0x000fe200078e0206 */ /*0100*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9matrixAddPA800_iS0_S0_ .globl _Z9matrixAddPA800_iS0_S0_ .p2align 8 .type _Z9matrixAddPA800_iS0_S0_,@function _Z9matrixAddPA800_iS0_S0_: v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_u32_e32 vcc_lo, 0x258, v1 v_cmp_gt_u32_e64 s2, 0x320, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[4:7], s[0:1], 0x0 v_mul_u32_u24_e32 v4, 0xc80, v1 v_mul_hi_u32_u24_e32 v5, 0xc80, v1 v_lshlrev_b32_e32 v6, 2, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo v_add_co_u32 v2, vcc_lo, s6, v4 v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v0, vcc_lo, v0, v6 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v2, vcc_lo, v2, v6 v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo global_load_b32 v0, v[0:1], off global_load_b32 v1, v[2:3], off v_add_co_u32 v2, vcc_lo, s0, v4 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v5, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v4, v1, v0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v0, vcc_lo, v2, v6 v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo global_store_b32 v[0:1], v4, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9matrixAddPA800_iS0_S0_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 8 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9matrixAddPA800_iS0_S0_, .Lfunc_end0-_Z9matrixAddPA800_iS0_S0_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9matrixAddPA800_iS0_S0_ .private_segment_fixed_size: 0 .sgpr_count: 10 .sgpr_spill_count: 0 .symbol: _Z9matrixAddPA800_iS0_S0_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00010e9b_00000000-6_matrix_sum.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ .type _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_, @function _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_: .LFB2082: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9matrixAddPA800_iS0_S0_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_, .-_Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ .globl _Z9matrixAddPA800_iS0_S0_ .type _Z9matrixAddPA800_iS0_S0_, @function _Z9matrixAddPA800_iS0_S0_: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9matrixAddPA800_iS0_S0_, .-_Z9matrixAddPA800_iS0_S0_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Time to calculate results(CPU Time): %f ms.\n" .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/blgnksy/CudaLabExercises/master/LabAssign1/matrix_sum.cu" .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "CUDA error at %s:%d\n" .LC3: .string " %s\n" .section .rodata.str1.8 .align 8 .LC4: .string "Time to calculate results(GPU Time): %f ms.\n" .section .rodata.str1.1 .LC5: .string "Output of Summation\n" .LC6: .string "\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 leaq -5758976(%rsp), %r11 .cfi_def_cfa 11, 5759016 .LPSRL0: subq $4096, %rsp orq $0, (%rsp) cmpq %r11, %rsp jne .LPSRL0 .cfi_def_cfa_register 7 subq $1144, %rsp .cfi_def_cfa_offset 5760160 movq %fs:40, %rax movq %rax, 5760104(%rsp) xorl %eax, %eax leaq 96(%rsp), %rbx leaq 1920096(%rsp), %rbp leaq 3840096(%rsp), %r12 movq %rbp, %r13 movq %r12, %rsi movq %rbp, %rcx movq %rbx, %rdx .L12: movl $0, %eax .L13: movl $600, (%rdx,%rax) movl $800, (%rcx,%rax) movl $0, (%rsi,%rax) addq $4, %rax cmpq $3200, %rax jne .L13 addq $3200, %rdx addq $3200, %rcx addq $3200, %rsi cmpq %r13, %rdx jne .L12 leaq 40(%rsp), %rdi call cudaEventCreate@PLT leaq 48(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 40(%rsp), %rdi call cudaEventRecord@PLT .L15: movl $0, %eax .L16: movl 0(%rbp,%rax), %edx addl (%rbx,%rax), %edx movl %edx, (%r12,%rax) addq $4, %rax cmpq $3200, %rax jne .L16 addq $3200, %rbx addq $3200, %rbp addq $3200, %r12 cmpq %r13, %rbx jne .L15 movl $0, %esi movq 48(%rsp), %rdi call cudaEventRecord@PLT movq 48(%rsp), %rdi call cudaEventSynchronize@PLT leaq 8(%rsp), %rdi movq 48(%rsp), %rdx movq 40(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 8(%rsp), %xmm0 leaq .LC0(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT leaq 16(%rsp), %rdi movl $1920000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L29 leaq 96(%rsp), %rsi movl $1, %ecx movl $1920000, %edx movq 16(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L30 leaq 24(%rsp), %rdi movl $1920000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L31 leaq 1920096(%rsp), %rsi movl $1, %ecx movl $1920000, %edx movq 24(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L32 leaq 32(%rsp), %rdi movl $1920000, %esi call cudaMalloc@PLT testl %eax, %eax jne .L33 movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $600, 84(%rsp) movl $800, 88(%rsp) movl $1, 92(%rsp) leaq 56(%rsp), %rdi call cudaEventCreate@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT movl 92(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 84(%rsp), %rdx movq 72(%rsp), %rdi movl 80(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L34 .L23: leaq 3840096(%rsp), %rdi movl $2, %ecx movl $1920000, %edx movq 32(%rsp), %rsi call cudaMemcpy@PLT testl %eax, %eax jne .L35 movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movq 64(%rsp), %rdi call cudaEventSynchronize@PLT leaq 12(%rsp), %rdi movq 64(%rsp), %rdx movq 56(%rsp), %rsi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 leaq .LC4(%rip), %rsi movl $2, %edi movl $1, %eax call __printf_chk@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 5760104(%rsp), %rax subq %fs:40, %rax jne .L36 movl $0, %eax addq $5760120, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L29: .cfi_restore_state movl $57, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L30: movl $58, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L31: movl $60, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L32: movl $61, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L33: movl $63, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L34: movq 32(%rsp), %rdx movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z39__device_stub__Z9matrixAddPA800_iS0_S0_PA800_iS0_S0_ jmp .L23 .L35: movl $76, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT call cudaGetLastError@PLT movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %rdx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC7: .string "_Z9matrixAddPA800_iS0_S0_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z9matrixAddPA800_iS0_S0_(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "matrix_sum.hip" .globl _Z24__device_stub__matrixAddPA800_iS0_S0_ # -- Begin function _Z24__device_stub__matrixAddPA800_iS0_S0_ .p2align 4, 0x90 .type _Z24__device_stub__matrixAddPA800_iS0_S0_,@function _Z24__device_stub__matrixAddPA800_iS0_S0_: # @_Z24__device_stub__matrixAddPA800_iS0_S0_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9matrixAddPA800_iS0_S0_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__matrixAddPA800_iS0_S0_, .Lfunc_end0-_Z24__device_stub__matrixAddPA800_iS0_S0_ .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $5760168, %rsp # imm = 0x57E4A8 .cfi_def_cfa_offset 5760192 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 leaq 3840160(%rsp), %rbx xorl %r14d, %r14d movl $1920000, %edx # imm = 0x1D4C00 movq %rbx, %rdi xorl %esi, %esi callq memset@PLT leaq 1920160(%rsp), %rax leaq 160(%rsp), %rcx .p2align 4, 0x90 .LBB1_1: # %.preheader43 # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 movl $600, (%rax,%rdx,4) # imm = 0x258 movl $800, (%rcx,%rdx,4) # imm = 0x320 incq %rdx cmpq $800, %rdx # imm = 0x320 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 Depth=1 incq %r14 addq $3200, %rax # imm = 0xC80 addq $3200, %rcx # imm = 0xC80 cmpq $600, %r14 # imm = 0x258 jne .LBB1_1 # %bb.4: leaq 56(%rsp), %rdi callq hipEventCreate leaq 8(%rsp), %rdi callq hipEventCreate movq 56(%rsp), %rdi xorl %r14d, %r14d xorl %esi, %esi callq hipEventRecord leaq 1920160(%rsp), %rax leaq 160(%rsp), %rcx .p2align 4, 0x90 .LBB1_5: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_6 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB1_6: # Parent Loop BB1_5 Depth=1 # => This Inner Loop Header: Depth=2 movl (%rcx,%rdx,4), %esi addl (%rax,%rdx,4), %esi movl %esi, (%rbx,%rdx,4) incq %rdx cmpq $800, %rdx # imm = 0x320 jne .LBB1_6 # %bb.7: # in Loop: Header=BB1_5 Depth=1 incq %r14 addq $3200, %rax # imm = 0xC80 addq $3200, %rcx # imm = 0xC80 addq $3200, %rbx # imm = 0xC80 cmpq $600, %r14 # imm = 0x258 jne .LBB1_5 # %bb.8: movq 8(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 8(%rsp), %rdi callq hipEventSynchronize movq 56(%rsp), %rsi movq 8(%rsp), %rdx leaq 44(%rsp), %rdi callq hipEventElapsedTime movss 44(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str, %edi movb $1, %al callq printf leaq 32(%rsp), %rdi movl $1920000, %esi # imm = 0x1D4C00 callq hipMalloc testl %eax, %eax jne .LBB1_9 # %bb.11: movq 32(%rsp), %rdi leaq 1920160(%rsp), %rsi movl $1920000, %edx # imm = 0x1D4C00 movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB1_12 # %bb.13: leaq 24(%rsp), %rdi movl $1920000, %esi # imm = 0x1D4C00 callq hipMalloc testl %eax, %eax jne .LBB1_14 # %bb.15: movq 24(%rsp), %rdi leaq 160(%rsp), %rsi movl $1920000, %edx # imm = 0x1D4C00 movl $1, %ecx callq hipMemcpy testl %eax, %eax jne .LBB1_16 # %bb.17: leaq 16(%rsp), %rdi movl $1920000, %esi # imm = 0x1D4C00 callq hipMalloc testl %eax, %eax jne .LBB1_18 # %bb.19: leaq 48(%rsp), %rdi callq hipEventCreate movq %rsp, %rdi callq hipEventCreate movq 48(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $3435973837400, %rdx # imm = 0x32000000258 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_21 # %bb.20: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 152(%rsp) movq %rcx, 144(%rsp) movq %rdx, 136(%rsp) leaq 152(%rsp), %rax movq %rax, 64(%rsp) leaq 144(%rsp), %rax movq %rax, 72(%rsp) leaq 136(%rsp), %rax movq %rax, 80(%rsp) leaq 120(%rsp), %rdi leaq 104(%rsp), %rsi leaq 96(%rsp), %rdx leaq 88(%rsp), %rcx callq __hipPopCallConfiguration movq 120(%rsp), %rsi movl 128(%rsp), %edx movq 104(%rsp), %rcx movl 112(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z9matrixAddPA800_iS0_S0_, %edi pushq 88(%rsp) .cfi_adjust_cfa_offset 8 pushq 104(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_21: movq 16(%rsp), %rsi leaq 3840160(%rsp), %rdi movl $1920000, %edx # imm = 0x1D4C00 movl $2, %ecx callq hipMemcpy testl %eax, %eax jne .LBB1_22 # %bb.23: movq (%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq (%rsp), %rdi callq hipEventSynchronize movq 48(%rsp), %rsi movq (%rsp), %rdx leaq 64(%rsp), %rdi callq hipEventElapsedTime movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movl $.L.str.4, %edi movb $1, %al callq printf movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movl $.Lstr, %edi callq puts@PLT movl $10, %edi callq putchar@PLT xorl %eax, %eax addq $5760168, %rsp # imm = 0x57E4A8 .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .LBB1_9: .cfi_def_cfa_offset 5760192 movl $.L.str.1, %edi movl $.L.str.2, %esi movl $57, %edx jmp .LBB1_10 .LBB1_12: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $58, %edx jmp .LBB1_10 .LBB1_14: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $60, %edx jmp .LBB1_10 .LBB1_16: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $61, %edx jmp .LBB1_10 .LBB1_18: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $63, %edx jmp .LBB1_10 .LBB1_22: movl $.L.str.1, %edi movl $.L.str.2, %esi movl $76, %edx .LBB1_10: xorl %eax, %eax callq printf callq hipGetLastError movl %eax, %edi callq hipGetErrorString movl $.L.str.3, %edi movq %rax, %rsi xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9matrixAddPA800_iS0_S0_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9matrixAddPA800_iS0_S0_,@object # @_Z9matrixAddPA800_iS0_S0_ .section .rodata,"a",@progbits .globl _Z9matrixAddPA800_iS0_S0_ .p2align 3, 0x0 _Z9matrixAddPA800_iS0_S0_: .quad _Z24__device_stub__matrixAddPA800_iS0_S0_ .size _Z9matrixAddPA800_iS0_S0_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Time to calculate results(CPU Time): %f ms.\n" .size .L.str, 45 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "CUDA error at %s:%d\n" .size .L.str.1, 21 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/blgnksy/CudaLabExercises/master/LabAssign1/matrix_sum.hip" .size .L.str.2, 115 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz " %s\n" .size .L.str.3, 6 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Time to calculate results(GPU Time): %f ms.\n" .size .L.str.4, 45 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9matrixAddPA800_iS0_S0_" .size .L__unnamed_1, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Output of Summation" .size .Lstr, 20 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__matrixAddPA800_iS0_S0_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9matrixAddPA800_iS0_S0_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" #ifndef _KERNEL_H #define _KERNEL_H typedef struct Node { int starting; int no_of_edges; }Node; #endif __global__ void bfs_kernel(Node* d_graph_nodes, int* d_edge_list, bool* d_graph_level, bool* d_graph_visited, int* d_cost, bool* loop, int no_of_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //d_graph_level[tid] is true means the vertex in the current level //is being visited if (tid < no_of_nodes && d_graph_level[tid]) { d_graph_level[tid] = false; d_graph_visited[tid] = true; for (int i = d_graph_nodes[tid].starting; i < (d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_edge_list[i]; if (!d_graph_visited[id]) { //calculate in which level the vertex is visited d_cost[id] = d_cost[tid] + 1; d_graph_level[id] = true; //to make the loop continues *loop = true; } } } }
code for sm_80 Function : _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R8, c[0x0][0x190], PT ; /* 0x0000640008007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ SHF.R.S32.HI R13, RZ, 0x1f, R8 ; /* 0x0000001fff0d7819 */ /* 0x000fe20000011408 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ IADD3 R4, P0, R8, c[0x0][0x170], RZ ; /* 0x00005c0008047a10 */ /* 0x000fc80007f1e0ff */ /*0090*/ IADD3.X R5, R13, c[0x0][0x174], RZ, P0, !PT ; /* 0x00005d000d057a10 */ /* 0x000fca00007fe4ff */ /*00a0*/ LDG.E.U8 R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea4000c1e1100 */ /*00b0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x004fda0003f05270 */ /*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*00d0*/ IADD3 R6, P0, R8.reuse, c[0x0][0x178], RZ ; /* 0x00005e0008067a10 */ /* 0x040fe20007f1e0ff */ /*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */ /* 0x000fe200078e00ff */ /*00f0*/ LEA R2, P1, R8.reuse, c[0x0][0x160], 0x3 ; /* 0x0000580008027a11 */ /* 0x040fe200078218ff */ /*0100*/ STG.E.U8 [R4.64], RZ ; /* 0x000000ff04007986 */ /* 0x0001e2000c101104 */ /*0110*/ IADD3.X R7, R13, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000d077a10 */ /* 0x000fe400007fe4ff */ /*0120*/ LEA.HI.X R3, R8, c[0x0][0x164], R13, 0x3, P1 ; /* 0x0000590008037a11 */ /* 0x000fc600008f1c0d */ /*0130*/ STG.E.U8 [R6.64], R0 ; /* 0x0000000006007986 */ /* 0x0001e8000c101104 */ /*0140*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */ /* 0x000ea4000c1e1900 */ /*0150*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */ /* 0x004fda0003f06270 */ /*0160*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0170*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x001ea2000c1e1900 */ /*0180*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */ /* 0x000fe200078e00ff */ /*0190*/ LEA R4, P0, R8, c[0x0][0x180], 0x2 ; /* 0x0000600008047a11 */ /* 0x000fe200078010ff */ /*01a0*/ IMAD.MOV.U32 R16, RZ, RZ, R9 ; /* 0x000000ffff107224 */ /* 0x000fc600078e0009 */ /*01b0*/ LEA.HI.X R5, R8, c[0x0][0x184], R13, 0x2, P0 ; /* 0x0000610008057a11 */ /* 0x000fe200000f140d */ /*01c0*/ IMAD.WIDE R10, R0, R11, c[0x0][0x168] ; /* 0x00005a00000a7625 */ /* 0x004fc800078e020b */ /*01d0*/ IMAD.MOV.U32 R19, RZ, RZ, R11 ; /* 0x000000ffff137224 */ /* 0x000fe400078e000b */ /*01e0*/ IMAD.MOV.U32 R17, RZ, RZ, R0 ; /* 0x000000ffff117224 */ /* 0x000fe400078e0000 */ /*01f0*/ IMAD.MOV.U32 R11, RZ, RZ, R19 ; /* 0x000000ffff0b7224 */ /* 0x000fca00078e0013 */ /*0200*/ LDG.E R7, [R10.64] ; /* 0x000000040a077981 */ /* 0x000ea4000c1e1900 */ /*0210*/ SHF.R.S32.HI R12, RZ, 0x1f, R7 ; /* 0x0000001fff0c7819 */ /* 0x004fe40000011407 */ /*0220*/ IADD3 R14, P0, R7, c[0x0][0x178], RZ ; /* 0x00005e00070e7a10 */ /* 0x000fc80007f1e0ff */ /*0230*/ IADD3.X R15, R12, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000c0f7a10 */ /* 0x000fca00007fe4ff */ /*0240*/ LDG.E.U8 R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000ea4000c1e1100 */ /*0250*/ ISETP.NE.AND P0, PT, R14, RZ, PT ; /* 0x000000ff0e00720c */ /* 0x004fda0003f05270 */ /*0260*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */ /* 0x000ea2000c1e1900 */ /*0270*/ @!P0 LEA R6, P1, R7.reuse, c[0x0][0x180], 0x2 ; /* 0x0000600007068a11 */ /* 0x040fe200078210ff */ /*0280*/ IMAD.MOV.U32 R18, RZ, RZ, 0x1 ; /* 0x00000001ff127424 */ /* 0x000fe200078e00ff */ /*0290*/ @!P0 IADD3 R8, P2, R7.reuse, c[0x0][0x170], RZ ; /* 0x00005c0007088a10 */ /* 0x040fe20007f5e0ff */ /*02a0*/ @!P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0d8624 */ /* 0x000fe200078e00ff */ /*02b0*/ @!P0 LEA.HI.X R7, R7, c[0x0][0x184], R12, 0x2, P1 ; /* 0x0000610007078a11 */ /* 0x000fe400008f140c */ /*02c0*/ @!P0 IADD3 R11, R9, 0x1, RZ ; /* 0x00000001090b8810 */ /* 0x004fe40007ffe0ff */ /*02d0*/ @!P0 IADD3.X R9, R12, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d000c098a10 */ /* 0x000fe200017fe4ff */ /*02e0*/ @!P0 IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0c8624 */ /* 0x000fc400078e00ff */ /*02f0*/ @!P0 STG.E [R6.64], R11 ; /* 0x0000000b06008986 */ /* 0x0001e8000c101904 */ /*0300*/ @!P0 STG.E.U8 [R8.64], R18 ; /* 0x0000001208008986 */ /* 0x0001e8000c101104 */ /*0310*/ @!P0 STG.E.U8 [R12.64], R18 ; /* 0x000000120c008986 */ /* 0x0001e8000c101104 */ /*0320*/ @!P0 LDG.E R16, [R2.64+0x4] ; /* 0x0000040402108981 */ /* 0x000ea8000c1e1900 */ /*0330*/ @!P0 LDG.E R17, [R2.64] ; /* 0x0000000402118981 */ /* 0x000ea2000c1e1900 */ /*0340*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */ /* 0x000fc40007ffe0ff */ /*0350*/ IADD3 R10, P1, R10, 0x4, RZ ; /* 0x000000040a0a7810 */ /* 0x000fca0007f3e0ff */ /*0360*/ IMAD.X R19, RZ, RZ, R19, P1 ; /* 0x000000ffff137224 */ /* 0x000fe400008e0613 */ /*0370*/ IMAD.IADD R15, R16, 0x1, R17 ; /* 0x00000001100f7824 */ /* 0x004fca00078e0211 */ /*0380*/ ISETP.GE.AND P0, PT, R0, R15, PT ; /* 0x0000000f0000720c */ /* 0x000fda0003f06270 */ /*0390*/ @!P0 BRA 0x1f0 ; /* 0xfffffe5000008947 */ /* 0x001fea000383ffff */ /*03a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0400*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0410*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0420*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0430*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0440*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" #ifndef _KERNEL_H #define _KERNEL_H typedef struct Node { int starting; int no_of_edges; }Node; #endif __global__ void bfs_kernel(Node* d_graph_nodes, int* d_edge_list, bool* d_graph_level, bool* d_graph_visited, int* d_cost, bool* loop, int no_of_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //d_graph_level[tid] is true means the vertex in the current level //is being visited if (tid < no_of_nodes && d_graph_level[tid]) { d_graph_level[tid] = false; d_graph_visited[tid] = true; for (int i = d_graph_nodes[tid].starting; i < (d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_edge_list[i]; if (!d_graph_visited[id]) { //calculate in which level the vertex is visited d_cost[id] = d_cost[tid] + 1; d_graph_level[id] = true; //to make the loop continues *loop = true; } } } }
.file "tmpxft_00112db2_00000000-6_bfs_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i .type _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i, @function _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %r9, (%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movq %rsp, %rax movq %rax, 152(%rsp) leaq 192(%rsp), %rax movq %rax, 160(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z10bfs_kernelP4NodePiPbS2_S1_S2_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i, .-_Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i .globl _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .type _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, @function _Z10bfs_kernelP4NodePiPbS2_S1_S2_i: .LFB2052: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 call _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, .-_Z10bfs_kernelP4NodePiPbS2_S1_S2_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z10bfs_kernelP4NodePiPbS2_S1_S2_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10bfs_kernelP4NodePiPbS2_S1_S2_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" #ifndef _KERNEL_H #define _KERNEL_H typedef struct Node { int starting; int no_of_edges; }Node; #endif __global__ void bfs_kernel(Node* d_graph_nodes, int* d_edge_list, bool* d_graph_level, bool* d_graph_visited, int* d_cost, bool* loop, int no_of_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //d_graph_level[tid] is true means the vertex in the current level //is being visited if (tid < no_of_nodes && d_graph_level[tid]) { d_graph_level[tid] = false; d_graph_visited[tid] = true; for (int i = d_graph_nodes[tid].starting; i < (d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_edge_list[i]; if (!d_graph_visited[id]) { //calculate in which level the vertex is visited d_cost[id] = d_cost[tid] + 1; d_graph_level[id] = true; //to make the loop continues *loop = true; } } } }
#include <hip/hip_runtime.h> #include "includes.h" #ifndef _KERNEL_H #define _KERNEL_H typedef struct Node { int starting; int no_of_edges; }Node; #endif __global__ void bfs_kernel(Node* d_graph_nodes, int* d_edge_list, bool* d_graph_level, bool* d_graph_visited, int* d_cost, bool* loop, int no_of_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //d_graph_level[tid] is true means the vertex in the current level //is being visited if (tid < no_of_nodes && d_graph_level[tid]) { d_graph_level[tid] = false; d_graph_visited[tid] = true; for (int i = d_graph_nodes[tid].starting; i < (d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_edge_list[i]; if (!d_graph_visited[id]) { //calculate in which level the vertex is visited d_cost[id] = d_cost[tid] + 1; d_graph_level[id] = true; //to make the loop continues *loop = true; } } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" #ifndef _KERNEL_H #define _KERNEL_H typedef struct Node { int starting; int no_of_edges; }Node; #endif __global__ void bfs_kernel(Node* d_graph_nodes, int* d_edge_list, bool* d_graph_level, bool* d_graph_visited, int* d_cost, bool* loop, int no_of_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //d_graph_level[tid] is true means the vertex in the current level //is being visited if (tid < no_of_nodes && d_graph_level[tid]) { d_graph_level[tid] = false; d_graph_visited[tid] = true; for (int i = d_graph_nodes[tid].starting; i < (d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_edge_list[i]; if (!d_graph_visited[id]) { //calculate in which level the vertex is visited d_cost[id] = d_cost[tid] + 1; d_graph_level[id] = true; //to make the loop continues *loop = true; } } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .globl _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .p2align 8 .type _Z10bfs_kernelP4NodePiPbS2_S1_S2_i,@function _Z10bfs_kernelP4NodePiPbS2_S1_S2_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x44 s_load_b32 s3, s[0:1], 0x30 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v3 s_cbranch_execz .LBB0_7 s_load_b64 s[2:3], s[0:1], 0x10 v_ashrrev_i32_e32 v4, 31, v3 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s2, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v4, vcc_lo global_load_u8 v2, v[0:1], off s_waitcnt vmcnt(0) v_cmp_ne_u16_e32 vcc_lo, 0, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_7 s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b64 s[8:9], s[0:1], 0x18 v_lshlrev_b64 v[7:8], 3, v[3:4] v_dual_mov_b32 v11, 0 :: v_dual_mov_b32 v12, 1 s_mov_b32 s10, 0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v5, vcc_lo, s4, v7 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v8, vcc_lo v_add_co_u32 v9, vcc_lo, s8, v3 v_add_co_ci_u32_e32 v10, vcc_lo, s9, v4, vcc_lo global_load_b32 v2, v[5:6], off offset:4 global_store_b8 v[0:1], v11, off global_store_b8 v[9:10], v12, off s_waitcnt vmcnt(0) v_cmp_lt_i32_e32 vcc_lo, 0, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_7 v_add_co_u32 v0, vcc_lo, s4, v7 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v8, vcc_lo v_lshlrev_b64 v[7:8], 2, v[3:4] v_add_co_u32 v4, vcc_lo, v5, 4 global_load_b32 v2, v[0:1], off s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x20 s_load_b64 s[0:1], s[0:1], 0x8 v_add_co_ci_u32_e32 v5, vcc_lo, 0, v6, vcc_lo s_waitcnt lgkmcnt(0) v_add_co_u32 v6, vcc_lo, s4, v7 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v8, vcc_lo s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[9:10], 2, v[2:3] v_mov_b32_e32 v3, 0 v_add_co_u32 v8, vcc_lo, s0, v9 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v9, vcc_lo, s1, v10, vcc_lo s_set_inst_prefetch_distance 0x1 s_branch .LBB0_5 .p2align 6 .LBB0_4: s_or_b32 exec_lo, exec_lo, s0 s_clause 0x1 global_load_b32 v10, v[4:5], off global_load_b32 v11, v[0:1], off v_add_nc_u32_e32 v2, 1, v2 v_add_co_u32 v8, s0, v8, 4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v9, s0, 0, v9, s0 s_waitcnt vmcnt(0) v_add_nc_u32_e32 v10, v11, v10 v_cmp_ge_i32_e32 vcc_lo, v2, v10 s_or_b32 s10, vcc_lo, s10 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s10 s_cbranch_execz .LBB0_7 .LBB0_5: global_load_b32 v10, v[8:9], off s_mov_b32 s0, exec_lo s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v11, 31, v10 v_add_co_u32 v12, vcc_lo, s8, v10 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v13, vcc_lo, s9, v11, vcc_lo global_load_u8 v12, v[12:13], off s_waitcnt vmcnt(0) v_cmpx_eq_u16_e32 0, v12 s_cbranch_execz .LBB0_4 global_load_b32 v14, v[6:7], off v_lshlrev_b64 v[12:13], 2, v[10:11] v_add_co_u32 v10, vcc_lo, s2, v10 v_add_co_ci_u32_e32 v11, vcc_lo, s3, v11, vcc_lo v_mov_b32_e32 v15, 1 s_delay_alu instid0(VALU_DEP_4) v_add_co_u32 v12, vcc_lo, s4, v12 v_add_co_ci_u32_e32 v13, vcc_lo, s5, v13, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v14, 1, v14 global_store_b8 v[10:11], v15, off global_store_b32 v[12:13], v14, off global_store_b8 v3, v15, s[6:7] s_branch .LBB0_4 .LBB0_7: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 312 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 16 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, .Lfunc_end0-_Z10bfs_kernelP4NodePiPbS2_S1_S2_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 32 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 40 .size: 8 .value_kind: global_buffer - .offset: 48 .size: 4 .value_kind: by_value - .offset: 56 .size: 4 .value_kind: hidden_block_count_x - .offset: 60 .size: 4 .value_kind: hidden_block_count_y - .offset: 64 .size: 4 .value_kind: hidden_block_count_z - .offset: 68 .size: 2 .value_kind: hidden_group_size_x - .offset: 70 .size: 2 .value_kind: hidden_group_size_y - .offset: 72 .size: 2 .value_kind: hidden_group_size_z - .offset: 74 .size: 2 .value_kind: hidden_remainder_x - .offset: 76 .size: 2 .value_kind: hidden_remainder_y - .offset: 78 .size: 2 .value_kind: hidden_remainder_z - .offset: 96 .size: 8 .value_kind: hidden_global_offset_x - .offset: 104 .size: 8 .value_kind: hidden_global_offset_y - .offset: 112 .size: 8 .value_kind: hidden_global_offset_z - .offset: 120 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 312 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10bfs_kernelP4NodePiPbS2_S1_S2_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 16 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" #ifndef _KERNEL_H #define _KERNEL_H typedef struct Node { int starting; int no_of_edges; }Node; #endif __global__ void bfs_kernel(Node* d_graph_nodes, int* d_edge_list, bool* d_graph_level, bool* d_graph_visited, int* d_cost, bool* loop, int no_of_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //d_graph_level[tid] is true means the vertex in the current level //is being visited if (tid < no_of_nodes && d_graph_level[tid]) { d_graph_level[tid] = false; d_graph_visited[tid] = true; for (int i = d_graph_nodes[tid].starting; i < (d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_edge_list[i]; if (!d_graph_visited[id]) { //calculate in which level the vertex is visited d_cost[id] = d_cost[tid] + 1; d_graph_level[id] = true; //to make the loop continues *loop = true; } } } }
.text .file "bfs_kernel.hip" .globl _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i # -- Begin function _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .p2align 4, 0x90 .type _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i,@function _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i: # @_Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movq %r9, 48(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 160(%rsp), %rax movq %rax, 144(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z10bfs_kernelP4NodePiPbS2_S1_S2_i, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i, .Lfunc_end0-_Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10bfs_kernelP4NodePiPbS2_S1_S2_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10bfs_kernelP4NodePiPbS2_S1_S2_i,@object # @_Z10bfs_kernelP4NodePiPbS2_S1_S2_i .section .rodata,"a",@progbits .globl _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .p2align 3, 0x0 _Z10bfs_kernelP4NodePiPbS2_S1_S2_i: .quad _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .size _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10bfs_kernelP4NodePiPbS2_S1_S2_i" .size .L__unnamed_1, 35 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.AND P0, PT, R8, c[0x0][0x190], PT ; /* 0x0000640008007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ SHF.R.S32.HI R13, RZ, 0x1f, R8 ; /* 0x0000001fff0d7819 */ /* 0x000fe20000011408 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0080*/ IADD3 R4, P0, R8, c[0x0][0x170], RZ ; /* 0x00005c0008047a10 */ /* 0x000fc80007f1e0ff */ /*0090*/ IADD3.X R5, R13, c[0x0][0x174], RZ, P0, !PT ; /* 0x00005d000d057a10 */ /* 0x000fca00007fe4ff */ /*00a0*/ LDG.E.U8 R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea4000c1e1100 */ /*00b0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */ /* 0x004fda0003f05270 */ /*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*00d0*/ IADD3 R6, P0, R8.reuse, c[0x0][0x178], RZ ; /* 0x00005e0008067a10 */ /* 0x040fe20007f1e0ff */ /*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */ /* 0x000fe200078e00ff */ /*00f0*/ LEA R2, P1, R8.reuse, c[0x0][0x160], 0x3 ; /* 0x0000580008027a11 */ /* 0x040fe200078218ff */ /*0100*/ STG.E.U8 [R4.64], RZ ; /* 0x000000ff04007986 */ /* 0x0001e2000c101104 */ /*0110*/ IADD3.X R7, R13, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000d077a10 */ /* 0x000fe400007fe4ff */ /*0120*/ LEA.HI.X R3, R8, c[0x0][0x164], R13, 0x3, P1 ; /* 0x0000590008037a11 */ /* 0x000fc600008f1c0d */ /*0130*/ STG.E.U8 [R6.64], R0 ; /* 0x0000000006007986 */ /* 0x0001e8000c101104 */ /*0140*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */ /* 0x000ea4000c1e1900 */ /*0150*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */ /* 0x004fda0003f06270 */ /*0160*/ @!P0 EXIT ; /* 0x000000000000894d */ /* 0x000fea0003800000 */ /*0170*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */ /* 0x001ea2000c1e1900 */ /*0180*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */ /* 0x000fe200078e00ff */ /*0190*/ LEA R4, P0, R8, c[0x0][0x180], 0x2 ; /* 0x0000600008047a11 */ /* 0x000fe200078010ff */ /*01a0*/ IMAD.MOV.U32 R16, RZ, RZ, R9 ; /* 0x000000ffff107224 */ /* 0x000fc600078e0009 */ /*01b0*/ LEA.HI.X R5, R8, c[0x0][0x184], R13, 0x2, P0 ; /* 0x0000610008057a11 */ /* 0x000fe200000f140d */ /*01c0*/ IMAD.WIDE R10, R0, R11, c[0x0][0x168] ; /* 0x00005a00000a7625 */ /* 0x004fc800078e020b */ /*01d0*/ IMAD.MOV.U32 R19, RZ, RZ, R11 ; /* 0x000000ffff137224 */ /* 0x000fe400078e000b */ /*01e0*/ IMAD.MOV.U32 R17, RZ, RZ, R0 ; /* 0x000000ffff117224 */ /* 0x000fe400078e0000 */ /*01f0*/ IMAD.MOV.U32 R11, RZ, RZ, R19 ; /* 0x000000ffff0b7224 */ /* 0x000fca00078e0013 */ /*0200*/ LDG.E R7, [R10.64] ; /* 0x000000040a077981 */ /* 0x000ea4000c1e1900 */ /*0210*/ SHF.R.S32.HI R12, RZ, 0x1f, R7 ; /* 0x0000001fff0c7819 */ /* 0x004fe40000011407 */ /*0220*/ IADD3 R14, P0, R7, c[0x0][0x178], RZ ; /* 0x00005e00070e7a10 */ /* 0x000fc80007f1e0ff */ /*0230*/ IADD3.X R15, R12, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000c0f7a10 */ /* 0x000fca00007fe4ff */ /*0240*/ LDG.E.U8 R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000ea4000c1e1100 */ /*0250*/ ISETP.NE.AND P0, PT, R14, RZ, PT ; /* 0x000000ff0e00720c */ /* 0x004fda0003f05270 */ /*0260*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */ /* 0x000ea2000c1e1900 */ /*0270*/ @!P0 LEA R6, P1, R7.reuse, c[0x0][0x180], 0x2 ; /* 0x0000600007068a11 */ /* 0x040fe200078210ff */ /*0280*/ IMAD.MOV.U32 R18, RZ, RZ, 0x1 ; /* 0x00000001ff127424 */ /* 0x000fe200078e00ff */ /*0290*/ @!P0 IADD3 R8, P2, R7.reuse, c[0x0][0x170], RZ ; /* 0x00005c0007088a10 */ /* 0x040fe20007f5e0ff */ /*02a0*/ @!P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0d8624 */ /* 0x000fe200078e00ff */ /*02b0*/ @!P0 LEA.HI.X R7, R7, c[0x0][0x184], R12, 0x2, P1 ; /* 0x0000610007078a11 */ /* 0x000fe400008f140c */ /*02c0*/ @!P0 IADD3 R11, R9, 0x1, RZ ; /* 0x00000001090b8810 */ /* 0x004fe40007ffe0ff */ /*02d0*/ @!P0 IADD3.X R9, R12, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d000c098a10 */ /* 0x000fe200017fe4ff */ /*02e0*/ @!P0 IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0c8624 */ /* 0x000fc400078e00ff */ /*02f0*/ @!P0 STG.E [R6.64], R11 ; /* 0x0000000b06008986 */ /* 0x0001e8000c101904 */ /*0300*/ @!P0 STG.E.U8 [R8.64], R18 ; /* 0x0000001208008986 */ /* 0x0001e8000c101104 */ /*0310*/ @!P0 STG.E.U8 [R12.64], R18 ; /* 0x000000120c008986 */ /* 0x0001e8000c101104 */ /*0320*/ @!P0 LDG.E R16, [R2.64+0x4] ; /* 0x0000040402108981 */ /* 0x000ea8000c1e1900 */ /*0330*/ @!P0 LDG.E R17, [R2.64] ; /* 0x0000000402118981 */ /* 0x000ea2000c1e1900 */ /*0340*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */ /* 0x000fc40007ffe0ff */ /*0350*/ IADD3 R10, P1, R10, 0x4, RZ ; /* 0x000000040a0a7810 */ /* 0x000fca0007f3e0ff */ /*0360*/ IMAD.X R19, RZ, RZ, R19, P1 ; /* 0x000000ffff137224 */ /* 0x000fe400008e0613 */ /*0370*/ IMAD.IADD R15, R16, 0x1, R17 ; /* 0x00000001100f7824 */ /* 0x004fca00078e0211 */ /*0380*/ ISETP.GE.AND P0, PT, R0, R15, PT ; /* 0x0000000f0000720c */ /* 0x000fda0003f06270 */ /*0390*/ @!P0 BRA 0x1f0 ; /* 0xfffffe5000008947 */ /* 0x001fea000383ffff */ /*03a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0400*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0410*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0420*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0430*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0440*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0450*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0460*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0470*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .globl _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .p2align 8 .type _Z10bfs_kernelP4NodePiPbS2_S1_S2_i,@function _Z10bfs_kernelP4NodePiPbS2_S1_S2_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x44 s_load_b32 s3, s[0:1], 0x30 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v3 s_cbranch_execz .LBB0_7 s_load_b64 s[2:3], s[0:1], 0x10 v_ashrrev_i32_e32 v4, 31, v3 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s2, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v4, vcc_lo global_load_u8 v2, v[0:1], off s_waitcnt vmcnt(0) v_cmp_ne_u16_e32 vcc_lo, 0, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_7 s_clause 0x1 s_load_b64 s[4:5], s[0:1], 0x0 s_load_b64 s[8:9], s[0:1], 0x18 v_lshlrev_b64 v[7:8], 3, v[3:4] v_dual_mov_b32 v11, 0 :: v_dual_mov_b32 v12, 1 s_mov_b32 s10, 0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v5, vcc_lo, s4, v7 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v8, vcc_lo v_add_co_u32 v9, vcc_lo, s8, v3 v_add_co_ci_u32_e32 v10, vcc_lo, s9, v4, vcc_lo global_load_b32 v2, v[5:6], off offset:4 global_store_b8 v[0:1], v11, off global_store_b8 v[9:10], v12, off s_waitcnt vmcnt(0) v_cmp_lt_i32_e32 vcc_lo, 0, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_7 v_add_co_u32 v0, vcc_lo, s4, v7 v_add_co_ci_u32_e32 v1, vcc_lo, s5, v8, vcc_lo v_lshlrev_b64 v[7:8], 2, v[3:4] v_add_co_u32 v4, vcc_lo, v5, 4 global_load_b32 v2, v[0:1], off s_clause 0x1 s_load_b128 s[4:7], s[0:1], 0x20 s_load_b64 s[0:1], s[0:1], 0x8 v_add_co_ci_u32_e32 v5, vcc_lo, 0, v6, vcc_lo s_waitcnt lgkmcnt(0) v_add_co_u32 v6, vcc_lo, s4, v7 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v8, vcc_lo s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[9:10], 2, v[2:3] v_mov_b32_e32 v3, 0 v_add_co_u32 v8, vcc_lo, s0, v9 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v9, vcc_lo, s1, v10, vcc_lo s_set_inst_prefetch_distance 0x1 s_branch .LBB0_5 .p2align 6 .LBB0_4: s_or_b32 exec_lo, exec_lo, s0 s_clause 0x1 global_load_b32 v10, v[4:5], off global_load_b32 v11, v[0:1], off v_add_nc_u32_e32 v2, 1, v2 v_add_co_u32 v8, s0, v8, 4 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v9, s0, 0, v9, s0 s_waitcnt vmcnt(0) v_add_nc_u32_e32 v10, v11, v10 v_cmp_ge_i32_e32 vcc_lo, v2, v10 s_or_b32 s10, vcc_lo, s10 s_delay_alu instid0(SALU_CYCLE_1) s_and_not1_b32 exec_lo, exec_lo, s10 s_cbranch_execz .LBB0_7 .LBB0_5: global_load_b32 v10, v[8:9], off s_mov_b32 s0, exec_lo s_waitcnt vmcnt(0) v_ashrrev_i32_e32 v11, 31, v10 v_add_co_u32 v12, vcc_lo, s8, v10 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v13, vcc_lo, s9, v11, vcc_lo global_load_u8 v12, v[12:13], off s_waitcnt vmcnt(0) v_cmpx_eq_u16_e32 0, v12 s_cbranch_execz .LBB0_4 global_load_b32 v14, v[6:7], off v_lshlrev_b64 v[12:13], 2, v[10:11] v_add_co_u32 v10, vcc_lo, s2, v10 v_add_co_ci_u32_e32 v11, vcc_lo, s3, v11, vcc_lo v_mov_b32_e32 v15, 1 s_delay_alu instid0(VALU_DEP_4) v_add_co_u32 v12, vcc_lo, s4, v12 v_add_co_ci_u32_e32 v13, vcc_lo, s5, v13, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v14, 1, v14 global_store_b8 v[10:11], v15, off global_store_b32 v[12:13], v14, off global_store_b8 v3, v15, s[6:7] s_branch .LBB0_4 .LBB0_7: s_set_inst_prefetch_distance 0x2 s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 312 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 16 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, .Lfunc_end0-_Z10bfs_kernelP4NodePiPbS2_S1_S2_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 24 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 32 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 40 .size: 8 .value_kind: global_buffer - .offset: 48 .size: 4 .value_kind: by_value - .offset: 56 .size: 4 .value_kind: hidden_block_count_x - .offset: 60 .size: 4 .value_kind: hidden_block_count_y - .offset: 64 .size: 4 .value_kind: hidden_block_count_z - .offset: 68 .size: 2 .value_kind: hidden_group_size_x - .offset: 70 .size: 2 .value_kind: hidden_group_size_y - .offset: 72 .size: 2 .value_kind: hidden_group_size_z - .offset: 74 .size: 2 .value_kind: hidden_remainder_x - .offset: 76 .size: 2 .value_kind: hidden_remainder_y - .offset: 78 .size: 2 .value_kind: hidden_remainder_z - .offset: 96 .size: 8 .value_kind: hidden_global_offset_x - .offset: 104 .size: 8 .value_kind: hidden_global_offset_y - .offset: 112 .size: 8 .value_kind: hidden_global_offset_z - .offset: 120 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 312 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z10bfs_kernelP4NodePiPbS2_S1_S2_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 16 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00112db2_00000000-6_bfs_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i .type _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i, @function _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i: .LFB2051: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movq %rcx, 16(%rsp) movq %r8, 8(%rsp) movq %r9, (%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 16(%rsp), %rax movq %rax, 136(%rsp) leaq 8(%rsp), %rax movq %rax, 144(%rsp) movq %rsp, %rax movq %rax, 152(%rsp) leaq 192(%rsp), %rax movq %rax, 160(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z10bfs_kernelP4NodePiPbS2_S1_S2_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i, .-_Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i .globl _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .type _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, @function _Z10bfs_kernelP4NodePiPbS2_S1_S2_i: .LFB2052: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 call _Z48__device_stub__Z10bfs_kernelP4NodePiPbS2_S1_S2_iP4NodePiPbS2_S1_S2_i addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, .-_Z10bfs_kernelP4NodePiPbS2_S1_S2_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z10bfs_kernelP4NodePiPbS2_S1_S2_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z10bfs_kernelP4NodePiPbS2_S1_S2_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "bfs_kernel.hip" .globl _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i # -- Begin function _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .p2align 4, 0x90 .type _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i,@function _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i: # @_Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movq %rcx, 64(%rsp) movq %r8, 56(%rsp) movq %r9, 48(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 64(%rsp), %rax movq %rax, 120(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 48(%rsp), %rax movq %rax, 136(%rsp) leaq 160(%rsp), %rax movq %rax, 144(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z10bfs_kernelP4NodePiPbS2_S1_S2_i, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i, .Lfunc_end0-_Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10bfs_kernelP4NodePiPbS2_S1_S2_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z10bfs_kernelP4NodePiPbS2_S1_S2_i,@object # @_Z10bfs_kernelP4NodePiPbS2_S1_S2_i .section .rodata,"a",@progbits .globl _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .p2align 3, 0x0 _Z10bfs_kernelP4NodePiPbS2_S1_S2_i: .quad _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .size _Z10bfs_kernelP4NodePiPbS2_S1_S2_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z10bfs_kernelP4NodePiPbS2_S1_S2_i" .size .L__unnamed_1, 35 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z25__device_stub__bfs_kernelP4NodePiPbS2_S1_S2_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z10bfs_kernelP4NodePiPbS2_S1_S2_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
struct MscData { float a; float b; }; __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { result[threadIdx.x] = data.a * step[threadIdx.x] + data.b; }
code for sm_80 Function : _Z12apply_kernel7MscDataPKfPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0040*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */ /* 0x001fcc00078e0005 */ /*0050*/ LDG.E.CONSTANT R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e9900 */ /*0060*/ MOV R7, c[0x0][0x160] ; /* 0x0000580000077a02 */ /* 0x000fe20000000f00 */ /*0070*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fc800078e0005 */ /*0080*/ FFMA R7, R2, R7, c[0x0][0x164] ; /* 0x0000590002077623 */ /* 0x004fca0000000007 */ /*0090*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101904 */ /*00a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
struct MscData { float a; float b; }; __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { result[threadIdx.x] = data.a * step[threadIdx.x] + data.b; }
.file "tmpxft_001191fc_00000000-6_simplified.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf .type _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf, @function _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax movq %rdi, 80(%rsp) movq %rsi, (%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movq %rdx, 8(%rsp) leaq 8(%rsp), %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z12apply_kernel7MscDataPKfPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf, .-_Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf .globl _Z12apply_kernel7MscDataPKfPf .type _Z12apply_kernel7MscDataPKfPf, @function _Z12apply_kernel7MscDataPKfPf: .LFB2052: .cfi_startproc endbr64 subq $24, %rsp .cfi_def_cfa_offset 32 movq %xmm0, 8(%rsp) movq %rsi, %rdx leaq 8(%rsp), %rax movq %rdi, %rsi movq %rax, %rdi call _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z12apply_kernel7MscDataPKfPf, .-_Z12apply_kernel7MscDataPKfPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12apply_kernel7MscDataPKfPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12apply_kernel7MscDataPKfPf(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
struct MscData { float a; float b; }; __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { result[threadIdx.x] = data.a * step[threadIdx.x] + data.b; }
#include <hip/hip_runtime.h> struct MscData { float a; float b; }; __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { result[threadIdx.x] = data.a * step[threadIdx.x] + data.b; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> struct MscData { float a; float b; }; __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { result[threadIdx.x] = data.a * step[threadIdx.x] + data.b; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12apply_kernel7MscDataPKfPf .globl _Z12apply_kernel7MscDataPKfPf .p2align 8 .type _Z12apply_kernel7MscDataPKfPf,@function _Z12apply_kernel7MscDataPKfPf: s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) global_load_b32 v1, v0, s[6:7] s_waitcnt vmcnt(0) v_fma_f32 v1, s4, v1, s5 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12apply_kernel7MscDataPKfPf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12apply_kernel7MscDataPKfPf, .Lfunc_end0-_Z12apply_kernel7MscDataPKfPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 8 .value_kind: by_value - .actual_access: read_only .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .actual_access: write_only .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12apply_kernel7MscDataPKfPf .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z12apply_kernel7MscDataPKfPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> struct MscData { float a; float b; }; __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { result[threadIdx.x] = data.a * step[threadIdx.x] + data.b; }
.text .file "simplified.hip" .globl _Z27__device_stub__apply_kernel7MscDataPKfPf # -- Begin function _Z27__device_stub__apply_kernel7MscDataPKfPf .p2align 4, 0x90 .type _Z27__device_stub__apply_kernel7MscDataPKfPf,@function _Z27__device_stub__apply_kernel7MscDataPKfPf: # @_Z27__device_stub__apply_kernel7MscDataPKfPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movlps %xmm0, 72(%rsp) movq %rdi, 64(%rsp) movq %rsi, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12apply_kernel7MscDataPKfPf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z27__device_stub__apply_kernel7MscDataPKfPf, .Lfunc_end0-_Z27__device_stub__apply_kernel7MscDataPKfPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12apply_kernel7MscDataPKfPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z12apply_kernel7MscDataPKfPf,@object # @_Z12apply_kernel7MscDataPKfPf .section .rodata,"a",@progbits .globl _Z12apply_kernel7MscDataPKfPf .p2align 3, 0x0 _Z12apply_kernel7MscDataPKfPf: .quad _Z27__device_stub__apply_kernel7MscDataPKfPf .size _Z12apply_kernel7MscDataPKfPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12apply_kernel7MscDataPKfPf" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__apply_kernel7MscDataPKfPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12apply_kernel7MscDataPKfPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z12apply_kernel7MscDataPKfPf .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0040*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */ /* 0x001fcc00078e0005 */ /*0050*/ LDG.E.CONSTANT R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e9900 */ /*0060*/ MOV R7, c[0x0][0x160] ; /* 0x0000580000077a02 */ /* 0x000fe20000000f00 */ /*0070*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */ /* 0x000fc800078e0005 */ /*0080*/ FFMA R7, R2, R7, c[0x0][0x164] ; /* 0x0000590002077623 */ /* 0x004fca0000000007 */ /*0090*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x000fe2000c101904 */ /*00a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12apply_kernel7MscDataPKfPf .globl _Z12apply_kernel7MscDataPKfPf .p2align 8 .type _Z12apply_kernel7MscDataPKfPf,@function _Z12apply_kernel7MscDataPKfPf: s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_load_b64 s[0:1], s[0:1], 0x10 s_waitcnt lgkmcnt(0) global_load_b32 v1, v0, s[6:7] s_waitcnt vmcnt(0) v_fma_f32 v1, s4, v1, s5 global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12apply_kernel7MscDataPKfPf .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 24 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 2 .amdhsa_next_free_sgpr 8 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12apply_kernel7MscDataPKfPf, .Lfunc_end0-_Z12apply_kernel7MscDataPKfPf .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .offset: 0 .size: 8 .value_kind: by_value - .actual_access: read_only .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .actual_access: write_only .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 24 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12apply_kernel7MscDataPKfPf .private_segment_fixed_size: 0 .sgpr_count: 8 .sgpr_spill_count: 0 .symbol: _Z12apply_kernel7MscDataPKfPf.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 2 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001191fc_00000000-6_simplified.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf .type _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf, @function _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf: .LFB2051: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax movq %rdi, 80(%rsp) movq %rsi, (%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movq %rdx, 8(%rsp) leaq 8(%rsp), %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z12apply_kernel7MscDataPKfPf(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf, .-_Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf .globl _Z12apply_kernel7MscDataPKfPf .type _Z12apply_kernel7MscDataPKfPf, @function _Z12apply_kernel7MscDataPKfPf: .LFB2052: .cfi_startproc endbr64 subq $24, %rsp .cfi_def_cfa_offset 32 movq %xmm0, 8(%rsp) movq %rsi, %rdx leaq 8(%rsp), %rax movq %rdi, %rsi movq %rax, %rdi call _Z43__device_stub__Z12apply_kernel7MscDataPKfPfRK7MscDataPKfPf addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z12apply_kernel7MscDataPKfPf, .-_Z12apply_kernel7MscDataPKfPf .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z12apply_kernel7MscDataPKfPf" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z12apply_kernel7MscDataPKfPf(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "simplified.hip" .globl _Z27__device_stub__apply_kernel7MscDataPKfPf # -- Begin function _Z27__device_stub__apply_kernel7MscDataPKfPf .p2align 4, 0x90 .type _Z27__device_stub__apply_kernel7MscDataPKfPf,@function _Z27__device_stub__apply_kernel7MscDataPKfPf: # @_Z27__device_stub__apply_kernel7MscDataPKfPf .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movlps %xmm0, 72(%rsp) movq %rdi, 64(%rsp) movq %rsi, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12apply_kernel7MscDataPKfPf, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z27__device_stub__apply_kernel7MscDataPKfPf, .Lfunc_end0-_Z27__device_stub__apply_kernel7MscDataPKfPf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12apply_kernel7MscDataPKfPf, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z12apply_kernel7MscDataPKfPf,@object # @_Z12apply_kernel7MscDataPKfPf .section .rodata,"a",@progbits .globl _Z12apply_kernel7MscDataPKfPf .p2align 3, 0x0 _Z12apply_kernel7MscDataPKfPf: .quad _Z27__device_stub__apply_kernel7MscDataPKfPf .size _Z12apply_kernel7MscDataPKfPf, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12apply_kernel7MscDataPKfPf" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__apply_kernel7MscDataPKfPf .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12apply_kernel7MscDataPKfPf .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cstdio> #include <cstdlib> #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void foo(int *a, int N) { int i=blockIdx.x*blockDim.x+threadIdx.x; if (i < N){ a[i]=i; } } int main() { int N=4097; int threads=128; int blocks=(N+threads-1)/threads; int *a; cudaMallocManaged(&a,N*sizeof(int)); foo<<<blocks,threads>>>(a, N); cudaDeviceSynchronize(); for(int i=0;i<10;i++) printf("%d\n",a[i]); cudaFree(a); cudaCheckError(); return 0; }
code for sm_80 Function : _Z3fooPii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */ /* 0x001fca00078e0200 */ /*0040*/ ISETP.GE.AND P0, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fca00078e0202 */ /*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*00a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cstdio> #include <cstdlib> #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void foo(int *a, int N) { int i=blockIdx.x*blockDim.x+threadIdx.x; if (i < N){ a[i]=i; } } int main() { int N=4097; int threads=128; int blocks=(N+threads-1)/threads; int *a; cudaMallocManaged(&a,N*sizeof(int)); foo<<<blocks,threads>>>(a, N); cudaDeviceSynchronize(); for(int i=0;i<10;i++) printf("%d\n",a[i]); cudaFree(a); cudaCheckError(); return 0; }
.file "tmpxft_0010c0ab_00000000-6_main.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z23__device_stub__Z3fooPiiPii .type _Z23__device_stub__Z3fooPiiPii, @function _Z23__device_stub__Z3fooPiiPii: .LFB2082: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z3fooPii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z23__device_stub__Z3fooPiiPii, .-_Z23__device_stub__Z3fooPiiPii .globl _Z3fooPii .type _Z3fooPii, @function _Z3fooPii: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z23__device_stub__Z3fooPiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z3fooPii, .-_Z3fooPii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%d\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/dkfulp/intro_to_gpus/main/examples/example_3/main.cu" .section .rodata.str1.1 .LC2: .string "Cuda failure %s:%d: '%s'\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $56, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $1, %edx movl $16388, %esi call cudaMallocManaged@PLT movl $128, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $33, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L12: call cudaDeviceSynchronize@PLT movl $0, %ebx leaq .LC0(%rip), %rbp .L13: movq 8(%rsp), %rax movl (%rax,%rbx), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq $40, %rbx jne .L13 movq 8(%rsp), %rdi call cudaFree@PLT call cudaGetLastError@PLT testl %eax, %eax jne .L19 movq 40(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state movl $4097, %esi movq 8(%rsp), %rdi call _Z23__device_stub__Z3fooPiiPii jmp .L12 .L19: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl $35, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z3fooPii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z3fooPii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cstdio> #include <cstdlib> #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void foo(int *a, int N) { int i=blockIdx.x*blockDim.x+threadIdx.x; if (i < N){ a[i]=i; } } int main() { int N=4097; int threads=128; int blocks=(N+threads-1)/threads; int *a; cudaMallocManaged(&a,N*sizeof(int)); foo<<<blocks,threads>>>(a, N); cudaDeviceSynchronize(); for(int i=0;i<10;i++) printf("%d\n",a[i]); cudaFree(a); cudaCheckError(); return 0; }
#include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void foo(int *a, int N) { int i=blockIdx.x*blockDim.x+threadIdx.x; if (i < N){ a[i]=i; } } int main() { int N=4097; int threads=128; int blocks=(N+threads-1)/threads; int *a; hipMallocManaged(&a,N*sizeof(int)); foo<<<blocks,threads>>>(a, N); hipDeviceSynchronize(); for(int i=0;i<10;i++) printf("%d\n",a[i]); hipFree(a); cudaCheckError(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void foo(int *a, int N) { int i=blockIdx.x*blockDim.x+threadIdx.x; if (i < N){ a[i]=i; } } int main() { int N=4097; int threads=128; int blocks=(N+threads-1)/threads; int *a; hipMallocManaged(&a,N*sizeof(int)); foo<<<blocks,threads>>>(a, N); hipDeviceSynchronize(); for(int i=0;i<10;i++) printf("%d\n",a[i]); hipFree(a); cudaCheckError(); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z3fooPii .globl _Z3fooPii .p2align 8 .type _Z3fooPii,@function _Z3fooPii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s3, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_store_b32 v[2:3], v1, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z3fooPii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z3fooPii, .Lfunc_end0-_Z3fooPii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z3fooPii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z3fooPii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void foo(int *a, int N) { int i=blockIdx.x*blockDim.x+threadIdx.x; if (i < N){ a[i]=i; } } int main() { int N=4097; int threads=128; int blocks=(N+threads-1)/threads; int *a; hipMallocManaged(&a,N*sizeof(int)); foo<<<blocks,threads>>>(a, N); hipDeviceSynchronize(); for(int i=0;i<10;i++) printf("%d\n",a[i]); hipFree(a); cudaCheckError(); return 0; }
.text .file "main.hip" .globl _Z18__device_stub__fooPii # -- Begin function _Z18__device_stub__fooPii .p2align 4, 0x90 .type _Z18__device_stub__fooPii,@function _Z18__device_stub__fooPii: # @_Z18__device_stub__fooPii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z3fooPii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z18__device_stub__fooPii, .Lfunc_end0-_Z18__device_stub__fooPii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $96, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -16 leaq 8(%rsp), %rdi movl $16388, %esi # imm = 0x4004 movl $1, %edx callq hipMallocManaged movabsq $4294967329, %rdi # imm = 0x100000021 leaq 95(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $4097, 20(%rsp) # imm = 0x1001 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z3fooPii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_3: # =>This Inner Loop Header: Depth=1 movq 8(%rsp), %rax movl (%rax,%rbx,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %rbx cmpq $10, %rbx jne .LBB1_3 # %bb.4: movq 8(%rsp), %rdi callq hipFree callq hipGetLastError testl %eax, %eax jne .LBB1_6 # %bb.5: xorl %eax, %eax addq $96, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB1_6: .cfi_def_cfa_offset 112 movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %edi movl $.L.str.2, %esi movl $37, %edx movq %rax, %rcx xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z3fooPii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z3fooPii,@object # @_Z3fooPii .section .rodata,"a",@progbits .globl _Z3fooPii .p2align 3, 0x0 _Z3fooPii: .quad _Z18__device_stub__fooPii .size _Z3fooPii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d\n" .size .L.str, 4 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Cuda failure %s:%d: '%s'\n" .size .L.str.1, 26 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/dkfulp/intro_to_gpus/main/examples/example_3/main.hip" .size .L.str.2, 111 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z3fooPii" .size .L__unnamed_1, 10 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z18__device_stub__fooPii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z3fooPii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z3fooPii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e280000002500 */ /*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */ /* 0x001fca00078e0200 */ /*0040*/ ISETP.GE.AND P0, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */ /* 0x000fe200000001ff */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0080*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fca00078e0202 */ /*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*00a0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z3fooPii .globl _Z3fooPii .p2align 8 .type _Z3fooPii,@function _Z3fooPii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s3, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_store_b32 v[2:3], v1, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z3fooPii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z3fooPii, .Lfunc_end0-_Z3fooPii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z3fooPii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z3fooPii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0010c0ab_00000000-6_main.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z23__device_stub__Z3fooPiiPii .type _Z23__device_stub__Z3fooPiiPii, @function _Z23__device_stub__Z3fooPiiPii: .LFB2082: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z3fooPii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z23__device_stub__Z3fooPiiPii, .-_Z23__device_stub__Z3fooPiiPii .globl _Z3fooPii .type _Z3fooPii, @function _Z3fooPii: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z23__device_stub__Z3fooPiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z3fooPii, .-_Z3fooPii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "%d\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/dkfulp/intro_to_gpus/main/examples/example_3/main.cu" .section .rodata.str1.1 .LC2: .string "Cuda failure %s:%d: '%s'\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $56, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $1, %edx movl $16388, %esi call cudaMallocManaged@PLT movl $128, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $33, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L12: call cudaDeviceSynchronize@PLT movl $0, %ebx leaq .LC0(%rip), %rbp .L13: movq 8(%rsp), %rax movl (%rax,%rbx), %edx movq %rbp, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $4, %rbx cmpq $40, %rbx jne .L13 movq 8(%rsp), %rdi call cudaFree@PLT call cudaGetLastError@PLT testl %eax, %eax jne .L19 movq 40(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L18: .cfi_restore_state movl $4097, %esi movq 8(%rsp), %rdi call _Z23__device_stub__Z3fooPiiPii jmp .L12 .L19: movl %eax, %edi call cudaGetErrorString@PLT movq %rax, %r8 movl $35, %ecx leaq .LC1(%rip), %rdx leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L20: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z3fooPii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z3fooPii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "main.hip" .globl _Z18__device_stub__fooPii # -- Begin function _Z18__device_stub__fooPii .p2align 4, 0x90 .type _Z18__device_stub__fooPii,@function _Z18__device_stub__fooPii: # @_Z18__device_stub__fooPii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z3fooPii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z18__device_stub__fooPii, .Lfunc_end0-_Z18__device_stub__fooPii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $96, %rsp .cfi_def_cfa_offset 112 .cfi_offset %rbx, -16 leaq 8(%rsp), %rdi movl $16388, %esi # imm = 0x4004 movl $1, %edx callq hipMallocManaged movabsq $4294967329, %rdi # imm = 0x100000021 leaq 95(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $4097, 20(%rsp) # imm = 0x1001 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z3fooPii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_3: # =>This Inner Loop Header: Depth=1 movq 8(%rsp), %rax movl (%rax,%rbx,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %rbx cmpq $10, %rbx jne .LBB1_3 # %bb.4: movq 8(%rsp), %rdi callq hipFree callq hipGetLastError testl %eax, %eax jne .LBB1_6 # %bb.5: xorl %eax, %eax addq $96, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB1_6: .cfi_def_cfa_offset 112 movl %eax, %edi callq hipGetErrorString movl $.L.str.1, %edi movl $.L.str.2, %esi movl $37, %edx movq %rax, %rcx xorl %eax, %eax callq printf movl $1, %edi callq exit .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z3fooPii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z3fooPii,@object # @_Z3fooPii .section .rodata,"a",@progbits .globl _Z3fooPii .p2align 3, 0x0 _Z3fooPii: .quad _Z18__device_stub__fooPii .size _Z3fooPii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%d\n" .size .L.str, 4 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Cuda failure %s:%d: '%s'\n" .size .L.str.1, 26 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/dkfulp/intro_to_gpus/main/examples/example_3/main.hip" .size .L.str.2, 111 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z3fooPii" .size .L__unnamed_1, 10 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z18__device_stub__fooPii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z3fooPii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdlib.h> #include <vector> #include <algorithm> #include <iostream> #define TILE_WIDTH 16 // Task 1 - simple matrix multiplication __global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width) { //TODO: calculate the row & column index of the element int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float product = 0; //TODO: do dot product between row of ma and column of mb for (int i = 0; i < width; ++i) { product += ma[row * width + i] * mb[i * width + col]; } //TODO: write result in mc mc[row * width + col] = product; } // Task 2 - optimized matrix multiplication __global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width) { int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; //TODO: allocate 2D tiles in __shared__ memory __shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH]; __shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH]; //TODO: calculate the row & column index of the element int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float result = 0; // loop over the tiles of the input for(int t = 0; t < width/TILE_WIDTH; ++t) { //TODO: load tiles into __shared__ memory allocated before ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx]; mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col]; //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); //TODO: do dot product between row of tile from ma and column of tile from mb for (int i = 0; i < TILE_WIDTH; ++i) { result += ma_tile[ty][i] * mb_tile[i][tx]; } //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); } //TODO: write result in mc mc[row * width + col] = result; } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements const size_t n = 1<<10; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n); for(int i = 0; i < n*n; ++i) { host_a[i] = static_cast<float>(rand()) / RAND_MAX; host_b[i] = static_cast<float>(rand()) / RAND_MAX; } // allocate storage for the device float *device_a = 0, *device_b = 0, *device_c = 0; cudaMalloc((void**)&device_a, sizeof(float) * n * n); cudaMalloc((void**)&device_b, sizeof(float) * n * n); cudaMalloc((void**)&device_c, sizeof(float) * n * n); // copy input to the device cudaMemcpy(device_a, &host_a[0], sizeof(float) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(device_b, &host_b[0], sizeof(float) * n * n, cudaMemcpyHostToDevice); //Task 3 - measure the time spent in the kernel for simple and optimized implementation //TODO: create CUDA events for measuring kernel time cudaEvent_t launch_begin, launch_end; cudaEventCreate(&launch_begin); cudaEventCreate(&launch_end); // time many kernel launches and take the average time const size_t num_launches = 100; float average_simple_time = 0; std::cout << "Timing simple implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch cudaEventRecord(launch_begin, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); cudaEventRecord(launch_end, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete cudaEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; cudaEventElapsedTime(&time, launch_begin, launch_end); average_simple_time += time; } average_simple_time /= num_launches; std::cout << " done." << std::endl; //now time the optimized kernel // time many kernel launches and take the average time float average_optimized_time = 0; std::cout << "Timing optimized implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch cudaEventRecord(launch_begin, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); cudaEventRecord(launch_end, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete cudaEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; cudaEventElapsedTime(&time, launch_begin, launch_end); average_optimized_time += time; } average_optimized_time /= num_launches; std::cout << " done." << std::endl; // report the effective throughput of each kernel in GFLOPS // the effective throughput is measured as the number of floating point operations performed per second: // (one mul + one add) * N^3 float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f; float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f; std::cout << "Matrix size: " << n << "x" << n << std::endl; std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl; std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl; std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl; std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl; std::cout << std::endl; //TODO: destroy the CUDA events cudaEventDestroy(launch_begin); cudaEventDestroy(launch_end); // deallocate device memory cudaFree(device_a); cudaFree(device_b); cudaFree(device_c); return 0; }
.file "tmpxft_00173410_00000000-6_matrix_multiplication_skel.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4289: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4289: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m .type _Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m, @function _Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m: .LFB4311: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z22matrix_multiply_simplePfS_S_m(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE4311: .size _Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m, .-_Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m .globl _Z22matrix_multiply_simplePfS_S_m .type _Z22matrix_multiply_simplePfS_S_m, @function _Z22matrix_multiply_simplePfS_S_m: .LFB4312: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4312: .size _Z22matrix_multiply_simplePfS_S_m, .-_Z22matrix_multiply_simplePfS_S_m .globl _Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m .type _Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m, @function _Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m: .LFB4313: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z15matrix_multiplyPfS_S_m(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE4313: .size _Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m, .-_Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m .globl _Z15matrix_multiplyPfS_S_m .type _Z15matrix_multiplyPfS_S_m, @function _Z15matrix_multiplyPfS_S_m: .LFB4314: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4314: .size _Z15matrix_multiplyPfS_S_m, .-_Z15matrix_multiplyPfS_S_m .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z15matrix_multiplyPfS_S_m" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "_Z22matrix_multiply_simplePfS_S_m" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4316: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z15matrix_multiplyPfS_S_m(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z22matrix_multiply_simplePfS_S_m(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4316: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .rodata._ZNSt6vectorIfSaIfEEC2EmRKS0_.str1.8,"aMS",@progbits,1 .align 8 .LC2: .string "cannot create std::vector larger than max_size()" .section .text._ZNSt6vectorIfSaIfEEC2EmRKS0_,"axG",@progbits,_ZNSt6vectorIfSaIfEEC5EmRKS0_,comdat .align 2 .weak _ZNSt6vectorIfSaIfEEC2EmRKS0_ .type _ZNSt6vectorIfSaIfEEC2EmRKS0_, @function _ZNSt6vectorIfSaIfEEC2EmRKS0_: .LFB4623: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 movq %rsi, %rax shrq $61, %rax jne .L31 movq %rdi, %rbx movq %rsi, %rbp movq $0, (%rdi) movq $0, 8(%rdi) movq $0, 16(%rdi) testq %rsi, %rsi je .L23 leaq 0(,%rsi,4), %r12 movq %r12, %rdi call _Znwm@PLT movq %rax, (%rbx) movq %rax, 8(%rbx) leaq (%rax,%r12), %rdx movq %rdx, 16(%rbx) movl $0x00000000, (%rax) addq $4, %rax cmpq $1, %rbp je .L26 cmpq %rax, %rdx je .L27 .L25: movl $0x00000000, (%rax) addq $4, %rax cmpq %rax, %rdx jne .L25 jmp .L24 .L31: leaq .LC2(%rip), %rdi call _ZSt20__throw_length_errorPKc@PLT .L26: movq %rax, %rdx jmp .L24 .L27: movq %rax, %rdx jmp .L24 .L23: movq $0, (%rdi) movq $0, 16(%rdi) movl $0, %edx .L24: movq %rdx, 8(%rbx) popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4623: .size _ZNSt6vectorIfSaIfEEC2EmRKS0_, .-_ZNSt6vectorIfSaIfEEC2EmRKS0_ .weak _ZNSt6vectorIfSaIfEEC1EmRKS0_ .set _ZNSt6vectorIfSaIfEEC1EmRKS0_,_ZNSt6vectorIfSaIfEEC2EmRKS0_ .section .text._ZNSt6vectorIfSaIfEED2Ev,"axG",@progbits,_ZNSt6vectorIfSaIfEED5Ev,comdat .align 2 .weak _ZNSt6vectorIfSaIfEED2Ev .type _ZNSt6vectorIfSaIfEED2Ev, @function _ZNSt6vectorIfSaIfEED2Ev: .LFB4626: .cfi_startproc endbr64 movq (%rdi), %rax testq %rax, %rax je .L35 subq $8, %rsp .cfi_def_cfa_offset 16 movq 16(%rdi), %rsi subq %rax, %rsi movq %rax, %rdi call _ZdlPvm@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .L35: ret .cfi_endproc .LFE4626: .size _ZNSt6vectorIfSaIfEED2Ev, .-_ZNSt6vectorIfSaIfEED2Ev .weak _ZNSt6vectorIfSaIfEED1Ev .set _ZNSt6vectorIfSaIfEED1Ev,_ZNSt6vectorIfSaIfEED2Ev .section .rodata.str1.8 .align 8 .LC5: .string "Timing simple implementation..." .section .rodata.str1.1 .LC7: .string " done." .section .rodata.str1.8 .align 8 .LC8: .string "Timing optimized implementation..." .section .rodata.str1.1 .LC12: .string "Matrix size: " .LC13: .string "x" .LC14: .string "Tile size: " .LC15: .string "Throughput of simple kernel: " .LC16: .string " GFLOPS" .section .rodata.str1.8 .align 8 .LC17: .string "Throughput of optimized kernel: " .section .rodata.str1.1 .LC18: .string "Performance improvement: " .text .globl main .type main, @function main: .LFB4286: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4286 endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $200, %rsp .cfi_def_cfa_offset 240 movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax movl $1, 80(%rsp) movl $64, 84(%rsp) movl $64, 88(%rsp) movl $1, 92(%rsp) leaq 160(%rsp), %rbx leaq 96(%rsp), %rdi movq %rbx, %rdx movl $1048576, %esi .LEHB0: call _ZNSt6vectorIfSaIfEEC1EmRKS0_ .LEHE0: leaq 128(%rsp), %rdi movq %rbx, %rdx movl $1048576, %esi .LEHB1: call _ZNSt6vectorIfSaIfEEC1EmRKS0_ .LEHE1: leaq 64(%rsp), %rdx movq %rbx, %rdi movl $1048576, %esi .LEHB2: call _ZNSt6vectorIfSaIfEEC1EmRKS0_ .LEHE2: movl $0, %ebx .L39: call rand@PLT movq 96(%rsp), %rbp pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC4(%rip), %xmm0 movss %xmm0, 0(%rbp,%rbx) call rand@PLT movq 128(%rsp), %r12 pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC4(%rip), %xmm0 movss %xmm0, (%r12,%rbx) addq $4, %rbx cmpq $4194304, %rbx jne .L39 movq $0, 32(%rsp) movq $0, 40(%rsp) movq $0, 48(%rsp) leaq 32(%rsp), %rdi movl $4194304, %esi .LEHB3: call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT leaq 48(%rsp), %rdi movl $4194304, %esi call cudaMalloc@PLT movl $1, %ecx movl $4194304, %edx movq %rbp, %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $4194304, %edx movq %r12, %rsi movq 40(%rsp), %rdi call cudaMemcpy@PLT leaq 56(%rsp), %rdi call cudaEventCreate@PLT leaq 64(%rsp), %rdi call cudaEventCreate@PLT leaq .LC5(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movl $100, %ebx movl $0x00000000, %r12d leaq 28(%rsp), %rbp jmp .L42 .L60: movl $16, 72(%rsp) movl $16, 76(%rsp) movl 80(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 72(%rsp), %rdx movq 84(%rsp), %rdi movl 92(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L40 movl $1024, %ecx movq 48(%rsp), %rdx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m .L40: movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movl 80(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 72(%rsp), %rdx movq 84(%rsp), %rdi movl 92(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L41 movl $1024, %ecx movq 48(%rsp), %rdx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z47__device_stub__Z22matrix_multiply_simplePfS_S_mPfS_S_m .L41: movq 64(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 28(%rsp) movq 64(%rsp), %rdx movq 56(%rsp), %rsi movq %rbp, %rdi call cudaEventElapsedTime@PLT movd %r12d, %xmm5 addss 28(%rsp), %xmm5 movd %xmm5, %r12d subq $1, %rbx je .L59 .L42: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT jmp .L60 .L59: movaps %xmm5, %xmm7 divss .LC6(%rip), %xmm7 movd %xmm7, %r14d leaq .LC7(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC8(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movl $100, %ebx movl $0x00000000, %r12d leaq 28(%rsp), %rbp jmp .L45 .L62: movl $16, 72(%rsp) movl $16, 76(%rsp) movl 80(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 72(%rsp), %rdx movq 84(%rsp), %rdi movl 92(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L43 movl $1024, %ecx movq 48(%rsp), %rdx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m .L43: movl $0, %esi movq 64(%rsp), %rdi call cudaEventRecord@PLT movl 80(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 72(%rsp), %rdx movq 84(%rsp), %rdi movl 92(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L44 movl $1024, %ecx movq 48(%rsp), %rdx movq 40(%rsp), %rsi movq 32(%rsp), %rdi call _Z40__device_stub__Z15matrix_multiplyPfS_S_mPfS_S_m .L44: movq 64(%rsp), %rdi call cudaEventSynchronize@PLT movl $0x00000000, 28(%rsp) movq 64(%rsp), %rdx movq 56(%rsp), %rsi movq %rbp, %rdi call cudaEventElapsedTime@PLT movd %r12d, %xmm6 addss 28(%rsp), %xmm6 movd %xmm6, %r12d subq $1, %rbx je .L61 .L45: movl $0, %esi movq 56(%rsp), %rdi call cudaEventRecord@PLT jmp .L62 .L61: movaps %xmm6, %xmm2 divss .LC6(%rip), %xmm2 movd %xmm2, %ebx leaq .LC7(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movss .LC9(%rip), %xmm3 movd %r14d, %xmm1 divss %xmm3, %xmm1 movss .LC10(%rip), %xmm0 movaps %xmm0, %xmm4 divss %xmm1, %xmm4 movss .LC11(%rip), %xmm2 divss %xmm2, %xmm4 movss %xmm4, 8(%rsp) movd %ebx, %xmm1 divss %xmm3, %xmm1 divss %xmm1, %xmm0 divss %xmm2, %xmm0 movss %xmm0, 12(%rsp) leaq .LC12(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1024, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi leaq .LC13(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $1024, %esi call _ZNSo9_M_insertImEERSoT_@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC14(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $16, %esi call _ZNSolsEi@PLT movq %rax, %rdi leaq .LC13(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $16, %esi call _ZNSolsEi@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC15(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi pxor %xmm0, %xmm0 cvtss2sd 8(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi leaq .LC16(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC17(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi leaq .LC16(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq .LC18(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movss 12(%rsp), %xmm0 divss 8(%rsp), %xmm0 cvtss2sd %xmm0, %xmm0 call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rdi leaq .LC13(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT leaq _ZSt4cout(%rip), %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movq 56(%rsp), %rdi call cudaEventDestroy@PLT movq 64(%rsp), %rdi call cudaEventDestroy@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq 48(%rsp), %rdi call cudaFree@PLT .LEHE3: leaq 160(%rsp), %rdi call _ZNSt6vectorIfSaIfEED1Ev leaq 128(%rsp), %rdi call _ZNSt6vectorIfSaIfEED1Ev leaq 96(%rsp), %rdi call _ZNSt6vectorIfSaIfEED1Ev movq 184(%rsp), %rax subq %fs:40, %rax jne .L63 movl $0, %eax addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L53: .cfi_restore_state endbr64 movq %rax, %rbx leaq 160(%rsp), %rdi call _ZNSt6vectorIfSaIfEED1Ev .L47: leaq 128(%rsp), %rdi call _ZNSt6vectorIfSaIfEED1Ev .L48: leaq 96(%rsp), %rdi call _ZNSt6vectorIfSaIfEED1Ev movq 184(%rsp), %rax subq %fs:40, %rax je .L49 call __stack_chk_fail@PLT .L52: endbr64 movq %rax, %rbx jmp .L47 .L51: endbr64 movq %rax, %rbx jmp .L48 .L49: movq %rbx, %rdi .LEHB4: call _Unwind_Resume@PLT .LEHE4: .L63: call __stack_chk_fail@PLT .cfi_endproc .LFE4286: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA4286: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE4286-.LLSDACSB4286 .LLSDACSB4286: .uleb128 .LEHB0-.LFB4286 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB4286 .uleb128 .LEHE1-.LEHB1 .uleb128 .L51-.LFB4286 .uleb128 0 .uleb128 .LEHB2-.LFB4286 .uleb128 .LEHE2-.LEHB2 .uleb128 .L52-.LFB4286 .uleb128 0 .uleb128 .LEHB3-.LFB4286 .uleb128 .LEHE3-.LEHB3 .uleb128 .L53-.LFB4286 .uleb128 0 .uleb128 .LEHB4-.LFB4286 .uleb128 .LEHE4-.LEHB4 .uleb128 0 .uleb128 0 .LLSDACSE4286: .text .size main, .-main .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC4: .long 805306368 .align 4 .LC6: .long 1120403456 .align 4 .LC9: .long 1148846080 .align 4 .LC10: .long 1325400064 .align 4 .LC11: .long 1315859240 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdlib.h> #include <vector> #include <algorithm> #include <iostream> #define TILE_WIDTH 16 // Task 1 - simple matrix multiplication __global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width) { //TODO: calculate the row & column index of the element int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float product = 0; //TODO: do dot product between row of ma and column of mb for (int i = 0; i < width; ++i) { product += ma[row * width + i] * mb[i * width + col]; } //TODO: write result in mc mc[row * width + col] = product; } // Task 2 - optimized matrix multiplication __global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width) { int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; //TODO: allocate 2D tiles in __shared__ memory __shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH]; __shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH]; //TODO: calculate the row & column index of the element int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float result = 0; // loop over the tiles of the input for(int t = 0; t < width/TILE_WIDTH; ++t) { //TODO: load tiles into __shared__ memory allocated before ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx]; mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col]; //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); //TODO: do dot product between row of tile from ma and column of tile from mb for (int i = 0; i < TILE_WIDTH; ++i) { result += ma_tile[ty][i] * mb_tile[i][tx]; } //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); } //TODO: write result in mc mc[row * width + col] = result; } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements const size_t n = 1<<10; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n); for(int i = 0; i < n*n; ++i) { host_a[i] = static_cast<float>(rand()) / RAND_MAX; host_b[i] = static_cast<float>(rand()) / RAND_MAX; } // allocate storage for the device float *device_a = 0, *device_b = 0, *device_c = 0; cudaMalloc((void**)&device_a, sizeof(float) * n * n); cudaMalloc((void**)&device_b, sizeof(float) * n * n); cudaMalloc((void**)&device_c, sizeof(float) * n * n); // copy input to the device cudaMemcpy(device_a, &host_a[0], sizeof(float) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(device_b, &host_b[0], sizeof(float) * n * n, cudaMemcpyHostToDevice); //Task 3 - measure the time spent in the kernel for simple and optimized implementation //TODO: create CUDA events for measuring kernel time cudaEvent_t launch_begin, launch_end; cudaEventCreate(&launch_begin); cudaEventCreate(&launch_end); // time many kernel launches and take the average time const size_t num_launches = 100; float average_simple_time = 0; std::cout << "Timing simple implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch cudaEventRecord(launch_begin, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); cudaEventRecord(launch_end, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete cudaEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; cudaEventElapsedTime(&time, launch_begin, launch_end); average_simple_time += time; } average_simple_time /= num_launches; std::cout << " done." << std::endl; //now time the optimized kernel // time many kernel launches and take the average time float average_optimized_time = 0; std::cout << "Timing optimized implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch cudaEventRecord(launch_begin, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); cudaEventRecord(launch_end, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete cudaEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; cudaEventElapsedTime(&time, launch_begin, launch_end); average_optimized_time += time; } average_optimized_time /= num_launches; std::cout << " done." << std::endl; // report the effective throughput of each kernel in GFLOPS // the effective throughput is measured as the number of floating point operations performed per second: // (one mul + one add) * N^3 float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f; float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f; std::cout << "Matrix size: " << n << "x" << n << std::endl; std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl; std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl; std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl; std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl; std::cout << std::endl; //TODO: destroy the CUDA events cudaEventDestroy(launch_begin); cudaEventDestroy(launch_end); // deallocate device memory cudaFree(device_a); cudaFree(device_b); cudaFree(device_c); return 0; }
#include <hip/hip_runtime.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <iostream> #define TILE_WIDTH 16 // Task 1 - simple matrix multiplication __global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width) { //TODO: calculate the row & column index of the element int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float product = 0; //TODO: do dot product between row of ma and column of mb for (int i = 0; i < width; ++i) { product += ma[row * width + i] * mb[i * width + col]; } //TODO: write result in mc mc[row * width + col] = product; } // Task 2 - optimized matrix multiplication __global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width) { int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; //TODO: allocate 2D tiles in __shared__ memory __shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH]; __shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH]; //TODO: calculate the row & column index of the element int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float result = 0; // loop over the tiles of the input for(int t = 0; t < width/TILE_WIDTH; ++t) { //TODO: load tiles into __shared__ memory allocated before ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx]; mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col]; //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); //TODO: do dot product between row of tile from ma and column of tile from mb for (int i = 0; i < TILE_WIDTH; ++i) { result += ma_tile[ty][i] * mb_tile[i][tx]; } //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); } //TODO: write result in mc mc[row * width + col] = result; } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements const size_t n = 1<<10; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n); for(int i = 0; i < n*n; ++i) { host_a[i] = static_cast<float>(rand()) / RAND_MAX; host_b[i] = static_cast<float>(rand()) / RAND_MAX; } // allocate storage for the device float *device_a = 0, *device_b = 0, *device_c = 0; hipMalloc((void**)&device_a, sizeof(float) * n * n); hipMalloc((void**)&device_b, sizeof(float) * n * n); hipMalloc((void**)&device_c, sizeof(float) * n * n); // copy input to the device hipMemcpy(device_a, &host_a[0], sizeof(float) * n * n, hipMemcpyHostToDevice); hipMemcpy(device_b, &host_b[0], sizeof(float) * n * n, hipMemcpyHostToDevice); //Task 3 - measure the time spent in the kernel for simple and optimized implementation //TODO: create CUDA events for measuring kernel time hipEvent_t launch_begin, launch_end; hipEventCreate(&launch_begin); hipEventCreate(&launch_end); // time many kernel launches and take the average time const size_t num_launches = 100; float average_simple_time = 0; std::cout << "Timing simple implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch hipEventRecord(launch_begin, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); hipEventRecord(launch_end, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete hipEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; hipEventElapsedTime(&time, launch_begin, launch_end); average_simple_time += time; } average_simple_time /= num_launches; std::cout << " done." << std::endl; //now time the optimized kernel // time many kernel launches and take the average time float average_optimized_time = 0; std::cout << "Timing optimized implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch hipEventRecord(launch_begin, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); hipEventRecord(launch_end, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete hipEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; hipEventElapsedTime(&time, launch_begin, launch_end); average_optimized_time += time; } average_optimized_time /= num_launches; std::cout << " done." << std::endl; // report the effective throughput of each kernel in GFLOPS // the effective throughput is measured as the number of floating point operations performed per second: // (one mul + one add) * N^3 float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f; float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f; std::cout << "Matrix size: " << n << "x" << n << std::endl; std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl; std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl; std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl; std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl; std::cout << std::endl; //TODO: destroy the CUDA events hipEventDestroy(launch_begin); hipEventDestroy(launch_end); // deallocate device memory hipFree(device_a); hipFree(device_b); hipFree(device_c); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <iostream> #define TILE_WIDTH 16 // Task 1 - simple matrix multiplication __global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width) { //TODO: calculate the row & column index of the element int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float product = 0; //TODO: do dot product between row of ma and column of mb for (int i = 0; i < width; ++i) { product += ma[row * width + i] * mb[i * width + col]; } //TODO: write result in mc mc[row * width + col] = product; } // Task 2 - optimized matrix multiplication __global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width) { int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; //TODO: allocate 2D tiles in __shared__ memory __shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH]; __shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH]; //TODO: calculate the row & column index of the element int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float result = 0; // loop over the tiles of the input for(int t = 0; t < width/TILE_WIDTH; ++t) { //TODO: load tiles into __shared__ memory allocated before ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx]; mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col]; //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); //TODO: do dot product between row of tile from ma and column of tile from mb for (int i = 0; i < TILE_WIDTH; ++i) { result += ma_tile[ty][i] * mb_tile[i][tx]; } //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); } //TODO: write result in mc mc[row * width + col] = result; } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements const size_t n = 1<<10; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n); for(int i = 0; i < n*n; ++i) { host_a[i] = static_cast<float>(rand()) / RAND_MAX; host_b[i] = static_cast<float>(rand()) / RAND_MAX; } // allocate storage for the device float *device_a = 0, *device_b = 0, *device_c = 0; hipMalloc((void**)&device_a, sizeof(float) * n * n); hipMalloc((void**)&device_b, sizeof(float) * n * n); hipMalloc((void**)&device_c, sizeof(float) * n * n); // copy input to the device hipMemcpy(device_a, &host_a[0], sizeof(float) * n * n, hipMemcpyHostToDevice); hipMemcpy(device_b, &host_b[0], sizeof(float) * n * n, hipMemcpyHostToDevice); //Task 3 - measure the time spent in the kernel for simple and optimized implementation //TODO: create CUDA events for measuring kernel time hipEvent_t launch_begin, launch_end; hipEventCreate(&launch_begin); hipEventCreate(&launch_end); // time many kernel launches and take the average time const size_t num_launches = 100; float average_simple_time = 0; std::cout << "Timing simple implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch hipEventRecord(launch_begin, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); hipEventRecord(launch_end, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete hipEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; hipEventElapsedTime(&time, launch_begin, launch_end); average_simple_time += time; } average_simple_time /= num_launches; std::cout << " done." << std::endl; //now time the optimized kernel // time many kernel launches and take the average time float average_optimized_time = 0; std::cout << "Timing optimized implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch hipEventRecord(launch_begin, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); hipEventRecord(launch_end, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete hipEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; hipEventElapsedTime(&time, launch_begin, launch_end); average_optimized_time += time; } average_optimized_time /= num_launches; std::cout << " done." << std::endl; // report the effective throughput of each kernel in GFLOPS // the effective throughput is measured as the number of floating point operations performed per second: // (one mul + one add) * N^3 float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f; float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f; std::cout << "Matrix size: " << n << "x" << n << std::endl; std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl; std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl; std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl; std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl; std::cout << std::endl; //TODO: destroy the CUDA events hipEventDestroy(launch_begin); hipEventDestroy(launch_end); // deallocate device memory hipFree(device_a); hipFree(device_b); hipFree(device_c); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z22matrix_multiply_simplePfS_S_m .globl _Z22matrix_multiply_simplePfS_S_m .p2align 8 .type _Z22matrix_multiply_simplePfS_S_m,@function _Z22matrix_multiply_simplePfS_S_m: s_clause 0x1 s_load_b32 s4, s[0:1], 0x2c s_load_b64 s[2:3], s[0:1], 0x18 v_bfe_u32 v1, v0, 10, 10 v_and_b32_e32 v4, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s5, s4, 16 s_and_b32 s4, s4, 0xffff v_mad_u64_u32 v[2:3], null, s15, s5, v[1:2] v_mad_u64_u32 v[0:1], null, s14, s4, v[4:5] s_cmp_eq_u64 s[2:3], 0 s_mov_b64 s[4:5], 0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v7, 31, v2 v_ashrrev_i32_e32 v1, 31, v0 s_cbranch_scc1 .LBB0_3 s_load_b128 s[8:11], s[0:1], 0x0 v_mul_lo_u32 v5, v2, s3 v_mul_lo_u32 v6, v7, s2 v_mad_u64_u32 v[3:4], null, v2, s2, 0 v_mov_b32_e32 v8, 0 s_lshl_b64 s[6:7], s[2:3], 2 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_add3_u32 v4, v4, v5, v6 v_lshlrev_b64 v[5:6], 2, v[0:1] v_lshlrev_b64 v[3:4], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s8, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s9, v4, vcc_lo s_delay_alu instid0(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s10, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s11, v6, vcc_lo .p2align 6 .LBB0_2: global_load_b32 v9, v[3:4], off global_load_b32 v10, v[5:6], off s_add_u32 s4, s4, 1 v_add_co_u32 v5, vcc_lo, v5, s6 s_addc_u32 s5, s5, 0 v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo v_cmp_ge_u64_e64 s8, s[4:5], s[2:3] v_add_co_u32 v3, vcc_lo, v3, 4 v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo s_delay_alu instid0(VALU_DEP_3) s_and_b32 vcc_lo, exec_lo, s8 s_waitcnt vmcnt(0) v_fmac_f32_e32 v8, v9, v10 s_cbranch_vccz .LBB0_2 s_branch .LBB0_4 .LBB0_3: v_mov_b32_e32 v8, 0 .LBB0_4: s_load_b64 s[0:1], s[0:1], 0x10 v_mul_lo_u32 v5, v2, s3 v_mul_lo_u32 v6, v7, s2 v_mad_u64_u32 v[3:4], null, v2, s2, 0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v4, v4, v5, v6 v_lshlrev_b64 v[2:3], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, v3, v1, vcc_lo global_store_b32 v[0:1], v8, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22matrix_multiply_simplePfS_S_m .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z22matrix_multiply_simplePfS_S_m, .Lfunc_end0-_Z22matrix_multiply_simplePfS_S_m .section .AMDGPU.csdata,"",@progbits .text .protected _Z15matrix_multiplyPfS_S_m .globl _Z15matrix_multiplyPfS_S_m .p2align 8 .type _Z15matrix_multiplyPfS_S_m,@function _Z15matrix_multiplyPfS_S_m: s_clause 0x1 s_load_b32 s4, s[0:1], 0x2c s_load_b64 s[2:3], s[0:1], 0x18 v_bfe_u32 v4, v0, 10, 10 v_and_b32_e32 v5, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s5, s4, 16 s_and_b32 s4, s4, 0xffff s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[0:1], null, s14, s4, v[5:6] v_mov_b32_e32 v6, 0 v_mad_u64_u32 v[2:3], null, s15, s5, v[4:5] v_cmp_lt_u64_e64 s4, s[2:3], 16 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v1, 31, v0 v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_3) s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccnz .LBB1_5 s_load_b128 s[4:7], s[0:1], 0x0 v_mul_lo_u32 v8, v2, s3 v_mul_lo_u32 v9, v3, s2 v_mad_u64_u32 v[6:7], null, v2, s2, 0 v_lshlrev_b64 v[11:12], 2, v[0:1] v_lshlrev_b32_e32 v13, 2, v5 v_lshlrev_b32_e32 v5, 6, v4 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v7, v7, v8, v9 v_lshlrev_b64 v[9:10], 2, v[6:7] v_mov_b32_e32 v6, 0 v_add_nc_u32_e32 v8, 0x400, v13 v_add_nc_u32_e32 v7, v5, v13 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) v_add_co_u32 v14, vcc_lo, s4, v9 v_add_co_ci_u32_e32 v15, vcc_lo, s5, v10, vcc_lo v_add_co_u32 v9, vcc_lo, s6, v11 v_add_co_ci_u32_e32 v10, vcc_lo, s7, v12, vcc_lo v_add_co_u32 v11, vcc_lo, v14, v13 s_delay_alu instid0(VALU_DEP_4) v_add_co_ci_u32_e32 v12, vcc_lo, 0, v15, vcc_lo v_add_nc_u32_e32 v13, v8, v5 s_mov_b32 s5, 0 s_lshr_b64 s[6:7], s[2:3], 4 s_mov_b32 s8, s5 s_set_inst_prefetch_distance 0x1 .p2align 6 .LBB1_2: s_lshl_b32 s4, s8, 4 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v18, s4, v4 s_lshl_b64 s[10:11], s[4:5], 2 s_mov_b32 s4, 0 v_mad_u64_u32 v[14:15], null, v18, s2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[16:17], null, v18, s3, v[15:16] v_mov_b32_e32 v15, v16 v_add_co_u32 v16, vcc_lo, v11, s10 v_add_co_ci_u32_e32 v17, vcc_lo, s11, v12, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[14:15], 2, v[14:15] v_add_co_u32 v14, vcc_lo, v9, v14 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v15, vcc_lo, v10, v15, vcc_lo global_load_b32 v16, v[16:17], off global_load_b32 v15, v[14:15], off v_mov_b32_e32 v14, v8 s_waitcnt vmcnt(1) ds_store_b32 v7, v16 s_waitcnt vmcnt(0) ds_store_b32 v13, v15 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv .LBB1_3: v_add_nc_u32_e32 v15, s4, v5 s_add_i32 s4, s4, 4 ds_load_b32 v16, v14 ds_load_b32 v15, v15 v_add_nc_u32_e32 v14, 64, v14 s_cmp_eq_u32 s4, 64 s_waitcnt lgkmcnt(0) v_fmac_f32_e32 v6, v15, v16 s_cbranch_scc0 .LBB1_3 s_add_i32 s8, s8, 1 s_mov_b32 s9, s5 s_delay_alu instid0(SALU_CYCLE_1) v_cmp_le_u64_e64 s4, s[6:7], s[8:9] s_barrier buffer_gl0_inv s_and_b32 vcc_lo, exec_lo, s4 s_cbranch_vccz .LBB1_2 .LBB1_5: s_set_inst_prefetch_distance 0x2 s_load_b64 s[0:1], s[0:1], 0x10 v_mul_lo_u32 v5, v2, s3 v_mul_lo_u32 v7, v3, s2 v_mad_u64_u32 v[3:4], null, v2, s2, 0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v4, v4, v5, v7 v_lshlrev_b64 v[2:3], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, v3, v1, vcc_lo global_store_b32 v[0:1], v6, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z15matrix_multiplyPfS_S_m .amdhsa_group_segment_fixed_size 2048 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 19 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z15matrix_multiplyPfS_S_m, .Lfunc_end1-_Z15matrix_multiplyPfS_S_m .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z22matrix_multiply_simplePfS_S_m .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22matrix_multiply_simplePfS_S_m.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 2048 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z15matrix_multiplyPfS_S_m .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z15matrix_multiplyPfS_S_m.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 19 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <iostream> #define TILE_WIDTH 16 // Task 1 - simple matrix multiplication __global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width) { //TODO: calculate the row & column index of the element int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float product = 0; //TODO: do dot product between row of ma and column of mb for (int i = 0; i < width; ++i) { product += ma[row * width + i] * mb[i * width + col]; } //TODO: write result in mc mc[row * width + col] = product; } // Task 2 - optimized matrix multiplication __global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width) { int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; //TODO: allocate 2D tiles in __shared__ memory __shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH]; __shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH]; //TODO: calculate the row & column index of the element int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float result = 0; // loop over the tiles of the input for(int t = 0; t < width/TILE_WIDTH; ++t) { //TODO: load tiles into __shared__ memory allocated before ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx]; mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col]; //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); //TODO: do dot product between row of tile from ma and column of tile from mb for (int i = 0; i < TILE_WIDTH; ++i) { result += ma_tile[ty][i] * mb_tile[i][tx]; } //TODO: // wait until all data is loaded before allowing // any thread in this block to continue __syncthreads(); } //TODO: write result in mc mc[row * width + col] = result; } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements const size_t n = 1<<10; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n); for(int i = 0; i < n*n; ++i) { host_a[i] = static_cast<float>(rand()) / RAND_MAX; host_b[i] = static_cast<float>(rand()) / RAND_MAX; } // allocate storage for the device float *device_a = 0, *device_b = 0, *device_c = 0; hipMalloc((void**)&device_a, sizeof(float) * n * n); hipMalloc((void**)&device_b, sizeof(float) * n * n); hipMalloc((void**)&device_c, sizeof(float) * n * n); // copy input to the device hipMemcpy(device_a, &host_a[0], sizeof(float) * n * n, hipMemcpyHostToDevice); hipMemcpy(device_b, &host_b[0], sizeof(float) * n * n, hipMemcpyHostToDevice); //Task 3 - measure the time spent in the kernel for simple and optimized implementation //TODO: create CUDA events for measuring kernel time hipEvent_t launch_begin, launch_end; hipEventCreate(&launch_begin); hipEventCreate(&launch_end); // time many kernel launches and take the average time const size_t num_launches = 100; float average_simple_time = 0; std::cout << "Timing simple implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch hipEventRecord(launch_begin, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); hipEventRecord(launch_end, 0); matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete hipEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; hipEventElapsedTime(&time, launch_begin, launch_end); average_simple_time += time; } average_simple_time /= num_launches; std::cout << " done." << std::endl; //now time the optimized kernel // time many kernel launches and take the average time float average_optimized_time = 0; std::cout << "Timing optimized implementation..."; for(int i = 0; i < num_launches; ++i) { //TODO: record CUDA event before and after the kernel launch hipEventRecord(launch_begin, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); hipEventRecord(launch_end, 0); matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n); //TODO: Wait for launch_end event to complete hipEventSynchronize(launch_end); //TODO: measure the time spent in the kernel float time = 0; hipEventElapsedTime(&time, launch_begin, launch_end); average_optimized_time += time; } average_optimized_time /= num_launches; std::cout << " done." << std::endl; // report the effective throughput of each kernel in GFLOPS // the effective throughput is measured as the number of floating point operations performed per second: // (one mul + one add) * N^3 float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f; float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f; std::cout << "Matrix size: " << n << "x" << n << std::endl; std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl; std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl; std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl; std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl; std::cout << std::endl; //TODO: destroy the CUDA events hipEventDestroy(launch_begin); hipEventDestroy(launch_end); // deallocate device memory hipFree(device_a); hipFree(device_b); hipFree(device_c); return 0; }
.text .file "matrix_multiplication_skel.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z37__device_stub__matrix_multiply_simplePfS_S_m # -- Begin function _Z37__device_stub__matrix_multiply_simplePfS_S_m .p2align 4, 0x90 .type _Z37__device_stub__matrix_multiply_simplePfS_S_m,@function _Z37__device_stub__matrix_multiply_simplePfS_S_m: # @_Z37__device_stub__matrix_multiply_simplePfS_S_m .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movq %rcx, 48(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rax movq %rax, 104(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z22matrix_multiply_simplePfS_S_m, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z37__device_stub__matrix_multiply_simplePfS_S_m, .Lfunc_end0-_Z37__device_stub__matrix_multiply_simplePfS_S_m .cfi_endproc # -- End function .globl _Z30__device_stub__matrix_multiplyPfS_S_m # -- Begin function _Z30__device_stub__matrix_multiplyPfS_S_m .p2align 4, 0x90 .type _Z30__device_stub__matrix_multiplyPfS_S_m,@function _Z30__device_stub__matrix_multiplyPfS_S_m: # @_Z30__device_stub__matrix_multiplyPfS_S_m .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movq %rcx, 48(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rax movq %rax, 104(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z15matrix_multiplyPfS_S_m, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z30__device_stub__matrix_multiplyPfS_S_m, .Lfunc_end1-_Z30__device_stub__matrix_multiplyPfS_S_m .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI2_0: .long 0x30000000 # float 4.65661287E-10 .LCPI2_1: .long 0x42c80000 # float 100 .LCPI2_2: .long 0x447a0000 # float 1000 .LCPI2_3: .long 0x4f000000 # float 2.14748365E+9 .LCPI2_4: .long 0x4e6e6b28 # float 1.0E+9 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: # %_ZNSt6vectorIfSaIfEEC2EmRKS0_.exit pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $184, %rsp .cfi_def_cfa_offset 240 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 .cfi_escape 0x2e, 0x00 movl $4194304, %edi # imm = 0x400000 callq _Znwm .cfi_escape 0x2e, 0x00 movl $4194304, %edx # imm = 0x400000 movq %rax, 168(%rsp) # 8-byte Spill movq %rax, %rdi xorl %esi, %esi callq memset@PLT .Ltmp0: .cfi_escape 0x2e, 0x00 movl $4194304, %edi # imm = 0x400000 callq _Znwm .Ltmp1: # %bb.1: # %_ZNSt6vectorIfSaIfEEC2EmRKS0_.exit78 movq %rax, %r15 .cfi_escape 0x2e, 0x00 xorl %ebx, %ebx movl $4194304, %edx # imm = 0x400000 movq %rax, %rdi xorl %esi, %esi callq memset@PLT movq 168(%rsp), %r14 # 8-byte Reload .p2align 4, 0x90 .LBB2_2: # =>This Inner Loop Header: Depth=1 .cfi_escape 0x2e, 0x00 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm1, %xmm0 movss %xmm0, (%r14,%rbx,4) .cfi_escape 0x2e, 0x00 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI2_0(%rip), %xmm0 movss %xmm0, (%r15,%rbx,4) incq %rbx cmpq $1048576, %rbx # imm = 0x100000 jne .LBB2_2 # %bb.3: movq %r15, 176(%rsp) # 8-byte Spill movq $0, 24(%rsp) movq $0, 16(%rsp) movq $0, 96(%rsp) .Ltmp3: .cfi_escape 0x2e, 0x00 leaq 24(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc .Ltmp4: # %bb.4: .Ltmp5: .cfi_escape 0x2e, 0x00 leaq 16(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc .Ltmp6: # %bb.5: .Ltmp7: .cfi_escape 0x2e, 0x00 leaq 96(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc .Ltmp8: # %bb.6: movq 24(%rsp), %rdi .Ltmp9: .cfi_escape 0x2e, 0x00 movl $4194304, %edx # imm = 0x400000 movq 168(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy .Ltmp10: # %bb.7: movq 16(%rsp), %rdi .Ltmp11: .cfi_escape 0x2e, 0x00 movl $4194304, %edx # imm = 0x400000 movq 176(%rsp), %rsi # 8-byte Reload movl $1, %ecx callq hipMemcpy .Ltmp12: # %bb.8: .Ltmp14: .cfi_escape 0x2e, 0x00 leaq 104(%rsp), %rdi callq hipEventCreate .Ltmp15: # %bb.9: .Ltmp16: .cfi_escape 0x2e, 0x00 leaq 8(%rsp), %rdi callq hipEventCreate .Ltmp17: # %bb.10: .Ltmp19: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str, %esi movl $31, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp20: # %bb.11: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit.preheader xorps %xmm0, %xmm0 movss %xmm0, (%rsp) # 4-byte Spill movl $100, %ebp movabsq $274877907008, %r15 # imm = 0x4000000040 movabsq $68719476752, %r12 # imm = 0x1000000010 leaq 120(%rsp), %rbx leaq 112(%rsp), %r14 leaq 128(%rsp), %r13 .p2align 4, 0x90 .LBB2_12: # =>This Inner Loop Header: Depth=1 movq 104(%rsp), %rdi .Ltmp21: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp22: # %bb.13: # in Loop: Header=BB2_12 Depth=1 .Ltmp23: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp24: # %bb.14: # in Loop: Header=BB2_12 Depth=1 testl %eax, %eax jne .LBB2_17 # %bb.15: # in Loop: Header=BB2_12 Depth=1 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 96(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) movq $1024, 64(%rsp) # imm = 0x400 leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 80(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) .Ltmp25: .cfi_escape 0x2e, 0x00 leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi movq %rbx, %rdx movq %r14, %rcx callq __hipPopCallConfiguration .Ltmp26: # %bb.16: # %.noexc # in Loop: Header=BB2_12 Depth=1 movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d .Ltmp27: .cfi_escape 0x2e, 0x10 movl $_Z22matrix_multiply_simplePfS_S_m, %edi movq %r13, %r9 pushq 112(%rsp) .cfi_adjust_cfa_offset 8 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp28: .LBB2_17: # in Loop: Header=BB2_12 Depth=1 movq 8(%rsp), %rdi .Ltmp29: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp30: # %bb.18: # in Loop: Header=BB2_12 Depth=1 .Ltmp31: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp32: # %bb.19: # in Loop: Header=BB2_12 Depth=1 testl %eax, %eax jne .LBB2_22 # %bb.20: # in Loop: Header=BB2_12 Depth=1 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 96(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) movq $1024, 64(%rsp) # imm = 0x400 leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 80(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) .Ltmp33: .cfi_escape 0x2e, 0x00 leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi movq %rbx, %rdx movq %r14, %rcx callq __hipPopCallConfiguration .Ltmp34: # %bb.21: # %.noexc88 # in Loop: Header=BB2_12 Depth=1 movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d .Ltmp35: .cfi_escape 0x2e, 0x10 movl $_Z22matrix_multiply_simplePfS_S_m, %edi movq %r13, %r9 pushq 112(%rsp) .cfi_adjust_cfa_offset 8 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp36: .LBB2_22: # in Loop: Header=BB2_12 Depth=1 movq 8(%rsp), %rdi .Ltmp37: .cfi_escape 0x2e, 0x00 callq hipEventSynchronize .Ltmp38: # %bb.23: # in Loop: Header=BB2_12 Depth=1 movl $0, 128(%rsp) movq 104(%rsp), %rsi movq 8(%rsp), %rdx .Ltmp40: .cfi_escape 0x2e, 0x00 movq %r13, %rdi callq hipEventElapsedTime .Ltmp41: # %bb.24: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit # in Loop: Header=BB2_12 Depth=1 movss (%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero addss 128(%rsp), %xmm0 movss %xmm0, (%rsp) # 4-byte Spill decl %ebp jne .LBB2_12 # %bb.25: .Ltmp43: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $6, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp44: # %bb.26: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit80 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_27 # %bb.34: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%rbx) je .LBB2_36 # %bb.35: movzbl 67(%rbx), %eax jmp .LBB2_38 .LBB2_36: .Ltmp45: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp46: # %bb.37: # %.noexc167 movq (%rbx), %rax .Ltmp47: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $10, %esi callq *48(%rax) .Ltmp48: .LBB2_38: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i .Ltmp49: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc .Ltmp50: # %bb.39: # %.noexc169 .Ltmp51: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp52: # %bb.40: # %_ZNSolsEPFRSoS_E.exit .Ltmp53: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl $34, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp54: # %bb.41: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit93.preheader movss (%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero divss .LCPI2_1(%rip), %xmm0 movss %xmm0, (%rsp) # 4-byte Spill xorps %xmm0, %xmm0 movss %xmm0, 4(%rsp) # 4-byte Spill movl $100, %ebp movabsq $274877907008, %r15 # imm = 0x4000000040 movabsq $68719476752, %r12 # imm = 0x1000000010 leaq 120(%rsp), %rbx leaq 112(%rsp), %r14 leaq 128(%rsp), %r13 .p2align 4, 0x90 .LBB2_42: # =>This Inner Loop Header: Depth=1 movq 104(%rsp), %rdi .Ltmp55: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp56: # %bb.43: # in Loop: Header=BB2_42 Depth=1 .Ltmp57: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp58: # %bb.44: # in Loop: Header=BB2_42 Depth=1 testl %eax, %eax jne .LBB2_47 # %bb.45: # in Loop: Header=BB2_42 Depth=1 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 96(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) movq $1024, 64(%rsp) # imm = 0x400 leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 80(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) .Ltmp59: .cfi_escape 0x2e, 0x00 leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi movq %rbx, %rdx movq %r14, %rcx callq __hipPopCallConfiguration .Ltmp60: # %bb.46: # %.noexc102 # in Loop: Header=BB2_42 Depth=1 movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d .Ltmp61: .cfi_escape 0x2e, 0x10 movl $_Z15matrix_multiplyPfS_S_m, %edi movq %r13, %r9 pushq 112(%rsp) .cfi_adjust_cfa_offset 8 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp62: .LBB2_47: # in Loop: Header=BB2_42 Depth=1 movq 8(%rsp), %rdi .Ltmp63: .cfi_escape 0x2e, 0x00 xorl %esi, %esi callq hipEventRecord .Ltmp64: # %bb.48: # in Loop: Header=BB2_42 Depth=1 .Ltmp65: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration .Ltmp66: # %bb.49: # in Loop: Header=BB2_42 Depth=1 testl %eax, %eax jne .LBB2_52 # %bb.50: # in Loop: Header=BB2_42 Depth=1 movq 24(%rsp), %rax movq 16(%rsp), %rcx movq 96(%rsp), %rdx movq %rax, 88(%rsp) movq %rcx, 80(%rsp) movq %rdx, 72(%rsp) movq $1024, 64(%rsp) # imm = 0x400 leaq 88(%rsp), %rax movq %rax, 128(%rsp) leaq 80(%rsp), %rax movq %rax, 136(%rsp) leaq 72(%rsp), %rax movq %rax, 144(%rsp) leaq 64(%rsp), %rax movq %rax, 152(%rsp) .Ltmp67: .cfi_escape 0x2e, 0x00 leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi movq %rbx, %rdx movq %r14, %rcx callq __hipPopCallConfiguration .Ltmp68: # %bb.51: # %.noexc110 # in Loop: Header=BB2_42 Depth=1 movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d .Ltmp69: .cfi_escape 0x2e, 0x10 movl $_Z15matrix_multiplyPfS_S_m, %edi movq %r13, %r9 pushq 112(%rsp) .cfi_adjust_cfa_offset 8 pushq 128(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp70: .LBB2_52: # in Loop: Header=BB2_42 Depth=1 movq 8(%rsp), %rdi .Ltmp71: .cfi_escape 0x2e, 0x00 callq hipEventSynchronize .Ltmp72: # %bb.53: # in Loop: Header=BB2_42 Depth=1 movl $0, 128(%rsp) movq 104(%rsp), %rsi movq 8(%rsp), %rdx .Ltmp74: .cfi_escape 0x2e, 0x00 movq %r13, %rdi callq hipEventElapsedTime .Ltmp75: # %bb.54: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit93 # in Loop: Header=BB2_42 Depth=1 movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero addss 128(%rsp), %xmm0 movss %xmm0, 4(%rsp) # 4-byte Spill decl %ebp jne .LBB2_42 # %bb.55: .Ltmp77: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.1, %esi movl $6, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp78: # %bb.56: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit95 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_57 # %bb.59: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i172 cmpb $0, 56(%rbx) je .LBB2_61 # %bb.60: movzbl 67(%rbx), %eax jmp .LBB2_63 .LBB2_61: .Ltmp79: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp80: # %bb.62: # %.noexc177 movq (%rbx), %rax .Ltmp81: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $10, %esi callq *48(%rax) .Ltmp82: .LBB2_63: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i174 .Ltmp83: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc .Ltmp84: # %bb.64: # %.noexc179 .Ltmp85: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp86: # %bb.65: # %_ZNSolsEPFRSoS_E.exit114 .Ltmp87: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.3, %esi movl $13, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp88: # %bb.66: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit116 .Ltmp89: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $1024, %esi # imm = 0x400 callq _ZNSo9_M_insertImEERSoT_ .Ltmp90: # %bb.67: # %_ZNSolsEm.exit .Ltmp91: movq %rax, %rbx .cfi_escape 0x2e, 0x00 movl $.L.str.4, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp92: # %bb.68: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit119 .Ltmp93: .cfi_escape 0x2e, 0x00 movl $1024, %esi # imm = 0x400 movq %rbx, %rdi callq _ZNSo9_M_insertImEERSoT_ .Ltmp94: # %bb.69: # %_ZNSolsEm.exit121 movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_118 # %bb.70: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i183 cmpb $0, 56(%r14) je .LBB2_72 # %bb.71: movzbl 67(%r14), %eax jmp .LBB2_74 .LBB2_72: .Ltmp95: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp96: # %bb.73: # %.noexc188 movq (%r14), %rax .Ltmp97: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movl $10, %esi callq *48(%rax) .Ltmp98: .LBB2_74: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i185 .Ltmp99: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc .Ltmp100: # %bb.75: # %.noexc190 .Ltmp101: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp102: # %bb.76: # %_ZNSolsEPFRSoS_E.exit123 .Ltmp103: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.5, %esi movl $11, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp104: # %bb.77: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit125 .Ltmp105: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $16, %esi callq _ZNSolsEi .Ltmp106: # %bb.78: .Ltmp107: movq %rax, %rbx .cfi_escape 0x2e, 0x00 movl $.L.str.4, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp108: # %bb.79: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit127 .Ltmp109: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $16, %esi callq _ZNSolsEi .Ltmp110: # %bb.80: movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_118 # %bb.81: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i194 cmpb $0, 56(%r14) je .LBB2_83 # %bb.82: movzbl 67(%r14), %eax jmp .LBB2_85 .LBB2_83: .Ltmp111: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp112: # %bb.84: # %.noexc199 movq (%r14), %rax .Ltmp113: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movl $10, %esi callq *48(%rax) .Ltmp114: .LBB2_85: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i196 .Ltmp115: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc .Ltmp116: # %bb.86: # %.noexc201 .Ltmp117: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp118: # %bb.87: # %_ZNSolsEPFRSoS_E.exit129 .Ltmp119: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.6, %esi movl $29, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp120: # %bb.88: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit131 movss (%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero divss .LCPI2_2(%rip), %xmm0 movss .LCPI2_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero divss %xmm0, %xmm1 divss .LCPI2_4(%rip), %xmm1 movss %xmm1, (%rsp) # 4-byte Spill xorps %xmm0, %xmm0 cvtss2sd %xmm1, %xmm0 .Ltmp121: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp122: # %bb.89: # %_ZNSolsEf.exit .Ltmp123: movq %rax, %rbx .cfi_escape 0x2e, 0x00 movl $.L.str.7, %esi movl $7, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp124: # %bb.90: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit134 movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_118 # %bb.91: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i205 cmpb $0, 56(%r14) je .LBB2_93 # %bb.92: movzbl 67(%r14), %eax jmp .LBB2_95 .LBB2_93: .Ltmp125: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp126: # %bb.94: # %.noexc210 movq (%r14), %rax .Ltmp127: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movl $10, %esi callq *48(%rax) .Ltmp128: .LBB2_95: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i207 .Ltmp129: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc .Ltmp130: # %bb.96: # %.noexc212 .Ltmp131: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp132: # %bb.97: # %_ZNSolsEPFRSoS_E.exit136 .Ltmp133: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.8, %esi movl $32, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp134: # %bb.98: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit138 movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero divss .LCPI2_1(%rip), %xmm0 divss .LCPI2_2(%rip), %xmm0 movss .LCPI2_3(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero divss %xmm0, %xmm1 divss .LCPI2_4(%rip), %xmm1 movss %xmm1, 4(%rsp) # 4-byte Spill xorps %xmm0, %xmm0 cvtss2sd %xmm1, %xmm0 .Ltmp135: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp136: # %bb.99: # %_ZNSolsEf.exit140 .Ltmp137: movq %rax, %rbx .cfi_escape 0x2e, 0x00 movl $.L.str.7, %esi movl $7, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp138: # %bb.100: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit142 movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_118 # %bb.101: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i216 cmpb $0, 56(%r14) je .LBB2_103 # %bb.102: movzbl 67(%r14), %eax jmp .LBB2_105 .LBB2_103: .Ltmp139: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp140: # %bb.104: # %.noexc221 movq (%r14), %rax .Ltmp141: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movl $10, %esi callq *48(%rax) .Ltmp142: .LBB2_105: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i218 .Ltmp143: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc .Ltmp144: # %bb.106: # %.noexc223 .Ltmp145: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp146: # %bb.107: # %_ZNSolsEPFRSoS_E.exit144 .Ltmp147: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.9, %esi movl $25, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp148: # %bb.108: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit146 movss 4(%rsp), %xmm0 # 4-byte Reload # xmm0 = mem[0],zero,zero,zero divss (%rsp), %xmm0 # 4-byte Folded Reload cvtss2sd %xmm0, %xmm0 .Ltmp149: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp150: # %bb.109: # %_ZNSolsEf.exit148 .Ltmp151: movq %rax, %rbx .cfi_escape 0x2e, 0x00 movl $.L.str.4, %esi movl $1, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp152: # %bb.110: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit150 movq (%rbx), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %r14 testq %r14, %r14 je .LBB2_118 # %bb.111: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i227 cmpb $0, 56(%r14) je .LBB2_113 # %bb.112: movzbl 67(%r14), %eax jmp .LBB2_115 .LBB2_113: .Ltmp153: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp154: # %bb.114: # %.noexc232 movq (%r14), %rax .Ltmp155: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movl $10, %esi callq *48(%rax) .Ltmp156: .LBB2_115: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i229 .Ltmp157: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %rbx, %rdi callq _ZNSo3putEc .Ltmp158: # %bb.116: # %.noexc234 .Ltmp159: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp160: # %bb.117: # %_ZNSolsEPFRSoS_E.exit152 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB2_118 # %bb.120: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i238 cmpb $0, 56(%rbx) je .LBB2_122 # %bb.121: movzbl 67(%rbx), %eax jmp .LBB2_124 .LBB2_122: .Ltmp161: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp162: # %bb.123: # %.noexc243 movq (%rbx), %rax .Ltmp163: .cfi_escape 0x2e, 0x00 movq %rbx, %rdi movl $10, %esi callq *48(%rax) .Ltmp164: .LBB2_124: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i240 .Ltmp165: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc .Ltmp166: # %bb.125: # %.noexc245 .Ltmp167: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp168: # %bb.126: # %_ZNSolsEPFRSoS_E.exit154 movq 104(%rsp), %rdi .Ltmp169: .cfi_escape 0x2e, 0x00 callq hipEventDestroy .Ltmp170: # %bb.127: movq 8(%rsp), %rdi .Ltmp171: .cfi_escape 0x2e, 0x00 callq hipEventDestroy .Ltmp172: # %bb.128: movq 24(%rsp), %rdi .Ltmp173: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp174: # %bb.129: movq 16(%rsp), %rdi .Ltmp175: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp176: # %bb.130: movq 96(%rsp), %rdi .Ltmp177: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp178: # %bb.131: # %_ZNSt6vectorIfSaIfEED2Ev.exit .cfi_escape 0x2e, 0x00 movq 176(%rsp), %rdi # 8-byte Reload callq _ZdlPv .cfi_escape 0x2e, 0x00 movq 168(%rsp), %rdi # 8-byte Reload callq _ZdlPv xorl %eax, %eax addq $184, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB2_118: # %.invoke .cfi_def_cfa_offset 240 .Ltmp179: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp180: # %bb.119: # %.cont .LBB2_27: .Ltmp185: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp186: # %bb.33: # %.noexc166 .LBB2_57: .Ltmp182: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp183: # %bb.58: # %.noexc176 .LBB2_135: .Ltmp2: movq %rax, %rbx jmp .LBB2_134 .LBB2_29: .Ltmp18: jmp .LBB2_133 .LBB2_28: .Ltmp13: jmp .LBB2_133 .LBB2_136: .Ltmp184: jmp .LBB2_133 .LBB2_30: .Ltmp187: jmp .LBB2_133 .LBB2_138: .Ltmp76: jmp .LBB2_133 .LBB2_32: .Ltmp42: jmp .LBB2_133 .LBB2_132: .Ltmp181: jmp .LBB2_133 .LBB2_137: .Ltmp73: jmp .LBB2_133 .LBB2_31: .Ltmp39: .LBB2_133: movq %rax, %rbx .cfi_escape 0x2e, 0x00 movq 176(%rsp), %rdi # 8-byte Reload callq _ZdlPv .LBB2_134: # %_ZNSt6vectorIfSaIfEED2Ev.exit164 .cfi_escape 0x2e, 0x00 movq 168(%rsp), %rdi # 8-byte Reload callq _ZdlPv .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _Unwind_Resume@PLT .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table2: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 .uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2 .byte 0 # On action: cleanup .uleb128 .Ltmp1-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp3-.Ltmp1 # Call between .Ltmp1 and .Ltmp3 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 4 << .uleb128 .Ltmp12-.Ltmp3 # Call between .Ltmp3 and .Ltmp12 .uleb128 .Ltmp13-.Lfunc_begin0 # jumps to .Ltmp13 .byte 0 # On action: cleanup .uleb128 .Ltmp14-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Ltmp17-.Ltmp14 # Call between .Ltmp14 and .Ltmp17 .uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18 .byte 0 # On action: cleanup .uleb128 .Ltmp19-.Lfunc_begin0 # >> Call Site 6 << .uleb128 .Ltmp20-.Ltmp19 # Call between .Ltmp19 and .Ltmp20 .uleb128 .Ltmp187-.Lfunc_begin0 # jumps to .Ltmp187 .byte 0 # On action: cleanup .uleb128 .Ltmp21-.Lfunc_begin0 # >> Call Site 7 << .uleb128 .Ltmp38-.Ltmp21 # Call between .Ltmp21 and .Ltmp38 .uleb128 .Ltmp39-.Lfunc_begin0 # jumps to .Ltmp39 .byte 0 # On action: cleanup .uleb128 .Ltmp40-.Lfunc_begin0 # >> Call Site 8 << .uleb128 .Ltmp41-.Ltmp40 # Call between .Ltmp40 and .Ltmp41 .uleb128 .Ltmp42-.Lfunc_begin0 # jumps to .Ltmp42 .byte 0 # On action: cleanup .uleb128 .Ltmp43-.Lfunc_begin0 # >> Call Site 9 << .uleb128 .Ltmp52-.Ltmp43 # Call between .Ltmp43 and .Ltmp52 .uleb128 .Ltmp187-.Lfunc_begin0 # jumps to .Ltmp187 .byte 0 # On action: cleanup .uleb128 .Ltmp53-.Lfunc_begin0 # >> Call Site 10 << .uleb128 .Ltmp54-.Ltmp53 # Call between .Ltmp53 and .Ltmp54 .uleb128 .Ltmp184-.Lfunc_begin0 # jumps to .Ltmp184 .byte 0 # On action: cleanup .uleb128 .Ltmp55-.Lfunc_begin0 # >> Call Site 11 << .uleb128 .Ltmp72-.Ltmp55 # Call between .Ltmp55 and .Ltmp72 .uleb128 .Ltmp73-.Lfunc_begin0 # jumps to .Ltmp73 .byte 0 # On action: cleanup .uleb128 .Ltmp74-.Lfunc_begin0 # >> Call Site 12 << .uleb128 .Ltmp75-.Ltmp74 # Call between .Ltmp74 and .Ltmp75 .uleb128 .Ltmp76-.Lfunc_begin0 # jumps to .Ltmp76 .byte 0 # On action: cleanup .uleb128 .Ltmp77-.Lfunc_begin0 # >> Call Site 13 << .uleb128 .Ltmp86-.Ltmp77 # Call between .Ltmp77 and .Ltmp86 .uleb128 .Ltmp184-.Lfunc_begin0 # jumps to .Ltmp184 .byte 0 # On action: cleanup .uleb128 .Ltmp87-.Lfunc_begin0 # >> Call Site 14 << .uleb128 .Ltmp180-.Ltmp87 # Call between .Ltmp87 and .Ltmp180 .uleb128 .Ltmp181-.Lfunc_begin0 # jumps to .Ltmp181 .byte 0 # On action: cleanup .uleb128 .Ltmp185-.Lfunc_begin0 # >> Call Site 15 << .uleb128 .Ltmp186-.Ltmp185 # Call between .Ltmp185 and .Ltmp186 .uleb128 .Ltmp187-.Lfunc_begin0 # jumps to .Ltmp187 .byte 0 # On action: cleanup .uleb128 .Ltmp182-.Lfunc_begin0 # >> Call Site 16 << .uleb128 .Ltmp183-.Ltmp182 # Call between .Ltmp182 and .Ltmp183 .uleb128 .Ltmp184-.Lfunc_begin0 # jumps to .Ltmp184 .byte 0 # On action: cleanup .uleb128 .Ltmp183-.Lfunc_begin0 # >> Call Site 17 << .uleb128 .Lfunc_end2-.Ltmp183 # Call between .Ltmp183 and .Lfunc_end2 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .p2align 2, 0x0 # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22matrix_multiply_simplePfS_S_m, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z15matrix_multiplyPfS_S_m, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z22matrix_multiply_simplePfS_S_m,@object # @_Z22matrix_multiply_simplePfS_S_m .section .rodata,"a",@progbits .globl _Z22matrix_multiply_simplePfS_S_m .p2align 3, 0x0 _Z22matrix_multiply_simplePfS_S_m: .quad _Z37__device_stub__matrix_multiply_simplePfS_S_m .size _Z22matrix_multiply_simplePfS_S_m, 8 .type _Z15matrix_multiplyPfS_S_m,@object # @_Z15matrix_multiplyPfS_S_m .globl _Z15matrix_multiplyPfS_S_m .p2align 3, 0x0 _Z15matrix_multiplyPfS_S_m: .quad _Z30__device_stub__matrix_multiplyPfS_S_m .size _Z15matrix_multiplyPfS_S_m, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Timing simple implementation..." .size .L.str, 32 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz " done." .size .L.str.1, 7 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "Timing optimized implementation..." .size .L.str.2, 35 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Matrix size: " .size .L.str.3, 14 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "x" .size .L.str.4, 2 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "Tile size: " .size .L.str.5, 12 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Throughput of simple kernel: " .size .L.str.6, 30 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz " GFLOPS" .size .L.str.7, 8 .type .L.str.8,@object # @.str.8 .L.str.8: .asciz "Throughput of optimized kernel: " .size .L.str.8, 33 .type .L.str.9,@object # @.str.9 .L.str.9: .asciz "Performance improvement: " .size .L.str.9, 26 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z22matrix_multiply_simplePfS_S_m" .size .L__unnamed_1, 34 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z15matrix_multiplyPfS_S_m" .size .L__unnamed_2, 27 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z37__device_stub__matrix_multiply_simplePfS_S_m .addrsig_sym _Z30__device_stub__matrix_multiplyPfS_S_m .addrsig_sym __gxx_personality_v0 .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym _Z22matrix_multiply_simplePfS_S_m .addrsig_sym _Z15matrix_multiplyPfS_S_m .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/** Thrust Library **/ #include <thrust/random.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> /** Std library **/ #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <random> #include <time.h> #include <chrono> #include <fstream> #include <string> #include <iomanip> /** performMults(double * arr, double * b, const int N, const int SIZE) * For every ith row in matrix a, multiply a[i,j] by b[j] */ __global__ void performMults(double * a, double * b, int ROW_SIZE, int SIZE) { int a_index = blockIdx.x * blockDim.x + threadIdx.x; int b_index = a_index % ROW_SIZE; if (a_index >= SIZE) return; // The multiplication stage must be done before the mapping and reduction stage // all of these tasks can be done in parallel a[a_index] *= b[b_index]; } using namespace std; /** sumRows(double * arr, double * c, const int N, const int SIZE) * Expects arr to be a matrix, and c a result vector * c[i] = sum(a[i,j] * b[i]) * */ __global__ void sumRows(double * a, double * b, double * c, const int ROW_SIZE, const int SIZE) { int a_index = blockIdx.x * blockDim.x + threadIdx.x; int c_index = a_index / ROW_SIZE; int b_index = a_index % ROW_SIZE; // you can consider b_index the row id (0 start, ROW_SIZE-1 end) /* a 3x3 matrix example a index values: the specific element to operate on 0 1 2 3 4 5 6 7 8 c index values: where to add sum to 0 0 0 1 1 1 2 2 2 b index values: where we are in the row 0 1 2 0 1 2 0 1 2*/ if (b_index == 0) // if we are a zero index, sum up the row up to but not including the next 0 row. { int local_c_sum = 0; for (int i = 0; i < ROW_SIZE; i++) local_c_sum += a[c_index * ROW_SIZE + i] * b[i]; c[c_index] = local_c_sum; } // this method is bad because its tasks size grow with the problem instead of the number of tasks. } const int INCORRECT_NUM_ARGS_ERROR = 1; const unsigned THREADS = 512; void usage(); using namespace std; /**** MAIN ***********************/ /*********************************/ int main( int argc, char* argv[] ) { int N = 0; // row size char mode = 'v'; // what to print int threads = 0; // total amount of threads if 0 defaults to 512 per block char values = '1'; // what to fill vectors with switch ( argc ) { case 5: threads = atoi(argv[4]); case 4: values = argv[3][0]; case 3: mode = argv[2][0]; case 2: N = atoi(argv[1]); break; default: usage(); } const int SIZE = N * N; // square matrix N by N thrust::host_vector<double> h_a(SIZE); thrust::host_vector<double> h_b(N); thrust::device_vector<double> d_a(SIZE, 1); thrust::device_vector<double> d_b(N, 1); thrust::device_vector<double> c(N); // if mode is load, load vectors from file, otherwise generate them ourselves if (values != 'l') { bool random = values == 'r'; double lowerlimit = random ? 0 : 1; double upperlimit = random ? 10 : 1; #ifdef DEBUG printf("upperLimit: %f lowerLimit: %f\n", upperlimit, lowerlimit); #endif unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine re(seed); std::uniform_real_distribution<double> unif(lowerlimit,upperlimit); for (int i = 0; i < SIZE; i++) h_a[i] = floor(unif(re)); for (int i = 0; i < N; i++) h_b[i] = floor(unif(re)); } else // load vectors from file { ifstream myfile("input.txt"); for (int i = 0; i < SIZE; i++) myfile >> h_a[i]; for (int i = 0; i < N; i++) myfile >> h_b[i]; myfile.close(); } /* thrust handles the copying of memory from host vectors to device vectors with a simple assignment. */ // record action time auto start = chrono::steady_clock::now(); d_a = h_a; d_b = h_b; auto transfer = chrono::steady_clock::now(); #ifdef DEBUG cout << "Matrix values:" << endl; for (int i = 0; i < SIZE; i++) { cout << h_a[i] << " "; if ((i + 1) % N == 0) cout << endl; } cout << "\n\n"; cout << "Vector values:" << endl; for (int i = 0; i < N; i++) cout << h_b[i] << " "; cout << endl; #endif // vectors are unfortunatly not available on cuda device // but you can get the memory address, pass it to the device, // and treat it as a normal array. double * p_a = thrust::raw_pointer_cast(&d_a[0]); double * p_b = thrust::raw_pointer_cast(&d_b[0]); double * p_c = thrust::raw_pointer_cast(&c[0]); unsigned blocks; // one thread per block, if doing the Karp-Flatt Metric // if we were given a set amount of threads // set to it if ( threads ) { #ifdef DEBUG if (N > threads) cout << "Warning! incorrect number of threads will not perform correctly." << endl; #endif // assume threads is a multiple of 32 blocks = threads/32; // ensures that there are exactly as many given threads on the problem threads = 32; } else { threads = THREADS; blocks = (SIZE / THREADS) + 1; } #ifdef DEBUG cout << "blocks: " << blocks << " threads: " << threads << endl; #endif // record action time //auto start = chrono::steady_clock::now(); //performMults<<<blocks, threads>>>(p_a, p_b, N, SIZE); //cudaDeviceSynchronize(); #ifdef DEBUG h_a = d_a; cout << "Matrix values after mulltiplication:" << endl; for (int i = 0; i < SIZE; i++) { cout << h_a[i] << " "; if ((i + 1) % N == 0) cout << endl; } #endif sumRows<<<blocks, threads>>>(p_a, p_b, p_c, N, SIZE); cudaDeviceSynchronize(); auto end = chrono::steady_clock::now(); // print out time took if requested #ifndef DEBUG // if debug dont check just print if (mode == 't') #endif #ifdef DEBUG // with a title too cout << "time ns:\n"; #endif cout << chrono::duration_cast<chrono::nanoseconds>(end - start).count(); thrust::host_vector<double> result = c; #ifdef DEBUG printf("\n\nresult:\n"); #endif #ifndef DEBUG if (mode == 'v') #endif for (int i = 0; i < N; i++) cout << fixed << setprecision(2) << result[i] << " "; #ifdef DEBUG cout << endl; #endif return 0; } void usage() { printf("./main <row size> <mode> <values> <threads>\n"); printf("<row size> : required\n<mode> : v to print result, t to print time nanoseconds\n<values> : 1 all 1 values, r all random, l load from file.\n"); exit(INCORRECT_NUM_ARGS_ERROR); }
/** Thrust Library **/ #include <hip/hip_runtime.h> #include <thrust/random.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> /** Std library **/ #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <random> #include <time.h> #include <chrono> #include <fstream> #include <string> #include <iomanip> /** performMults(double * arr, double * b, const int N, const int SIZE) * For every ith row in matrix a, multiply a[i,j] by b[j] */ __global__ void performMults(double * a, double * b, int ROW_SIZE, int SIZE) { int a_index = blockIdx.x * blockDim.x + threadIdx.x; int b_index = a_index % ROW_SIZE; if (a_index >= SIZE) return; // The multiplication stage must be done before the mapping and reduction stage // all of these tasks can be done in parallel a[a_index] *= b[b_index]; } using namespace std; /** sumRows(double * arr, double * c, const int N, const int SIZE) * Expects arr to be a matrix, and c a result vector * c[i] = sum(a[i,j] * b[i]) * */ __global__ void sumRows(double * a, double * b, double * c, const int ROW_SIZE, const int SIZE) { int a_index = blockIdx.x * blockDim.x + threadIdx.x; int c_index = a_index / ROW_SIZE; int b_index = a_index % ROW_SIZE; // you can consider b_index the row id (0 start, ROW_SIZE-1 end) /* a 3x3 matrix example a index values: the specific element to operate on 0 1 2 3 4 5 6 7 8 c index values: where to add sum to 0 0 0 1 1 1 2 2 2 b index values: where we are in the row 0 1 2 0 1 2 0 1 2*/ if (b_index == 0) // if we are a zero index, sum up the row up to but not including the next 0 row. { int local_c_sum = 0; for (int i = 0; i < ROW_SIZE; i++) local_c_sum += a[c_index * ROW_SIZE + i] * b[i]; c[c_index] = local_c_sum; } // this method is bad because its tasks size grow with the problem instead of the number of tasks. } const int INCORRECT_NUM_ARGS_ERROR = 1; const unsigned THREADS = 512; void usage(); using namespace std; /**** MAIN ***********************/ /*********************************/ int main( int argc, char* argv[] ) { int N = 0; // row size char mode = 'v'; // what to print int threads = 0; // total amount of threads if 0 defaults to 512 per block char values = '1'; // what to fill vectors with switch ( argc ) { case 5: threads = atoi(argv[4]); case 4: values = argv[3][0]; case 3: mode = argv[2][0]; case 2: N = atoi(argv[1]); break; default: usage(); } const int SIZE = N * N; // square matrix N by N thrust::host_vector<double> h_a(SIZE); thrust::host_vector<double> h_b(N); thrust::device_vector<double> d_a(SIZE, 1); thrust::device_vector<double> d_b(N, 1); thrust::device_vector<double> c(N); // if mode is load, load vectors from file, otherwise generate them ourselves if (values != 'l') { bool random = values == 'r'; double lowerlimit = random ? 0 : 1; double upperlimit = random ? 10 : 1; #ifdef DEBUG printf("upperLimit: %f lowerLimit: %f\n", upperlimit, lowerlimit); #endif unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine re(seed); std::uniform_real_distribution<double> unif(lowerlimit,upperlimit); for (int i = 0; i < SIZE; i++) h_a[i] = floor(unif(re)); for (int i = 0; i < N; i++) h_b[i] = floor(unif(re)); } else // load vectors from file { ifstream myfile("input.txt"); for (int i = 0; i < SIZE; i++) myfile >> h_a[i]; for (int i = 0; i < N; i++) myfile >> h_b[i]; myfile.close(); } /* thrust handles the copying of memory from host vectors to device vectors with a simple assignment. */ // record action time auto start = chrono::steady_clock::now(); d_a = h_a; d_b = h_b; auto transfer = chrono::steady_clock::now(); #ifdef DEBUG cout << "Matrix values:" << endl; for (int i = 0; i < SIZE; i++) { cout << h_a[i] << " "; if ((i + 1) % N == 0) cout << endl; } cout << "\n\n"; cout << "Vector values:" << endl; for (int i = 0; i < N; i++) cout << h_b[i] << " "; cout << endl; #endif // vectors are unfortunatly not available on cuda device // but you can get the memory address, pass it to the device, // and treat it as a normal array. double * p_a = thrust::raw_pointer_cast(&d_a[0]); double * p_b = thrust::raw_pointer_cast(&d_b[0]); double * p_c = thrust::raw_pointer_cast(&c[0]); unsigned blocks; // one thread per block, if doing the Karp-Flatt Metric // if we were given a set amount of threads // set to it if ( threads ) { #ifdef DEBUG if (N > threads) cout << "Warning! incorrect number of threads will not perform correctly." << endl; #endif // assume threads is a multiple of 32 blocks = threads/32; // ensures that there are exactly as many given threads on the problem threads = 32; } else { threads = THREADS; blocks = (SIZE / THREADS) + 1; } #ifdef DEBUG cout << "blocks: " << blocks << " threads: " << threads << endl; #endif // record action time //auto start = chrono::steady_clock::now(); //performMults<<<blocks, threads>>>(p_a, p_b, N, SIZE); //cudaDeviceSynchronize(); #ifdef DEBUG h_a = d_a; cout << "Matrix values after mulltiplication:" << endl; for (int i = 0; i < SIZE; i++) { cout << h_a[i] << " "; if ((i + 1) % N == 0) cout << endl; } #endif sumRows<<<blocks, threads>>>(p_a, p_b, p_c, N, SIZE); hipDeviceSynchronize(); auto end = chrono::steady_clock::now(); // print out time took if requested #ifndef DEBUG // if debug dont check just print if (mode == 't') #endif #ifdef DEBUG // with a title too cout << "time ns:\n"; #endif cout << chrono::duration_cast<chrono::nanoseconds>(end - start).count(); thrust::host_vector<double> result = c; #ifdef DEBUG printf("\n\nresult:\n"); #endif #ifndef DEBUG if (mode == 'v') #endif for (int i = 0; i < N; i++) cout << fixed << setprecision(2) << result[i] << " "; #ifdef DEBUG cout << endl; #endif return 0; } void usage() { printf("./main <row size> <mode> <values> <threads>\n"); printf("<row size> : required\n<mode> : v to print result, t to print time nanoseconds\n<values> : 1 all 1 values, r all random, l load from file.\n"); exit(INCORRECT_NUM_ARGS_ERROR); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/** Thrust Library **/ #include <hip/hip_runtime.h> #include <thrust/random.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> /** Std library **/ #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <random> #include <time.h> #include <chrono> #include <fstream> #include <string> #include <iomanip> /** performMults(double * arr, double * b, const int N, const int SIZE) * For every ith row in matrix a, multiply a[i,j] by b[j] */ __global__ void performMults(double * a, double * b, int ROW_SIZE, int SIZE) { int a_index = blockIdx.x * blockDim.x + threadIdx.x; int b_index = a_index % ROW_SIZE; if (a_index >= SIZE) return; // The multiplication stage must be done before the mapping and reduction stage // all of these tasks can be done in parallel a[a_index] *= b[b_index]; } using namespace std; /** sumRows(double * arr, double * c, const int N, const int SIZE) * Expects arr to be a matrix, and c a result vector * c[i] = sum(a[i,j] * b[i]) * */ __global__ void sumRows(double * a, double * b, double * c, const int ROW_SIZE, const int SIZE) { int a_index = blockIdx.x * blockDim.x + threadIdx.x; int c_index = a_index / ROW_SIZE; int b_index = a_index % ROW_SIZE; // you can consider b_index the row id (0 start, ROW_SIZE-1 end) /* a 3x3 matrix example a index values: the specific element to operate on 0 1 2 3 4 5 6 7 8 c index values: where to add sum to 0 0 0 1 1 1 2 2 2 b index values: where we are in the row 0 1 2 0 1 2 0 1 2*/ if (b_index == 0) // if we are a zero index, sum up the row up to but not including the next 0 row. { int local_c_sum = 0; for (int i = 0; i < ROW_SIZE; i++) local_c_sum += a[c_index * ROW_SIZE + i] * b[i]; c[c_index] = local_c_sum; } // this method is bad because its tasks size grow with the problem instead of the number of tasks. } const int INCORRECT_NUM_ARGS_ERROR = 1; const unsigned THREADS = 512; void usage(); using namespace std; /**** MAIN ***********************/ /*********************************/ int main( int argc, char* argv[] ) { int N = 0; // row size char mode = 'v'; // what to print int threads = 0; // total amount of threads if 0 defaults to 512 per block char values = '1'; // what to fill vectors with switch ( argc ) { case 5: threads = atoi(argv[4]); case 4: values = argv[3][0]; case 3: mode = argv[2][0]; case 2: N = atoi(argv[1]); break; default: usage(); } const int SIZE = N * N; // square matrix N by N thrust::host_vector<double> h_a(SIZE); thrust::host_vector<double> h_b(N); thrust::device_vector<double> d_a(SIZE, 1); thrust::device_vector<double> d_b(N, 1); thrust::device_vector<double> c(N); // if mode is load, load vectors from file, otherwise generate them ourselves if (values != 'l') { bool random = values == 'r'; double lowerlimit = random ? 0 : 1; double upperlimit = random ? 10 : 1; #ifdef DEBUG printf("upperLimit: %f lowerLimit: %f\n", upperlimit, lowerlimit); #endif unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine re(seed); std::uniform_real_distribution<double> unif(lowerlimit,upperlimit); for (int i = 0; i < SIZE; i++) h_a[i] = floor(unif(re)); for (int i = 0; i < N; i++) h_b[i] = floor(unif(re)); } else // load vectors from file { ifstream myfile("input.txt"); for (int i = 0; i < SIZE; i++) myfile >> h_a[i]; for (int i = 0; i < N; i++) myfile >> h_b[i]; myfile.close(); } /* thrust handles the copying of memory from host vectors to device vectors with a simple assignment. */ // record action time auto start = chrono::steady_clock::now(); d_a = h_a; d_b = h_b; auto transfer = chrono::steady_clock::now(); #ifdef DEBUG cout << "Matrix values:" << endl; for (int i = 0; i < SIZE; i++) { cout << h_a[i] << " "; if ((i + 1) % N == 0) cout << endl; } cout << "\n\n"; cout << "Vector values:" << endl; for (int i = 0; i < N; i++) cout << h_b[i] << " "; cout << endl; #endif // vectors are unfortunatly not available on cuda device // but you can get the memory address, pass it to the device, // and treat it as a normal array. double * p_a = thrust::raw_pointer_cast(&d_a[0]); double * p_b = thrust::raw_pointer_cast(&d_b[0]); double * p_c = thrust::raw_pointer_cast(&c[0]); unsigned blocks; // one thread per block, if doing the Karp-Flatt Metric // if we were given a set amount of threads // set to it if ( threads ) { #ifdef DEBUG if (N > threads) cout << "Warning! incorrect number of threads will not perform correctly." << endl; #endif // assume threads is a multiple of 32 blocks = threads/32; // ensures that there are exactly as many given threads on the problem threads = 32; } else { threads = THREADS; blocks = (SIZE / THREADS) + 1; } #ifdef DEBUG cout << "blocks: " << blocks << " threads: " << threads << endl; #endif // record action time //auto start = chrono::steady_clock::now(); //performMults<<<blocks, threads>>>(p_a, p_b, N, SIZE); //cudaDeviceSynchronize(); #ifdef DEBUG h_a = d_a; cout << "Matrix values after mulltiplication:" << endl; for (int i = 0; i < SIZE; i++) { cout << h_a[i] << " "; if ((i + 1) % N == 0) cout << endl; } #endif sumRows<<<blocks, threads>>>(p_a, p_b, p_c, N, SIZE); hipDeviceSynchronize(); auto end = chrono::steady_clock::now(); // print out time took if requested #ifndef DEBUG // if debug dont check just print if (mode == 't') #endif #ifdef DEBUG // with a title too cout << "time ns:\n"; #endif cout << chrono::duration_cast<chrono::nanoseconds>(end - start).count(); thrust::host_vector<double> result = c; #ifdef DEBUG printf("\n\nresult:\n"); #endif #ifndef DEBUG if (mode == 'v') #endif for (int i = 0; i < N; i++) cout << fixed << setprecision(2) << result[i] << " "; #ifdef DEBUG cout << endl; #endif return 0; } void usage() { printf("./main <row size> <mode> <values> <threads>\n"); printf("<row size> : required\n<mode> : v to print result, t to print time nanoseconds\n<values> : 1 all 1 values, r all random, l load from file.\n"); exit(INCORRECT_NUM_ARGS_ERROR); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12performMultsPdS_ii .globl _Z12performMultsPdS_ii .p2align 8 .type _Z12performMultsPdS_ii,@function _Z12performMultsPdS_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s3, s[0:1], 0x14 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b32 s2, s[0:1], 0x10 v_ashrrev_i32_e32 v3, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v4, v1, v3 v_xor_b32_e32 v4, v4, v3 s_waitcnt lgkmcnt(0) s_ashr_i32 s3, s2, 31 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_add_i32 s2, s2, s3 s_xor_b32 s2, s2, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_cvt_f32_u32_e32 v0, s2 s_sub_i32 s3, 0, s2 v_rcp_iflag_f32_e32 v0, v0 s_waitcnt_depctr 0xfff v_mul_f32_e32 v0, 0x4f7ffffe, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cvt_u32_f32_e32 v0, v0 v_mul_lo_u32 v2, s3, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_hi_u32 v2, v0, v2 v_add_nc_u32_e32 v0, v0, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_hi_u32 v0, v4, v0 v_mul_lo_u32 v0, v0, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v0, v4, v0 v_subrev_nc_u32_e32 v2, s2, v0 v_cmp_le_u32_e32 vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v0, v0, v2, vcc_lo v_subrev_nc_u32_e32 v2, s2, v0 v_cmp_le_u32_e32 vcc_lo, s2, v0 s_load_b128 s[0:3], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v0, v0, v2, vcc_lo v_ashrrev_i32_e32 v2, 31, v1 v_xor_b32_e32 v0, v0, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_sub_nc_u32_e32 v3, v0, v3 v_lshlrev_b64 v[0:1], 3, v[1:2] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[3:4], 3, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s2, v3 v_add_co_ci_u32_e32 v3, vcc_lo, s3, v4, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b64 v[2:3], v[2:3], off global_load_b64 v[4:5], v[0:1], off s_waitcnt vmcnt(0) v_mul_f64 v[2:3], v[2:3], v[4:5] global_store_b64 v[0:1], v[2:3], off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12performMultsPdS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12performMultsPdS_ii, .Lfunc_end0-_Z12performMultsPdS_ii .section .AMDGPU.csdata,"",@progbits .text .protected _Z7sumRowsPdS_S_ii .globl _Z7sumRowsPdS_S_ii .p2align 8 .type _Z7sumRowsPdS_S_ii,@function _Z7sumRowsPdS_S_ii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x18 s_load_b32 s3, s[0:1], 0x2c s_waitcnt lgkmcnt(0) s_ashr_i32 s4, s2, 31 s_and_b32 s3, s3, 0xffff s_add_i32 s5, s2, s4 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_xor_b32 s5, s5, s4 v_cvt_f32_u32_e32 v1, s5 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v1, v1 s_waitcnt_depctr 0xfff v_mul_f32_e32 v1, 0x4f7ffffe, v1 v_cvt_u32_f32_e32 v1, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1] s_sub_i32 s3, 0, s5 v_mul_lo_u32 v0, s3, v1 s_mov_b32 s3, exec_lo s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_mul_hi_u32 v0, v1, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v4, v2, v3 v_xor_b32_e32 v4, v4, v3 v_xor_b32_e32 v3, s4, v3 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v0, v1, v0 v_mul_hi_u32 v0, v4, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v0, s5 v_sub_nc_u32_e32 v1, v4, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_subrev_nc_u32_e32 v5, s5, v1 v_cmp_le_u32_e32 vcc_lo, s5, v1 v_dual_cndmask_b32 v1, v1, v5 :: v_dual_add_nc_u32 v4, 1, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v0, v0, v4, vcc_lo v_cmp_le_u32_e32 vcc_lo, s5, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v4, 1, v0 v_cndmask_b32_e32 v0, v0, v4, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_xor_b32_e32 v0, v0, v3 v_sub_nc_u32_e32 v0, v0, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v0, s2 v_sub_nc_u32_e32 v2, v2, v1 s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v2 s_cbranch_execz .LBB1_7 s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB1_5 s_load_b128 s[4:7], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 3, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v1, vcc_lo, s4, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo .LBB1_3: global_load_b64 v[4:5], v[1:2], off v_cvt_f64_i32_e32 v[6:7], v3 s_load_b64 s[4:5], s[6:7], 0x0 v_add_co_u32 v1, vcc_lo, v1, 8 s_add_i32 s2, s2, -1 v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo s_add_u32 s6, s6, 8 s_addc_u32 s7, s7, 0 s_cmp_eq_u32 s2, 0 s_waitcnt vmcnt(0) lgkmcnt(0) s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f64 v[3:4], v[4:5], s[4:5], v[6:7] v_cvt_i32_f64_e32 v3, v[3:4] s_cbranch_scc0 .LBB1_3 s_delay_alu instid0(VALU_DEP_1) v_cvt_f64_i32_e32 v[2:3], v3 s_branch .LBB1_6 .LBB1_5: v_mov_b32_e32 v2, 0 v_mov_b32_e32 v3, 0 .LBB1_6: s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 3, v[0:1] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b64 v[0:1], v[2:3], off .LBB1_7: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z7sumRowsPdS_S_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z7sumRowsPdS_S_ii, .Lfunc_end1-_Z7sumRowsPdS_S_ii .section .AMDGPU.csdata,"",@progbits .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_,comdat .protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_ .globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_ .p2align 8 .type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_,@function _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_: s_load_b128 s[4:7], s[0:1], 0x10 s_lshl_b32 s2, s15, 8 s_waitcnt lgkmcnt(0) s_add_u32 s2, s2, s6 s_addc_u32 s3, 0, s7 s_sub_u32 s4, s4, s2 s_subb_u32 s5, s5, s3 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_u64_e64 s5, 0x100, s[4:5] s_and_b32 s5, s5, exec_lo s_cselect_b32 s4, s4, 0x100 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) v_cmp_gt_u32_e32 vcc_lo, s4, v0 s_cmpk_eq_i32 s4, 0x100 s_cselect_b32 s4, -1, 0 s_or_b32 s4, s4, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s5, s4 s_cbranch_execz .LBB2_2 s_load_b128 s[4:7], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 3, v0 s_lshl_b64 s[0:1], s[2:3], 3 s_waitcnt lgkmcnt(0) s_add_u32 s0, s4, s0 v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 s_addc_u32 s1, s5, s1 v_add_co_u32 v0, s0, s0, v0 s_delay_alu instid0(VALU_DEP_1) v_add_co_ci_u32_e64 v1, null, s1, 0, s0 flat_store_b64 v[0:1], v[2:3] .LBB2_2: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 32 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_,comdat .Lfunc_end2: .size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_, .Lfunc_end2-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_ .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12performMultsPdS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12performMultsPdS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z7sumRowsPdS_S_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z7sumRowsPdS_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 16 .value_kind: by_value - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 32 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 256 .name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_20__uninitialized_fill7functorINS_10device_ptrIdEEdEEmLj1EEEvT0_T1_S9_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void AvgCentroidCoordinatesKernel( float *centroidCoordinates, float *pointsWeight, int inputSize, int centroids ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < centroids * 2) { if(pointsWeight[threadId / 2] == 0.00f) { centroidCoordinates[threadId] = 0.00f; } else { centroidCoordinates[threadId] = centroidCoordinates[threadId] / pointsWeight[threadId / 2]; } } }
code for sm_80 Function : _Z28AvgCentroidCoordinatesKernelPfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002600 */ /*0020*/ ULDC UR4, c[0x0][0x174] ; /* 0x00005d0000047ab9 */ /* 0x000fe40000000800 */ /*0030*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fe2000800063f */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0060*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */ /* 0x001fc800078e0203 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf06270 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ LEA.HI R2, R0, R0, RZ, 0x1 ; /* 0x0000000000027211 */ /* 0x000fe200078f08ff */ /*00b0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00d0*/ SHF.R.S32.HI R2, RZ, 0x1, R2 ; /* 0x00000001ff027819 */ /* 0x000fca0000011402 */ /*00e0*/ IMAD.WIDE R4, R2, R3, c[0x0][0x168] ; /* 0x00005a0002047625 */ /* 0x000fcc00078e0203 */ /*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0100*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fe200078e0203 */ /*0110*/ FSETP.NEU.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720b */ /* 0x004fda0003f0d000 */ /*0120*/ @!P0 BRA 0x220 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0130*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */ /* 0x000ea2000c1e1900 */ /*0140*/ MUFU.RCP R7, R4 ; /* 0x0000000400077308 */ /* 0x000e220000001000 */ /*0150*/ BSSY B0, 0x200 ; /* 0x000000a000007945 */ /* 0x000fe20003800000 */ /*0160*/ FFMA R0, -R4, R7, 1 ; /* 0x3f80000004007423 */ /* 0x001fc80000000107 */ /*0170*/ FFMA R0, R7, R0, R7 ; /* 0x0000000007007223 */ /* 0x000fe40000000007 */ /*0180*/ FCHK P0, R5, R4 ; /* 0x0000000405007302 */ /* 0x004e240000000000 */ /*0190*/ FFMA R7, R5, R0, RZ ; /* 0x0000000005077223 */ /* 0x000fc800000000ff */ /*01a0*/ FFMA R6, -R4, R7, R5 ; /* 0x0000000704067223 */ /* 0x000fc80000000105 */ /*01b0*/ FFMA R7, R0, R6, R7 ; /* 0x0000000600077223 */ /* 0x000fe20000000007 */ /*01c0*/ @!P0 BRA 0x1f0 ; /* 0x0000002000008947 */ /* 0x001fea0003800000 */ /*01d0*/ MOV R0, 0x1f0 ; /* 0x000001f000007802 */ /* 0x000fe40000000f00 */ /*01e0*/ CALL.REL.NOINC 0x240 ; /* 0x0000005000007944 */ /* 0x000fea0003c00000 */ /*01f0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0200*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe2000c101904 */ /*0210*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0220*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x000fe2000c101904 */ /*0230*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0240*/ SHF.R.U32.HI R7, RZ, 0x17, R4.reuse ; /* 0x00000017ff077819 */ /* 0x100fe20000011604 */ /*0250*/ BSSY B1, 0x8a0 ; /* 0x0000064000017945 */ /* 0x000fe20003800000 */ /*0260*/ SHF.R.U32.HI R6, RZ, 0x17, R5.reuse ; /* 0x00000017ff067819 */ /* 0x100fe20000011605 */ /*0270*/ IMAD.MOV.U32 R8, RZ, RZ, R5 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0005 */ /*0280*/ LOP3.LUT R7, R7, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff07077812 */ /* 0x000fe200078ec0ff */ /*0290*/ IMAD.MOV.U32 R9, RZ, RZ, R4 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0004 */ /*02a0*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */ /* 0x000fe400078ec0ff */ /*02b0*/ IADD3 R12, R7, -0x1, RZ ; /* 0xffffffff070c7810 */ /* 0x000fc40007ffe0ff */ /*02c0*/ IADD3 R11, R6, -0x1, RZ ; /* 0xffffffff060b7810 */ /* 0x000fe40007ffe0ff */ /*02d0*/ ISETP.GT.U32.AND P0, PT, R12, 0xfd, PT ; /* 0x000000fd0c00780c */ /* 0x000fc80003f04070 */ /*02e0*/ ISETP.GT.U32.OR P0, PT, R11, 0xfd, P0 ; /* 0x000000fd0b00780c */ /* 0x000fda0000704470 */ /*02f0*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a8224 */ /* 0x000fe200078e00ff */ /*0300*/ @!P0 BRA 0x480 ; /* 0x0000017000008947 */ /* 0x000fea0003800000 */ /*0310*/ FSETP.GTU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */ /* 0x000fe40003f1c200 */ /*0320*/ FSETP.GTU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */ /* 0x000fc80003f3c200 */ /*0330*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000703570 */ /*0340*/ @P0 BRA 0x880 ; /* 0x0000053000000947 */ /* 0x000fea0003800000 */ /*0350*/ LOP3.LUT P0, RZ, R9, 0x7fffffff, R8, 0xc8, !PT ; /* 0x7fffffff09ff7812 */ /* 0x000fda000780c808 */ /*0360*/ @!P0 BRA 0x860 ; /* 0x000004f000008947 */ /* 0x000fea0003800000 */ /*0370*/ FSETP.NEU.FTZ.AND P2, PT, |R5|.reuse, +INF , PT ; /* 0x7f8000000500780b */ /* 0x040fe40003f5d200 */ /*0380*/ FSETP.NEU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */ /* 0x000fe40003f3d200 */ /*0390*/ FSETP.NEU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */ /* 0x000fd60003f1d200 */ /*03a0*/ @!P1 BRA !P2, 0x860 ; /* 0x000004b000009947 */ /* 0x000fea0005000000 */ /*03b0*/ LOP3.LUT P2, RZ, R8, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff08ff7812 */ /* 0x000fc8000784c0ff */ /*03c0*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000f24572 */ /*03d0*/ @P1 BRA 0x840 ; /* 0x0000046000001947 */ /* 0x000fea0003800000 */ /*03e0*/ LOP3.LUT P1, RZ, R9, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff09ff7812 */ /* 0x000fc8000782c0ff */ /*03f0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000702572 */ /*0400*/ @P0 BRA 0x810 ; /* 0x0000040000000947 */ /* 0x000fea0003800000 */ /*0410*/ ISETP.GE.AND P0, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */ /* 0x000fe40003f06270 */ /*0420*/ ISETP.GE.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fd60003f26270 */ /*0430*/ @P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a0224 */ /* 0x000fe400078e00ff */ /*0440*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, -0x40 ; /* 0xffffffc0ff0a8424 */ /* 0x000fe400078e00ff */ /*0450*/ @!P0 FFMA R8, R5, 1.84467440737095516160e+19, RZ ; /* 0x5f80000005088823 */ /* 0x000fe400000000ff */ /*0460*/ @!P1 FFMA R9, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004099823 */ /* 0x000fe200000000ff */ /*0470*/ @!P1 IADD3 R10, R10, 0x40, RZ ; /* 0x000000400a0a9810 */ /* 0x000fe40007ffe0ff */ /*0480*/ LEA R4, R7, 0xc0800000, 0x17 ; /* 0xc080000007047811 */ /* 0x000fe200078eb8ff */ /*0490*/ BSSY B2, 0x800 ; /* 0x0000036000027945 */ /* 0x000fe20003800000 */ /*04a0*/ IADD3 R6, R6, -0x7f, RZ ; /* 0xffffff8106067810 */ /* 0x000fc60007ffe0ff */ /*04b0*/ IMAD.IADD R9, R9, 0x1, -R4 ; /* 0x0000000109097824 */ /* 0x000fe200078e0a04 */ /*04c0*/ IADD3 R7, R6.reuse, 0x7f, -R7 ; /* 0x0000007f06077810 */ /* 0x040fe20007ffe807 */ /*04d0*/ IMAD R8, R6, -0x800000, R8 ; /* 0xff80000006087824 */ /* 0x000fe400078e0208 */ /*04e0*/ MUFU.RCP R4, R9 ; /* 0x0000000900047308 */ /* 0x000e220000001000 */ /*04f0*/ FADD.FTZ R5, -R9, -RZ ; /* 0x800000ff09057221 */ /* 0x000fe40000010100 */ /*0500*/ IMAD.IADD R7, R7, 0x1, R10 ; /* 0x0000000107077824 */ /* 0x000fe400078e020a */ /*0510*/ FFMA R11, R4, R5, 1 ; /* 0x3f800000040b7423 */ /* 0x001fc80000000005 */ /*0520*/ FFMA R13, R4, R11, R4 ; /* 0x0000000b040d7223 */ /* 0x000fc80000000004 */ /*0530*/ FFMA R4, R8, R13, RZ ; /* 0x0000000d08047223 */ /* 0x000fc800000000ff */ /*0540*/ FFMA R11, R5, R4, R8 ; /* 0x00000004050b7223 */ /* 0x000fc80000000008 */ /*0550*/ FFMA R12, R13, R11, R4 ; /* 0x0000000b0d0c7223 */ /* 0x000fc80000000004 */ /*0560*/ FFMA R8, R5, R12, R8 ; /* 0x0000000c05087223 */ /* 0x000fc80000000008 */ /*0570*/ FFMA R4, R13, R8, R12 ; /* 0x000000080d047223 */ /* 0x000fca000000000c */ /*0580*/ SHF.R.U32.HI R5, RZ, 0x17, R4 ; /* 0x00000017ff057819 */ /* 0x000fc80000011604 */ /*0590*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */ /* 0x000fca00078ec0ff */ /*05a0*/ IMAD.IADD R9, R5, 0x1, R7 ; /* 0x0000000105097824 */ /* 0x000fca00078e0207 */ /*05b0*/ IADD3 R5, R9, -0x1, RZ ; /* 0xffffffff09057810 */ /* 0x000fc80007ffe0ff */ /*05c0*/ ISETP.GE.U32.AND P0, PT, R5, 0xfe, PT ; /* 0x000000fe0500780c */ /* 0x000fda0003f06070 */ /*05d0*/ @!P0 BRA 0x7e0 ; /* 0x0000020000008947 */ /* 0x000fea0003800000 */ /*05e0*/ ISETP.GT.AND P0, PT, R9, 0xfe, PT ; /* 0x000000fe0900780c */ /* 0x000fda0003f04270 */ /*05f0*/ @P0 BRA 0x7b0 ; /* 0x000001b000000947 */ /* 0x000fea0003800000 */ /*0600*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */ /* 0x000fda0003f06270 */ /*0610*/ @P0 BRA 0x7f0 ; /* 0x000001d000000947 */ /* 0x000fea0003800000 */ /*0620*/ ISETP.GE.AND P0, PT, R9, -0x18, PT ; /* 0xffffffe80900780c */ /* 0x000fe40003f06270 */ /*0630*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */ /* 0x000fd600078ec0ff */ /*0640*/ @!P0 BRA 0x7f0 ; /* 0x000001a000008947 */ /* 0x000fea0003800000 */ /*0650*/ FFMA.RZ R5, R13, R8.reuse, R12.reuse ; /* 0x000000080d057223 */ /* 0x180fe2000000c00c */ /*0660*/ ISETP.NE.AND P2, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe20003f45270 */ /*0670*/ FFMA.RM R6, R13, R8.reuse, R12.reuse ; /* 0x000000080d067223 */ /* 0x180fe2000000400c */ /*0680*/ ISETP.NE.AND P1, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe40003f25270 */ /*0690*/ LOP3.LUT R7, R5, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff05077812 */ /* 0x000fe200078ec0ff */ /*06a0*/ FFMA.RP R5, R13, R8, R12 ; /* 0x000000080d057223 */ /* 0x000fe2000000800c */ /*06b0*/ IADD3 R8, R9, 0x20, RZ ; /* 0x0000002009087810 */ /* 0x000fe20007ffe0ff */ /*06c0*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0a09 */ /*06d0*/ LOP3.LUT R7, R7, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000007077812 */ /* 0x000fe400078efcff */ /*06e0*/ FSETP.NEU.FTZ.AND P0, PT, R5, R6, PT ; /* 0x000000060500720b */ /* 0x000fc40003f1d000 */ /*06f0*/ SHF.L.U32 R8, R7, R8, RZ ; /* 0x0000000807087219 */ /* 0x000fe400000006ff */ /*0700*/ SEL R6, R9, RZ, P2 ; /* 0x000000ff09067207 */ /* 0x000fe40001000000 */ /*0710*/ ISETP.NE.AND P1, PT, R8, RZ, P1 ; /* 0x000000ff0800720c */ /* 0x000fe40000f25270 */ /*0720*/ SHF.R.U32.HI R6, RZ, R6, R7 ; /* 0x00000006ff067219 */ /* 0x000fe40000011607 */ /*0730*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40000703570 */ /*0740*/ SHF.R.U32.HI R8, RZ, 0x1, R6 ; /* 0x00000001ff087819 */ /* 0x000fc40000011606 */ /*0750*/ SEL R5, RZ, 0x1, !P0 ; /* 0x00000001ff057807 */ /* 0x000fc80004000000 */ /*0760*/ LOP3.LUT R5, R5, 0x1, R8, 0xf8, !PT ; /* 0x0000000105057812 */ /* 0x000fc800078ef808 */ /*0770*/ LOP3.LUT R5, R5, R6, RZ, 0xc0, !PT ; /* 0x0000000605057212 */ /* 0x000fca00078ec0ff */ /*0780*/ IMAD.IADD R5, R8, 0x1, R5 ; /* 0x0000000108057824 */ /* 0x000fca00078e0205 */ /*0790*/ LOP3.LUT R4, R5, R4, RZ, 0xfc, !PT ; /* 0x0000000405047212 */ /* 0x000fe200078efcff */ /*07a0*/ BRA 0x7f0 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*07b0*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */ /* 0x000fc800078ec0ff */ /*07c0*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */ /* 0x000fe200078efcff */ /*07d0*/ BRA 0x7f0 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*07e0*/ IMAD R4, R7, 0x800000, R4 ; /* 0x0080000007047824 */ /* 0x000fe400078e0204 */ /*07f0*/ BSYNC B2 ; /* 0x0000000000027941 */ /* 0x000fea0003800000 */ /*0800*/ BRA 0x890 ; /* 0x0000008000007947 */ /* 0x000fea0003800000 */ /*0810*/ LOP3.LUT R4, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009047812 */ /* 0x000fc800078e4808 */ /*0820*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */ /* 0x000fe200078efcff */ /*0830*/ BRA 0x890 ; /* 0x0000005000007947 */ /* 0x000fea0003800000 */ /*0840*/ LOP3.LUT R4, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009047812 */ /* 0x000fe200078e4808 */ /*0850*/ BRA 0x890 ; /* 0x0000003000007947 */ /* 0x000fea0003800000 */ /*0860*/ MUFU.RSQ R4, -QNAN ; /* 0xffc0000000047908 */ /* 0x000e220000001400 */ /*0870*/ BRA 0x890 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*0880*/ FADD.FTZ R4, R5, R4 ; /* 0x0000000405047221 */ /* 0x000fe40000010000 */ /*0890*/ BSYNC B1 ; /* 0x0000000000017941 */ /* 0x000fea0003800000 */ /*08a0*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */ /* 0x001fe400078e0004 */ /*08b0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */ /* 0x000fe400078e0000 */ /*08c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */ /* 0x000fc800078e00ff */ /*08d0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff72004007950 */ /* 0x000fea0003c3ffff */ /*08e0*/ BRA 0x8e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void AvgCentroidCoordinatesKernel( float *centroidCoordinates, float *pointsWeight, int inputSize, int centroids ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < centroids * 2) { if(pointsWeight[threadId / 2] == 0.00f) { centroidCoordinates[threadId] = 0.00f; } else { centroidCoordinates[threadId] = centroidCoordinates[threadId] / pointsWeight[threadId / 2]; } } }
.file "tmpxft_00065088_00000000-6_AvgCentroidCoordinatesKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii .type _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii, @function _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z28AvgCentroidCoordinatesKernelPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii, .-_Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii .globl _Z28AvgCentroidCoordinatesKernelPfS_ii .type _Z28AvgCentroidCoordinatesKernelPfS_ii, @function _Z28AvgCentroidCoordinatesKernelPfS_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z28AvgCentroidCoordinatesKernelPfS_ii, .-_Z28AvgCentroidCoordinatesKernelPfS_ii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z28AvgCentroidCoordinatesKernelPfS_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z28AvgCentroidCoordinatesKernelPfS_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void AvgCentroidCoordinatesKernel( float *centroidCoordinates, float *pointsWeight, int inputSize, int centroids ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < centroids * 2) { if(pointsWeight[threadId / 2] == 0.00f) { centroidCoordinates[threadId] = 0.00f; } else { centroidCoordinates[threadId] = centroidCoordinates[threadId] / pointsWeight[threadId / 2]; } } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void AvgCentroidCoordinatesKernel( float *centroidCoordinates, float *pointsWeight, int inputSize, int centroids ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < centroids * 2) { if(pointsWeight[threadId / 2] == 0.00f) { centroidCoordinates[threadId] = 0.00f; } else { centroidCoordinates[threadId] = centroidCoordinates[threadId] / pointsWeight[threadId / 2]; } } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void AvgCentroidCoordinatesKernel( float *centroidCoordinates, float *pointsWeight, int inputSize, int centroids ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < centroids * 2) { if(pointsWeight[threadId / 2] == 0.00f) { centroidCoordinates[threadId] = 0.00f; } else { centroidCoordinates[threadId] = centroidCoordinates[threadId] / pointsWeight[threadId / 2]; } } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z28AvgCentroidCoordinatesKernelPfS_ii .globl _Z28AvgCentroidCoordinatesKernelPfS_ii .p2align 8 .type _Z28AvgCentroidCoordinatesKernelPfS_ii,@function _Z28AvgCentroidCoordinatesKernelPfS_ii: s_clause 0x1 s_load_b64 s[2:3], s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x24 s_waitcnt lgkmcnt(0) s_mul_i32 s3, s3, s15 s_and_b32 s4, s4, 0xffff s_add_i32 s3, s3, s14 s_lshl_b32 s2, s2, 1 v_mad_u64_u32 v[1:2], null, s3, s4, v[0:1] s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_i32_e32 vcc_lo, s2, v1 s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_4 v_lshrrev_b32_e32 v0, 31, v1 s_load_b128 s[0:3], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v0, v1, v0 v_ashrrev_i32_e32 v2, 1, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s2, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo global_load_b32 v3, v[2:3], off v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[0:1], 2, v[1:2] v_mov_b32_e32 v2, 0 v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_mov_b32 s0, exec_lo s_waitcnt vmcnt(0) v_cmpx_neq_f32_e32 0, v3 s_cbranch_execz .LBB0_3 global_load_b32 v2, v[0:1], off s_waitcnt vmcnt(0) v_div_scale_f32 v4, null, v3, v3, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_f32_e32 v5, v4 s_waitcnt_depctr 0xfff v_fma_f32 v6, -v4, v5, 1.0 v_fmac_f32_e32 v5, v6, v5 v_div_scale_f32 v6, vcc_lo, v2, v3, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v7, v6, v5 v_fma_f32 v8, -v4, v7, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v7, v8, v5 v_fma_f32 v4, -v4, v7, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_div_fmas_f32 v4, v4, v5, v7 v_div_fixup_f32 v2, v4, v3, v2 .LBB0_3: s_or_b32 exec_lo, exec_lo, s0 global_store_b32 v[0:1], v2, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z28AvgCentroidCoordinatesKernelPfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z28AvgCentroidCoordinatesKernelPfS_ii, .Lfunc_end0-_Z28AvgCentroidCoordinatesKernelPfS_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z28AvgCentroidCoordinatesKernelPfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z28AvgCentroidCoordinatesKernelPfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void AvgCentroidCoordinatesKernel( float *centroidCoordinates, float *pointsWeight, int inputSize, int centroids ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < centroids * 2) { if(pointsWeight[threadId / 2] == 0.00f) { centroidCoordinates[threadId] = 0.00f; } else { centroidCoordinates[threadId] = centroidCoordinates[threadId] / pointsWeight[threadId / 2]; } } }
.text .file "AvgCentroidCoordinatesKernel.hip" .globl _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii # -- Begin function _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .p2align 4, 0x90 .type _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii,@function _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii: # @_Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z28AvgCentroidCoordinatesKernelPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii, .Lfunc_end0-_Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z28AvgCentroidCoordinatesKernelPfS_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z28AvgCentroidCoordinatesKernelPfS_ii,@object # @_Z28AvgCentroidCoordinatesKernelPfS_ii .section .rodata,"a",@progbits .globl _Z28AvgCentroidCoordinatesKernelPfS_ii .p2align 3, 0x0 _Z28AvgCentroidCoordinatesKernelPfS_ii: .quad _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .size _Z28AvgCentroidCoordinatesKernelPfS_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z28AvgCentroidCoordinatesKernelPfS_ii" .size .L__unnamed_1, 39 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z28AvgCentroidCoordinatesKernelPfS_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z28AvgCentroidCoordinatesKernelPfS_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e220000002600 */ /*0020*/ ULDC UR4, c[0x0][0x174] ; /* 0x00005d0000047ab9 */ /* 0x000fe40000000800 */ /*0030*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fe2000800063f */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0050*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0060*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */ /* 0x001fc800078e0203 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */ /* 0x000fda000bf06270 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ LEA.HI R2, R0, R0, RZ, 0x1 ; /* 0x0000000000027211 */ /* 0x000fe200078f08ff */ /*00b0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fe200078e00ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00d0*/ SHF.R.S32.HI R2, RZ, 0x1, R2 ; /* 0x00000001ff027819 */ /* 0x000fca0000011402 */ /*00e0*/ IMAD.WIDE R4, R2, R3, c[0x0][0x168] ; /* 0x00005a0002047625 */ /* 0x000fcc00078e0203 */ /*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0100*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fe200078e0203 */ /*0110*/ FSETP.NEU.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720b */ /* 0x004fda0003f0d000 */ /*0120*/ @!P0 BRA 0x220 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0130*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */ /* 0x000ea2000c1e1900 */ /*0140*/ MUFU.RCP R7, R4 ; /* 0x0000000400077308 */ /* 0x000e220000001000 */ /*0150*/ BSSY B0, 0x200 ; /* 0x000000a000007945 */ /* 0x000fe20003800000 */ /*0160*/ FFMA R0, -R4, R7, 1 ; /* 0x3f80000004007423 */ /* 0x001fc80000000107 */ /*0170*/ FFMA R0, R7, R0, R7 ; /* 0x0000000007007223 */ /* 0x000fe40000000007 */ /*0180*/ FCHK P0, R5, R4 ; /* 0x0000000405007302 */ /* 0x004e240000000000 */ /*0190*/ FFMA R7, R5, R0, RZ ; /* 0x0000000005077223 */ /* 0x000fc800000000ff */ /*01a0*/ FFMA R6, -R4, R7, R5 ; /* 0x0000000704067223 */ /* 0x000fc80000000105 */ /*01b0*/ FFMA R7, R0, R6, R7 ; /* 0x0000000600077223 */ /* 0x000fe20000000007 */ /*01c0*/ @!P0 BRA 0x1f0 ; /* 0x0000002000008947 */ /* 0x001fea0003800000 */ /*01d0*/ MOV R0, 0x1f0 ; /* 0x000001f000007802 */ /* 0x000fe40000000f00 */ /*01e0*/ CALL.REL.NOINC 0x240 ; /* 0x0000005000007944 */ /* 0x000fea0003c00000 */ /*01f0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0200*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe2000c101904 */ /*0210*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0220*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x000fe2000c101904 */ /*0230*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0240*/ SHF.R.U32.HI R7, RZ, 0x17, R4.reuse ; /* 0x00000017ff077819 */ /* 0x100fe20000011604 */ /*0250*/ BSSY B1, 0x8a0 ; /* 0x0000064000017945 */ /* 0x000fe20003800000 */ /*0260*/ SHF.R.U32.HI R6, RZ, 0x17, R5.reuse ; /* 0x00000017ff067819 */ /* 0x100fe20000011605 */ /*0270*/ IMAD.MOV.U32 R8, RZ, RZ, R5 ; /* 0x000000ffff087224 */ /* 0x000fe200078e0005 */ /*0280*/ LOP3.LUT R7, R7, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff07077812 */ /* 0x000fe200078ec0ff */ /*0290*/ IMAD.MOV.U32 R9, RZ, RZ, R4 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0004 */ /*02a0*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */ /* 0x000fe400078ec0ff */ /*02b0*/ IADD3 R12, R7, -0x1, RZ ; /* 0xffffffff070c7810 */ /* 0x000fc40007ffe0ff */ /*02c0*/ IADD3 R11, R6, -0x1, RZ ; /* 0xffffffff060b7810 */ /* 0x000fe40007ffe0ff */ /*02d0*/ ISETP.GT.U32.AND P0, PT, R12, 0xfd, PT ; /* 0x000000fd0c00780c */ /* 0x000fc80003f04070 */ /*02e0*/ ISETP.GT.U32.OR P0, PT, R11, 0xfd, P0 ; /* 0x000000fd0b00780c */ /* 0x000fda0000704470 */ /*02f0*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a8224 */ /* 0x000fe200078e00ff */ /*0300*/ @!P0 BRA 0x480 ; /* 0x0000017000008947 */ /* 0x000fea0003800000 */ /*0310*/ FSETP.GTU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */ /* 0x000fe40003f1c200 */ /*0320*/ FSETP.GTU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */ /* 0x000fc80003f3c200 */ /*0330*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000703570 */ /*0340*/ @P0 BRA 0x880 ; /* 0x0000053000000947 */ /* 0x000fea0003800000 */ /*0350*/ LOP3.LUT P0, RZ, R9, 0x7fffffff, R8, 0xc8, !PT ; /* 0x7fffffff09ff7812 */ /* 0x000fda000780c808 */ /*0360*/ @!P0 BRA 0x860 ; /* 0x000004f000008947 */ /* 0x000fea0003800000 */ /*0370*/ FSETP.NEU.FTZ.AND P2, PT, |R5|.reuse, +INF , PT ; /* 0x7f8000000500780b */ /* 0x040fe40003f5d200 */ /*0380*/ FSETP.NEU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */ /* 0x000fe40003f3d200 */ /*0390*/ FSETP.NEU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */ /* 0x000fd60003f1d200 */ /*03a0*/ @!P1 BRA !P2, 0x860 ; /* 0x000004b000009947 */ /* 0x000fea0005000000 */ /*03b0*/ LOP3.LUT P2, RZ, R8, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff08ff7812 */ /* 0x000fc8000784c0ff */ /*03c0*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000f24572 */ /*03d0*/ @P1 BRA 0x840 ; /* 0x0000046000001947 */ /* 0x000fea0003800000 */ /*03e0*/ LOP3.LUT P1, RZ, R9, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff09ff7812 */ /* 0x000fc8000782c0ff */ /*03f0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */ /* 0x000fda0000702572 */ /*0400*/ @P0 BRA 0x810 ; /* 0x0000040000000947 */ /* 0x000fea0003800000 */ /*0410*/ ISETP.GE.AND P0, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */ /* 0x000fe40003f06270 */ /*0420*/ ISETP.GE.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fd60003f26270 */ /*0430*/ @P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a0224 */ /* 0x000fe400078e00ff */ /*0440*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, -0x40 ; /* 0xffffffc0ff0a8424 */ /* 0x000fe400078e00ff */ /*0450*/ @!P0 FFMA R8, R5, 1.84467440737095516160e+19, RZ ; /* 0x5f80000005088823 */ /* 0x000fe400000000ff */ /*0460*/ @!P1 FFMA R9, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004099823 */ /* 0x000fe200000000ff */ /*0470*/ @!P1 IADD3 R10, R10, 0x40, RZ ; /* 0x000000400a0a9810 */ /* 0x000fe40007ffe0ff */ /*0480*/ LEA R4, R7, 0xc0800000, 0x17 ; /* 0xc080000007047811 */ /* 0x000fe200078eb8ff */ /*0490*/ BSSY B2, 0x800 ; /* 0x0000036000027945 */ /* 0x000fe20003800000 */ /*04a0*/ IADD3 R6, R6, -0x7f, RZ ; /* 0xffffff8106067810 */ /* 0x000fc60007ffe0ff */ /*04b0*/ IMAD.IADD R9, R9, 0x1, -R4 ; /* 0x0000000109097824 */ /* 0x000fe200078e0a04 */ /*04c0*/ IADD3 R7, R6.reuse, 0x7f, -R7 ; /* 0x0000007f06077810 */ /* 0x040fe20007ffe807 */ /*04d0*/ IMAD R8, R6, -0x800000, R8 ; /* 0xff80000006087824 */ /* 0x000fe400078e0208 */ /*04e0*/ MUFU.RCP R4, R9 ; /* 0x0000000900047308 */ /* 0x000e220000001000 */ /*04f0*/ FADD.FTZ R5, -R9, -RZ ; /* 0x800000ff09057221 */ /* 0x000fe40000010100 */ /*0500*/ IMAD.IADD R7, R7, 0x1, R10 ; /* 0x0000000107077824 */ /* 0x000fe400078e020a */ /*0510*/ FFMA R11, R4, R5, 1 ; /* 0x3f800000040b7423 */ /* 0x001fc80000000005 */ /*0520*/ FFMA R13, R4, R11, R4 ; /* 0x0000000b040d7223 */ /* 0x000fc80000000004 */ /*0530*/ FFMA R4, R8, R13, RZ ; /* 0x0000000d08047223 */ /* 0x000fc800000000ff */ /*0540*/ FFMA R11, R5, R4, R8 ; /* 0x00000004050b7223 */ /* 0x000fc80000000008 */ /*0550*/ FFMA R12, R13, R11, R4 ; /* 0x0000000b0d0c7223 */ /* 0x000fc80000000004 */ /*0560*/ FFMA R8, R5, R12, R8 ; /* 0x0000000c05087223 */ /* 0x000fc80000000008 */ /*0570*/ FFMA R4, R13, R8, R12 ; /* 0x000000080d047223 */ /* 0x000fca000000000c */ /*0580*/ SHF.R.U32.HI R5, RZ, 0x17, R4 ; /* 0x00000017ff057819 */ /* 0x000fc80000011604 */ /*0590*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */ /* 0x000fca00078ec0ff */ /*05a0*/ IMAD.IADD R9, R5, 0x1, R7 ; /* 0x0000000105097824 */ /* 0x000fca00078e0207 */ /*05b0*/ IADD3 R5, R9, -0x1, RZ ; /* 0xffffffff09057810 */ /* 0x000fc80007ffe0ff */ /*05c0*/ ISETP.GE.U32.AND P0, PT, R5, 0xfe, PT ; /* 0x000000fe0500780c */ /* 0x000fda0003f06070 */ /*05d0*/ @!P0 BRA 0x7e0 ; /* 0x0000020000008947 */ /* 0x000fea0003800000 */ /*05e0*/ ISETP.GT.AND P0, PT, R9, 0xfe, PT ; /* 0x000000fe0900780c */ /* 0x000fda0003f04270 */ /*05f0*/ @P0 BRA 0x7b0 ; /* 0x000001b000000947 */ /* 0x000fea0003800000 */ /*0600*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */ /* 0x000fda0003f06270 */ /*0610*/ @P0 BRA 0x7f0 ; /* 0x000001d000000947 */ /* 0x000fea0003800000 */ /*0620*/ ISETP.GE.AND P0, PT, R9, -0x18, PT ; /* 0xffffffe80900780c */ /* 0x000fe40003f06270 */ /*0630*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */ /* 0x000fd600078ec0ff */ /*0640*/ @!P0 BRA 0x7f0 ; /* 0x000001a000008947 */ /* 0x000fea0003800000 */ /*0650*/ FFMA.RZ R5, R13, R8.reuse, R12.reuse ; /* 0x000000080d057223 */ /* 0x180fe2000000c00c */ /*0660*/ ISETP.NE.AND P2, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe20003f45270 */ /*0670*/ FFMA.RM R6, R13, R8.reuse, R12.reuse ; /* 0x000000080d067223 */ /* 0x180fe2000000400c */ /*0680*/ ISETP.NE.AND P1, PT, R9, RZ, PT ; /* 0x000000ff0900720c */ /* 0x000fe40003f25270 */ /*0690*/ LOP3.LUT R7, R5, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff05077812 */ /* 0x000fe200078ec0ff */ /*06a0*/ FFMA.RP R5, R13, R8, R12 ; /* 0x000000080d057223 */ /* 0x000fe2000000800c */ /*06b0*/ IADD3 R8, R9, 0x20, RZ ; /* 0x0000002009087810 */ /* 0x000fe20007ffe0ff */ /*06c0*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */ /* 0x000fe200078e0a09 */ /*06d0*/ LOP3.LUT R7, R7, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000007077812 */ /* 0x000fe400078efcff */ /*06e0*/ FSETP.NEU.FTZ.AND P0, PT, R5, R6, PT ; /* 0x000000060500720b */ /* 0x000fc40003f1d000 */ /*06f0*/ SHF.L.U32 R8, R7, R8, RZ ; /* 0x0000000807087219 */ /* 0x000fe400000006ff */ /*0700*/ SEL R6, R9, RZ, P2 ; /* 0x000000ff09067207 */ /* 0x000fe40001000000 */ /*0710*/ ISETP.NE.AND P1, PT, R8, RZ, P1 ; /* 0x000000ff0800720c */ /* 0x000fe40000f25270 */ /*0720*/ SHF.R.U32.HI R6, RZ, R6, R7 ; /* 0x00000006ff067219 */ /* 0x000fe40000011607 */ /*0730*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40000703570 */ /*0740*/ SHF.R.U32.HI R8, RZ, 0x1, R6 ; /* 0x00000001ff087819 */ /* 0x000fc40000011606 */ /*0750*/ SEL R5, RZ, 0x1, !P0 ; /* 0x00000001ff057807 */ /* 0x000fc80004000000 */ /*0760*/ LOP3.LUT R5, R5, 0x1, R8, 0xf8, !PT ; /* 0x0000000105057812 */ /* 0x000fc800078ef808 */ /*0770*/ LOP3.LUT R5, R5, R6, RZ, 0xc0, !PT ; /* 0x0000000605057212 */ /* 0x000fca00078ec0ff */ /*0780*/ IMAD.IADD R5, R8, 0x1, R5 ; /* 0x0000000108057824 */ /* 0x000fca00078e0205 */ /*0790*/ LOP3.LUT R4, R5, R4, RZ, 0xfc, !PT ; /* 0x0000000405047212 */ /* 0x000fe200078efcff */ /*07a0*/ BRA 0x7f0 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*07b0*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */ /* 0x000fc800078ec0ff */ /*07c0*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */ /* 0x000fe200078efcff */ /*07d0*/ BRA 0x7f0 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*07e0*/ IMAD R4, R7, 0x800000, R4 ; /* 0x0080000007047824 */ /* 0x000fe400078e0204 */ /*07f0*/ BSYNC B2 ; /* 0x0000000000027941 */ /* 0x000fea0003800000 */ /*0800*/ BRA 0x890 ; /* 0x0000008000007947 */ /* 0x000fea0003800000 */ /*0810*/ LOP3.LUT R4, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009047812 */ /* 0x000fc800078e4808 */ /*0820*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */ /* 0x000fe200078efcff */ /*0830*/ BRA 0x890 ; /* 0x0000005000007947 */ /* 0x000fea0003800000 */ /*0840*/ LOP3.LUT R4, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009047812 */ /* 0x000fe200078e4808 */ /*0850*/ BRA 0x890 ; /* 0x0000003000007947 */ /* 0x000fea0003800000 */ /*0860*/ MUFU.RSQ R4, -QNAN ; /* 0xffc0000000047908 */ /* 0x000e220000001400 */ /*0870*/ BRA 0x890 ; /* 0x0000001000007947 */ /* 0x000fea0003800000 */ /*0880*/ FADD.FTZ R4, R5, R4 ; /* 0x0000000405047221 */ /* 0x000fe40000010000 */ /*0890*/ BSYNC B1 ; /* 0x0000000000017941 */ /* 0x000fea0003800000 */ /*08a0*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */ /* 0x001fe400078e0004 */ /*08b0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */ /* 0x000fe400078e0000 */ /*08c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */ /* 0x000fc800078e00ff */ /*08d0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff72004007950 */ /* 0x000fea0003c3ffff */ /*08e0*/ BRA 0x8e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*08f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0900*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0910*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0920*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0930*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0940*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0950*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0960*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0970*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z28AvgCentroidCoordinatesKernelPfS_ii .globl _Z28AvgCentroidCoordinatesKernelPfS_ii .p2align 8 .type _Z28AvgCentroidCoordinatesKernelPfS_ii,@function _Z28AvgCentroidCoordinatesKernelPfS_ii: s_clause 0x1 s_load_b64 s[2:3], s[0:1], 0x14 s_load_b32 s4, s[0:1], 0x24 s_waitcnt lgkmcnt(0) s_mul_i32 s3, s3, s15 s_and_b32 s4, s4, 0xffff s_add_i32 s3, s3, s14 s_lshl_b32 s2, s2, 1 v_mad_u64_u32 v[1:2], null, s3, s4, v[0:1] s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_i32_e32 vcc_lo, s2, v1 s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_4 v_lshrrev_b32_e32 v0, 31, v1 s_load_b128 s[0:3], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v0, v1, v0 v_ashrrev_i32_e32 v2, 1, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s2, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo global_load_b32 v3, v[2:3], off v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[0:1], 2, v[1:2] v_mov_b32_e32 v2, 0 v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_mov_b32 s0, exec_lo s_waitcnt vmcnt(0) v_cmpx_neq_f32_e32 0, v3 s_cbranch_execz .LBB0_3 global_load_b32 v2, v[0:1], off s_waitcnt vmcnt(0) v_div_scale_f32 v4, null, v3, v3, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_f32_e32 v5, v4 s_waitcnt_depctr 0xfff v_fma_f32 v6, -v4, v5, 1.0 v_fmac_f32_e32 v5, v6, v5 v_div_scale_f32 v6, vcc_lo, v2, v3, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v7, v6, v5 v_fma_f32 v8, -v4, v7, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v7, v8, v5 v_fma_f32 v4, -v4, v7, v6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_div_fmas_f32 v4, v4, v5, v7 v_div_fixup_f32 v2, v4, v3, v2 .LBB0_3: s_or_b32 exec_lo, exec_lo, s0 global_store_b32 v[0:1], v2, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z28AvgCentroidCoordinatesKernelPfS_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 9 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z28AvgCentroidCoordinatesKernelPfS_ii, .Lfunc_end0-_Z28AvgCentroidCoordinatesKernelPfS_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z28AvgCentroidCoordinatesKernelPfS_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z28AvgCentroidCoordinatesKernelPfS_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 9 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00065088_00000000-6_AvgCentroidCoordinatesKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii .type _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii, @function _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z28AvgCentroidCoordinatesKernelPfS_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii, .-_Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii .globl _Z28AvgCentroidCoordinatesKernelPfS_ii .type _Z28AvgCentroidCoordinatesKernelPfS_ii, @function _Z28AvgCentroidCoordinatesKernelPfS_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z52__device_stub__Z28AvgCentroidCoordinatesKernelPfS_iiPfS_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z28AvgCentroidCoordinatesKernelPfS_ii, .-_Z28AvgCentroidCoordinatesKernelPfS_ii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z28AvgCentroidCoordinatesKernelPfS_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z28AvgCentroidCoordinatesKernelPfS_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "AvgCentroidCoordinatesKernel.hip" .globl _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii # -- Begin function _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .p2align 4, 0x90 .type _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii,@function _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii: # @_Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z28AvgCentroidCoordinatesKernelPfS_ii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii, .Lfunc_end0-_Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z28AvgCentroidCoordinatesKernelPfS_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z28AvgCentroidCoordinatesKernelPfS_ii,@object # @_Z28AvgCentroidCoordinatesKernelPfS_ii .section .rodata,"a",@progbits .globl _Z28AvgCentroidCoordinatesKernelPfS_ii .p2align 3, 0x0 _Z28AvgCentroidCoordinatesKernelPfS_ii: .quad _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .size _Z28AvgCentroidCoordinatesKernelPfS_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z28AvgCentroidCoordinatesKernelPfS_ii" .size .L__unnamed_1, 39 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z43__device_stub__AvgCentroidCoordinatesKernelPfS_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z28AvgCentroidCoordinatesKernelPfS_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <cuda.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Añado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los índices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } cudaMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); cudaMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); matrixAddPitch <<<b, t>>> (d_a, d_b, d_c, pitch / sizeof(int)); cudaMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, cudaMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); cudaMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyHostToDevice); // dst, src, size, cudaMemcpyHostToDevice...; cudaMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); traspose<<<b, t>>>(dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); cudaMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyDeviceToHost); // dest, src, size, cudaMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
code for sm_80 Function : _Z14matrixAddPitchPiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e280000002100 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e680000002600 */ /*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */ /* 0x001fca00078e0200 */ /*0060*/ ISETP.GT.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GT.OR P0, PT, R3, 0x2, P0 ; /* 0x000000020300780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */ /* 0x000fe200078e0200 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc800078e0207 */ /*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x0c0fe400078e0207 */ /*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fe200078e0207 */ /*0120*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 0x150; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z15traspose_sharedPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e280000002600 */ /*0020*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R5, R5, c[0x0][0x4], R4 ; /* 0x0000010005057a24 */ /* 0x001fca00078e0204 */ /*0060*/ ISETP.GT.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0207 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R2, R0, R5, 0x1 ; /* 0x0000000500027211 */ /* 0x000fe200078e08ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R2, R6, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0206 */ /*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD R9, R7, 0x21, R4 ; /* 0x0000002107097824 */ /* 0x000fe200078e0204 */ /*0100*/ LEA R5, R5, R0, 0x2 ; /* 0x0000000005057211 */ /* 0x000fca00078e10ff */ /*0110*/ IMAD.WIDE R4, R5, R6, c[0x0][0x168] ; /* 0x00005a0005047625 */ /* 0x000fe200078e0206 */ /*0120*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */ /* 0x004fe80000004800 */ /*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0140*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000e280000004800 */ /*0150*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x001fe2000c101904 */ /*0160*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0170*/ BRA 0x170; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z8trasposePiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GT.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R2, R5, R0, 0x2 ; /* 0x0000000005027211 */ /* 0x000fe200078e10ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R2, R4, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0204 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ LEA R5, R0, R5, 0x1 ; /* 0x0000000500057211 */ /* 0x000fca00078e08ff */ /*0100*/ IMAD.WIDE R4, R5, R4, c[0x0][0x168] ; /* 0x00005a0005047625 */ /* 0x000fca00078e0204 */ /*0110*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z11copy_sharedPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R4 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0204 */ /*0060*/ ISETP.GT.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R0, R3, R0, 0x2 ; /* 0x0000000003007211 */ /* 0x000fe200078e10ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e020b */ /*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD R9, R5, 0x21, R4 ; /* 0x0000002105097824 */ /* 0x000fe400078e0204 */ /*0100*/ IMAD.WIDE R4, R0, R11, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc600078e020b */ /*0110*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */ /* 0x004fe80000004800 */ /*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0130*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000e280000004800 */ /*0140*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x001fe2000c101904 */ /*0150*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0160*/ BRA 0x160; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z4copyPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GT.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R0, R3, R0, 0x2 ; /* 0x0000000003007211 */ /* 0x000fe200078e10ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0205 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fca00078e0205 */ /*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <cuda.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Añado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los índices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } cudaMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); cudaMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); matrixAddPitch <<<b, t>>> (d_a, d_b, d_c, pitch / sizeof(int)); cudaMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, cudaMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); cudaMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyHostToDevice); // dst, src, size, cudaMemcpyHostToDevice...; cudaMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); traspose<<<b, t>>>(dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); cudaMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyDeviceToHost); // dest, src, size, cudaMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
.file "tmpxft_00114244_00000000-6_traspose.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Error calc time... %s\n" .text .globl _Z8get_timev .type _Z8get_timev, @function _Z8get_timev: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rsi movl $0, %edi call clock_gettime@PLT testl %eax, %eax js .L7 imulq $1000000000, (%rsp), %rax addq 8(%rsp), %rax movq 24(%rsp), %rdx subq %fs:40, %rdx jne .L8 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state call __errno_location@PLT movl (%rax), %edi call strerror@PLT movq %rax, %rcx leaq .LC0(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z8get_timev, .-_Z8get_timev .section .rodata.str1.1 .LC1: .string "Error malloc %s\n" .text .globl _Z13mi_malloc_intPPii .type _Z13mi_malloc_intPPii, @function _Z13mi_malloc_intPPii: .LFB2058: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbp movslq %esi, %rbx salq $2, %rbx movq %rbx, %rdi call malloc@PLT movq %rax, 0(%rbp) testq %rax, %rax je .L12 movq %rax, %rdi movq %rbx, %rcx movq %rbx, %rdx movl $0, %esi call __memset_chk@PLT addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L12: .cfi_restore_state call __errno_location@PLT movl (%rax), %edi call strerror@PLT movq %rax, %rcx leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z13mi_malloc_intPPii, .-_Z13mi_malloc_intPPii .globl _Z4initPi .type _Z4initPi, @function _Z4initPi: .LFB2059: .cfi_startproc endbr64 movl $0, (%rdi) movl $1, 4(%rdi) movl $2, 8(%rdi) movl $3, 12(%rdi) movl $0, %eax .L14: leal 4(%rax), %edx movl %edx, 16(%rdi,%rax,4) addq $1, %rax cmpq $4, %rax jne .L14 ret .cfi_endproc .LFE2059: .size _Z4initPi, .-_Z4initPi .section .rodata.str1.1 .LC2: .string "Print matrix..." .LC3: .string "%s\n" .LC4: .string "%5d" .LC5: .string "" .text .globl _Z12print_matrixPKiii .type _Z12print_matrixPKiii, @function _Z12print_matrixPKiii: .LFB2060: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 movq %rdi, 16(%rsp) movl %esi, %r15d movl %edx, %ebx movl %edx, 12(%rsp) leaq .LC2(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT testl %ebx, %ebx jle .L17 movl $0, %r14d movl $0, %r13d movslq %r15d, %rax movq %rax, 24(%rsp) leaq .LC4(%rip), %r12 jmp .L18 .L20: movslq %r14d, %rax movq 16(%rsp), %rsi leaq (%rsi,%rax,4), %rbx movq 24(%rsp), %rdi addq %rdi, %rax leaq (%rsi,%rax,4), %rbp .L19: movl (%rbx), %ecx movq %r12, %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L19 .L21: leaq .LC5(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT addl $1, %r13d addl %r15d, %r14d cmpl %r13d, 12(%rsp) je .L17 .L18: testl %r15d, %r15d jg .L20 jmp .L21 .L17: leaq .LC5(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z12print_matrixPKiii, .-_Z12print_matrixPKiii .globl _Z25__device_stub__Z4copyPiS_PiS_ .type _Z25__device_stub__Z4copyPiS_PiS_, @function _Z25__device_stub__Z4copyPiS_PiS_: .LFB2088: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 104(%rsp), %rax subq %fs:40, %rax jne .L29 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z4copyPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z25__device_stub__Z4copyPiS_PiS_, .-_Z25__device_stub__Z4copyPiS_PiS_ .globl _Z4copyPiS_ .type _Z4copyPiS_, @function _Z4copyPiS_: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z4copyPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z4copyPiS_, .-_Z4copyPiS_ .globl _Z33__device_stub__Z11copy_sharedPiS_PiS_ .type _Z33__device_stub__Z11copy_sharedPiS_PiS_, @function _Z33__device_stub__Z11copy_sharedPiS_PiS_: .LFB2090: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L36 .L32: movq 104(%rsp), %rax subq %fs:40, %rax jne .L37 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z11copy_sharedPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L32 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2090: .size _Z33__device_stub__Z11copy_sharedPiS_PiS_, .-_Z33__device_stub__Z11copy_sharedPiS_PiS_ .globl _Z11copy_sharedPiS_ .type _Z11copy_sharedPiS_, @function _Z11copy_sharedPiS_: .LFB2091: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z11copy_sharedPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _Z11copy_sharedPiS_, .-_Z11copy_sharedPiS_ .globl _Z29__device_stub__Z8trasposePiS_PiS_ .type _Z29__device_stub__Z8trasposePiS_PiS_, @function _Z29__device_stub__Z8trasposePiS_PiS_: .LFB2092: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L44 .L40: movq 104(%rsp), %rax subq %fs:40, %rax jne .L45 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z8trasposePiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L40 .L45: call __stack_chk_fail@PLT .cfi_endproc .LFE2092: .size _Z29__device_stub__Z8trasposePiS_PiS_, .-_Z29__device_stub__Z8trasposePiS_PiS_ .globl _Z8trasposePiS_ .type _Z8trasposePiS_, @function _Z8trasposePiS_: .LFB2093: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z8trasposePiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2093: .size _Z8trasposePiS_, .-_Z8trasposePiS_ .section .rodata.str1.1 .LC7: .string "Time : %lf ms\n" .LC8: .string "Num Blocks (x:%d y:%d)\n" .text .globl _Z8trasposev .type _Z8trasposev, @function _Z8trasposev: .LFB2062: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $72, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movq $0, 16(%rsp) movq $0, 24(%rsp) leaq 8(%rsp), %rdi movl $8, %esi call _Z13mi_malloc_intPPii movq 8(%rsp), %rbx movq %rbx, %rdi call _Z4initPi movl $2, %edx movl $4, %esi movq %rbx, %rdi call _Z12print_matrixPKiii leaq 16(%rsp), %rdi movl $32, %esi call cudaMalloc@PLT movl $1, %ecx movl $32, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq 24(%rsp), %rdi movl $32, %esi call cudaMalloc@PLT movl $32, %edx movl $0, %esi movq 24(%rsp), %rdi call cudaMemset@PLT movl $1, 40(%rsp) movl $1, 52(%rsp) call _Z8get_timev movq %rax, %rbp movl $1, 44(%rsp) movl $1, 48(%rsp) movl $32, 32(%rsp) movl $32, 36(%rsp) movl 40(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movq 44(%rsp), %rdi movl 52(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L54 .L49: movl $2, %ecx movl $32, %edx movq 24(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT call _Z8get_timev subq %rbp, %rax js .L50 pxor %xmm0, %xmm0 cvtsi2ssq %rax, %xmm0 .L51: divss .LC6(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC7(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $1, %eax call __fprintf_chk@PLT movl $4, %edx movl $2, %esi movq %rbx, %rdi call _Z12print_matrixPKiii movl $1, %r8d movl $1, %ecx leaq .LC8(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L55 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L54: .cfi_restore_state movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z29__device_stub__Z8trasposePiS_PiS_ jmp .L49 .L50: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm0, %xmm0 cvtsi2ssq %rdx, %xmm0 addss %xmm0, %xmm0 jmp .L51 .L55: call __stack_chk_fail@PLT .cfi_endproc .LFE2062: .size _Z8trasposev, .-_Z8trasposev .globl main .type main, @function main: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z8trasposev movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size main, .-main .globl _Z37__device_stub__Z15traspose_sharedPiS_PiS_ .type _Z37__device_stub__Z15traspose_sharedPiS_PiS_, @function _Z37__device_stub__Z15traspose_sharedPiS_PiS_: .LFB2094: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L62 .L58: movq 104(%rsp), %rax subq %fs:40, %rax jne .L63 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L62: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z15traspose_sharedPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L58 .L63: call __stack_chk_fail@PLT .cfi_endproc .LFE2094: .size _Z37__device_stub__Z15traspose_sharedPiS_PiS_, .-_Z37__device_stub__Z15traspose_sharedPiS_PiS_ .globl _Z15traspose_sharedPiS_ .type _Z15traspose_sharedPiS_, @function _Z15traspose_sharedPiS_: .LFB2095: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z15traspose_sharedPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2095: .size _Z15traspose_sharedPiS_, .-_Z15traspose_sharedPiS_ .globl _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i .type _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i, @function _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i: .LFB2096: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L70 .L66: movq 136(%rsp), %rax subq %fs:40, %rax jne .L71 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L70: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14matrixAddPitchPiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L66 .L71: call __stack_chk_fail@PLT .cfi_endproc .LFE2096: .size _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i, .-_Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i .globl _Z14matrixAddPitchPiS_S_i .type _Z14matrixAddPitchPiS_S_i, @function _Z14matrixAddPitchPiS_S_i: .LFB2097: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2097: .size _Z14matrixAddPitchPiS_S_i, .-_Z14matrixAddPitchPiS_S_i .globl _Z8addPitchv .type _Z8addPitchv, @function _Z8addPitchv: .LFB2061: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $72, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $32, %edi call malloc@PLT movq %rax, %rbp movl $32, %edi call malloc@PLT movq %rax, %rbx movl $32, %edi call malloc@PLT movq %rax, %r12 movl $0, %eax .L75: movl %eax, 0(%rbp,%rax,4) movl %eax, (%rbx,%rax,4) addq $1, %rax cmpq $8, %rax jne .L75 leaq 24(%rsp), %r13 movq %rsp, %rdi movl $2, %ecx movl $16, %edx movq %r13, %rsi call cudaMallocPitch@PLT leaq 8(%rsp), %rdi movl $2, %ecx movl $16, %edx movq %r13, %rsi call cudaMallocPitch@PLT leaq 16(%rsp), %rdi movl $2, %ecx movl $16, %edx movq %r13, %rsi call cudaMallocPitch@PLT subq $8, %rsp .cfi_def_cfa_offset 120 pushq $1 .cfi_def_cfa_offset 128 movl $2, %r9d movl $16, %r8d movl $16, %ecx movq %rbp, %rdx movq 40(%rsp), %rsi movq 16(%rsp), %rdi call cudaMemcpy2D@PLT movl $1, (%rsp) movl $2, %r9d movl $16, %r8d movl $16, %ecx movq %rbx, %rdx movq 40(%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpy2D@PLT movl $16, 48(%rsp) movl $16, 52(%rsp) addq $16, %rsp .cfi_def_cfa_offset 112 movl 40(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movq 44(%rsp), %rdi movl 52(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L80 .L76: subq $8, %rsp .cfi_def_cfa_offset 120 pushq $2 .cfi_def_cfa_offset 128 movl $2, %r9d movl $16, %r8d movq 40(%rsp), %rcx movq 32(%rsp), %rdx movl $16, %esi movq %r12, %rdi call cudaMemcpy2D@PLT addq $16, %rsp .cfi_def_cfa_offset 112 movl $4, %edx movl $2, %esi movq %r12, %rdi call _Z12print_matrixPKiii movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L81 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L80: .cfi_restore_state movq 24(%rsp), %rcx shrq $2, %rcx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i jmp .L76 .L81: call __stack_chk_fail@PLT .cfi_endproc .LFE2061: .size _Z8addPitchv, .-_Z8addPitchv .section .rodata.str1.1 .LC9: .string "_Z14matrixAddPitchPiS_S_i" .LC10: .string "_Z15traspose_sharedPiS_" .LC11: .string "_Z8trasposePiS_" .LC12: .string "_Z11copy_sharedPiS_" .LC13: .string "_Z4copyPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2099: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z14matrixAddPitchPiS_S_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z15traspose_sharedPiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), %rdx movq %rdx, %rcx leaq _Z8trasposePiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC12(%rip), %rdx movq %rdx, %rcx leaq _Z11copy_sharedPiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC13(%rip), %rdx movq %rdx, %rcx leaq _Z4copyPiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2099: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC6: .long 1232348160 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <cuda.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Añado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los índices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } cudaMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); cudaMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); matrixAddPitch <<<b, t>>> (d_a, d_b, d_c, pitch / sizeof(int)); cudaMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, cudaMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); cudaMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyHostToDevice); // dst, src, size, cudaMemcpyHostToDevice...; cudaMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); traspose<<<b, t>>>(dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); cudaMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyDeviceToHost); // dest, src, size, cudaMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
#include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <hip/hip_runtime.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Añado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los índices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } hipMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); hipMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); hipMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); matrixAddPitch <<<b, t>>> (d_a, d_b, d_c, pitch / sizeof(int)); hipMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, hipMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); hipMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, hipMemcpyHostToDevice); // dst, src, size, cudaMemcpyHostToDevice...; hipMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); traspose<<<b, t>>>(dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); hipMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, hipMemcpyDeviceToHost); // dest, src, size, cudaMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <hip/hip_runtime.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Añado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los índices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } hipMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); hipMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); hipMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); matrixAddPitch <<<b, t>>> (d_a, d_b, d_c, pitch / sizeof(int)); hipMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, hipMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); hipMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, hipMemcpyHostToDevice); // dst, src, size, cudaMemcpyHostToDevice...; hipMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); traspose<<<b, t>>>(dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); hipMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, hipMemcpyDeviceToHost); // dest, src, size, cudaMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4copyPiS_ .globl _Z4copyPiS_ .p2align 8 .type _Z4copyPiS_,@function _Z4copyPiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_gt_i32_e32 vcc_lo, 4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i32_e64 s2, 2, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v0, v1, 2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4copyPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4copyPiS_, .Lfunc_end0-_Z4copyPiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z11copy_sharedPiS_ .globl _Z11copy_sharedPiS_ .p2align 8 .type _Z11copy_sharedPiS_,@function _Z11copy_sharedPiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, 4, v2 v_cmp_gt_i32_e64 s2, 2, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB1_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v2, v3, 2, v2 v_lshlrev_b32_e32 v0, 2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_mad_u32_u24 v0, v1, 0x84, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s1, v3, vcc_lo global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) ds_store_b32 v0, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v4, v0 v_add_co_u32 v0, vcc_lo, s2, v2 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v3, vcc_lo s_waitcnt lgkmcnt(0) global_store_b32 v[0:1], v4, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11copy_sharedPiS_ .amdhsa_group_segment_fixed_size 4224 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z11copy_sharedPiS_, .Lfunc_end1-_Z11copy_sharedPiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z8trasposePiS_ .globl _Z8trasposePiS_ .p2align 8 .type _Z8trasposePiS_,@function _Z8trasposePiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_gt_i32_e32 vcc_lo, 4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i32_e64 s2, 2, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB2_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v2, v1, 2, v0 v_lshl_add_u32 v0, v0, 1, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[2:3], 2, v[2:3] v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v2, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB2_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8trasposePiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z8trasposePiS_, .Lfunc_end2-_Z8trasposePiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z15traspose_sharedPiS_ .globl _Z15traspose_sharedPiS_ .p2align 8 .type _Z15traspose_sharedPiS_,@function _Z15traspose_sharedPiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, 4, v2 v_cmp_gt_i32_e64 s2, 2, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB3_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v4, v2, 1, v3 v_lshlrev_b32_e32 v0, 2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v5, 31, v4 v_mad_u32_u24 v1, v1, 0x84, v0 v_lshl_add_u32 v0, v3, 2, v2 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s0, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) ds_store_b32 v1, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v2, v1 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_waitcnt lgkmcnt(0) global_store_b32 v[0:1], v2, off .LBB3_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z15traspose_sharedPiS_ .amdhsa_group_segment_fixed_size 4224 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end3: .size _Z15traspose_sharedPiS_, .Lfunc_end3-_Z15traspose_sharedPiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z14matrixAddPitchPiS_S_i .globl _Z14matrixAddPitchPiS_S_i .p2align 8 .type _Z14matrixAddPitchPiS_S_i,@function _Z14matrixAddPitchPiS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s4, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_ge_i32_e32 vcc_lo, s3, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i32_e64 s2, 3, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s4, s2 s_cbranch_execz .LBB4_2 s_load_b128 s[4:7], s[0:1], 0x0 v_mad_u64_u32 v[2:3], null, v1, s3, v[0:1] s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[0:1], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off .LBB4_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14matrixAddPitchPiS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end4: .size _Z14matrixAddPitchPiS_S_i, .Lfunc_end4-_Z14matrixAddPitchPiS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4copyPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4copyPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 4224 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11copy_sharedPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11copy_sharedPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8trasposePiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z8trasposePiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 4224 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z15traspose_sharedPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z15traspose_sharedPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14matrixAddPitchPiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14matrixAddPitchPiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <hip/hip_runtime.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Añado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los índices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } hipMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); hipMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); hipMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); matrixAddPitch <<<b, t>>> (d_a, d_b, d_c, pitch / sizeof(int)); hipMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, hipMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); hipMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, hipMemcpyHostToDevice); // dst, src, size, cudaMemcpyHostToDevice...; hipMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); traspose<<<b, t>>>(dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); hipMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, hipMemcpyDeviceToHost); // dest, src, size, cudaMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
.text .file "traspose.hip" .globl _Z19__device_stub__copyPiS_ # -- Begin function _Z19__device_stub__copyPiS_ .p2align 4, 0x90 .type _Z19__device_stub__copyPiS_,@function _Z19__device_stub__copyPiS_: # @_Z19__device_stub__copyPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z4copyPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z19__device_stub__copyPiS_, .Lfunc_end0-_Z19__device_stub__copyPiS_ .cfi_endproc # -- End function .globl _Z26__device_stub__copy_sharedPiS_ # -- Begin function _Z26__device_stub__copy_sharedPiS_ .p2align 4, 0x90 .type _Z26__device_stub__copy_sharedPiS_,@function _Z26__device_stub__copy_sharedPiS_: # @_Z26__device_stub__copy_sharedPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z11copy_sharedPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end1: .size _Z26__device_stub__copy_sharedPiS_, .Lfunc_end1-_Z26__device_stub__copy_sharedPiS_ .cfi_endproc # -- End function .globl _Z23__device_stub__trasposePiS_ # -- Begin function _Z23__device_stub__trasposePiS_ .p2align 4, 0x90 .type _Z23__device_stub__trasposePiS_,@function _Z23__device_stub__trasposePiS_: # @_Z23__device_stub__trasposePiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z8trasposePiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z23__device_stub__trasposePiS_, .Lfunc_end2-_Z23__device_stub__trasposePiS_ .cfi_endproc # -- End function .globl _Z30__device_stub__traspose_sharedPiS_ # -- Begin function _Z30__device_stub__traspose_sharedPiS_ .p2align 4, 0x90 .type _Z30__device_stub__traspose_sharedPiS_,@function _Z30__device_stub__traspose_sharedPiS_: # @_Z30__device_stub__traspose_sharedPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z15traspose_sharedPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end3: .size _Z30__device_stub__traspose_sharedPiS_, .Lfunc_end3-_Z30__device_stub__traspose_sharedPiS_ .cfi_endproc # -- End function .globl _Z29__device_stub__matrixAddPitchPiS_S_i # -- Begin function _Z29__device_stub__matrixAddPitchPiS_S_i .p2align 4, 0x90 .type _Z29__device_stub__matrixAddPitchPiS_S_i,@function _Z29__device_stub__matrixAddPitchPiS_S_i: # @_Z29__device_stub__matrixAddPitchPiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14matrixAddPitchPiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size _Z29__device_stub__matrixAddPitchPiS_S_i, .Lfunc_end4-_Z29__device_stub__matrixAddPitchPiS_S_i .cfi_endproc # -- End function .globl _Z8get_timev # -- Begin function _Z8get_timev .p2align 4, 0x90 .type _Z8get_timev,@function _Z8get_timev: # @_Z8get_timev .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $16, %rsp .cfi_def_cfa_offset 32 .cfi_offset %rbx, -16 movq %rsp, %rsi xorl %edi, %edi callq clock_gettime testl %eax, %eax js .LBB5_2 # %bb.1: imulq $1000000000, (%rsp), %rax # imm = 0x3B9ACA00 addq 8(%rsp), %rax addq $16, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB5_2: .cfi_def_cfa_offset 32 movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str, %esi movq %rbx, %rdi movq %rax, %rdx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end5: .size _Z8get_timev, .Lfunc_end5-_Z8get_timev .cfi_endproc # -- End function .globl _Z13mi_malloc_intPPii # -- Begin function _Z13mi_malloc_intPPii .p2align 4, 0x90 .type _Z13mi_malloc_intPPii,@function _Z13mi_malloc_intPPii: # @_Z13mi_malloc_intPPii .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rdi, %r14 movslq %esi, %rbx shlq $2, %rbx movq %rbx, %rdi callq malloc movq %rax, (%r14) testq %rax, %rax je .LBB6_1 # %bb.2: movq %rax, %rdi xorl %esi, %esi movq %rbx, %rdx addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 jmp memset@PLT # TAILCALL .LBB6_1: .cfi_def_cfa_offset 32 movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str.1, %esi movq %rbx, %rdi movq %rax, %rdx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end6: .size _Z13mi_malloc_intPPii, .Lfunc_end6-_Z13mi_malloc_intPPii .cfi_endproc # -- End function .globl _Z4initPi # -- Begin function _Z4initPi .p2align 4, 0x90 .type _Z4initPi,@function _Z4initPi: # @_Z4initPi .cfi_startproc # %bb.0: xorl %eax, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB7_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB7_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB7_2: # Parent Loop BB7_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rax,%rdx), %esi movl %esi, (%rdi,%rdx,4) incq %rdx cmpq $4, %rdx jne .LBB7_2 # %bb.3: # in Loop: Header=BB7_1 Depth=1 leaq 1(%rcx), %rdx addq $4, %rax addq $16, %rdi testq %rcx, %rcx movq %rdx, %rcx je .LBB7_1 # %bb.4: retq .Lfunc_end7: .size _Z4initPi, .Lfunc_end7-_Z4initPi .cfi_endproc # -- End function .globl _Z12print_matrixPKiii # -- Begin function _Z12print_matrixPKiii .p2align 4, 0x90 .type _Z12print_matrixPKiii,@function _Z12print_matrixPKiii: # @_Z12print_matrixPKiii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edx, %ebp movl %esi, %ebx movq %rdi, 8(%rsp) # 8-byte Spill movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf testl %ebp, %ebp jle .LBB8_6 # %bb.1: # %.preheader.lr.ph movl %ebp, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %ebx, %r12d xorl %ebp, %ebp xorl %r13d, %r13d jmp .LBB8_2 .p2align 4, 0x90 .LBB8_5: # %._crit_edge # in Loop: Header=BB8_2 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r13 addl %ebx, %ebp cmpq 16(%rsp), %r13 # 8-byte Folded Reload je .LBB8_6 .LBB8_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB8_4 Depth 2 testl %ebx, %ebx jle .LBB8_5 # %bb.3: # %.lr.ph # in Loop: Header=BB8_2 Depth=1 movl %ebp, %eax movq 8(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB8_4: # Parent Loop BB8_2 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%r14,%r15,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %r15 cmpq %r15, %r12 jne .LBB8_4 jmp .LBB8_5 .LBB8_6: # %._crit_edge13 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp fprintf # TAILCALL .Lfunc_end8: .size _Z12print_matrixPKiii, .Lfunc_end8-_Z12print_matrixPKiii .cfi_endproc # -- End function .globl _Z8addPitchv # -- Begin function _Z8addPitchv .p2align 4, 0x90 .type _Z8addPitchv,@function _Z8addPitchv: # @_Z8addPitchv .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $32, %edi callq malloc movq %rax, %rbx movl $32, %edi callq malloc movq %rax, %r14 movl $32, %edi callq malloc movq %rax, %r15 xorl %eax, %eax .p2align 4, 0x90 .LBB9_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) movl %eax, (%r14,%rax,4) incq %rax cmpq $8, %rax jne .LBB9_1 # %bb.2: leaq 40(%rsp), %rdi leaq 16(%rsp), %r12 movl $16, %edx movl $2, %ecx movq %r12, %rsi callq hipMallocPitch leaq 32(%rsp), %rdi movl $16, %edx movl $2, %ecx movq %r12, %rsi callq hipMallocPitch leaq 24(%rsp), %rdi movl $16, %edx movl $2, %ecx movq %r12, %rsi callq hipMallocPitch movq 40(%rsp), %rdi movq 16(%rsp), %rsi movl $1, (%rsp) movl $16, %ecx movl $16, %r8d movl $2, %r9d movq %rbx, %rdx callq hipMemcpy2D movq 32(%rsp), %rdi movq 16(%rsp), %rsi movl $1, (%rsp) movl $16, %ecx movl $16, %r8d movl $2, %r9d movq %r14, %rdx callq hipMemcpy2D movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB9_4 # %bb.3: movq 40(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq 16(%rsp), %rsi shrq $2, %rsi movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movq %rdx, 104(%rsp) movl %esi, 52(%rsp) leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 52(%rsp), %rax movq %rax, 152(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 64(%rsp), %rax movq 56(%rsp), %rdi movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d movq %rdi, 8(%rsp) movq %rax, (%rsp) leaq 128(%rsp), %r9 movl $_Z14matrixAddPitchPiS_S_i, %edi callq hipLaunchKernel .LBB9_4: movq 24(%rsp), %rdx movq 16(%rsp), %rcx movl $2, (%rsp) movl $16, %esi movl $16, %r8d movl $2, %r9d movq %r15, %rdi callq hipMemcpy2D movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf movq %r15, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB9_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB9_6 Depth 2 xorl %ebp, %ebp .p2align 4, 0x90 .LBB9_6: # Parent Loop BB9_5 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%r12,%rbp,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %rbp cmpq $2, %rbp jne .LBB9_6 # %bb.7: # %._crit_edge.i # in Loop: Header=BB9_5 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r13 addq $8, %r12 cmpq $4, %r13 jne .LBB9_5 # %bb.8: # %_Z12print_matrixPKiii.exit movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end9: .size _Z8addPitchv, .Lfunc_end9-_Z8addPitchv .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z8trasposev .LCPI10_0: .long 0x49742400 # float 1.0E+6 .text .globl _Z8trasposev .p2align 4, 0x90 .type _Z8trasposev,@function _Z8trasposev: # @_Z8trasposev .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $104, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq $0, 32(%rsp) movq $0, 8(%rsp) movl $32, %edi callq malloc testq %rax, %rax je .LBB10_1 # %bb.3: # %_Z13mi_malloc_intPPii.exit movq %rax, %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rax) movups %xmm0, (%rax) xorl %eax, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB10_4: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB10_5 Depth 2 movl $4, %edx movq %rax, %rsi .p2align 4, 0x90 .LBB10_5: # Parent Loop BB10_4 Depth=1 # => This Inner Loop Header: Depth=2 movl %esi, (%rbx,%rsi,4) incq %rsi decq %rdx jne .LBB10_5 # %bb.6: # in Loop: Header=BB10_4 Depth=1 leaq 1(%rcx), %rdx addq $4, %rax testq %rcx, %rcx movq %rdx, %rcx je .LBB10_4 # %bb.7: # %_Z4initPi.exit movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf movq %rbx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB10_8: # %.preheader.i7 # =>This Loop Header: Depth=1 # Child Loop BB10_9 Depth 2 xorl %r12d, %r12d .p2align 4, 0x90 .LBB10_9: # Parent Loop BB10_8 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%r14,%r12,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %r12 cmpq $4, %r12 jne .LBB10_9 # %bb.10: # %._crit_edge.i # in Loop: Header=BB10_8 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r15 addq $16, %r14 cmpq $2, %r15 jne .LBB10_8 # %bb.11: # %_Z12print_matrixPKiii.exit movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf leaq 32(%rsp), %rdi movl $32, %esi callq hipMalloc movq 32(%rsp), %rdi movl $32, %edx movq %rbx, %rsi movl $1, %ecx callq hipMemcpy leaq 8(%rsp), %rdi movl $32, %esi callq hipMalloc movq 8(%rsp), %rdi movl $32, %edx xorl %esi, %esi callq hipMemset leaq 16(%rsp), %rsi xorl %edi, %edi callq clock_gettime testl %eax, %eax js .LBB10_12 # %bb.13: # %_Z8get_timev.exit movq 16(%rsp), %r12 movq 24(%rsp), %r15 movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $137438953504, %rdx # imm = 0x2000000020 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB10_15 # %bb.14: movq 32(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 96(%rsp) movq %rcx, 88(%rsp) leaq 96(%rsp), %rax movq %rax, 16(%rsp) leaq 88(%rsp), %rax movq %rax, 24(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 16(%rsp), %r9 movl $_Z8trasposePiS_, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB10_15: movq 8(%rsp), %rsi movl $32, %edx movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq stdout(%rip), %r14 leaq 16(%rsp), %rsi xorl %edi, %edi callq clock_gettime testl %eax, %eax js .LBB10_12 # %bb.16: # %_Z8get_timev.exit11 movq 16(%rsp), %rax subq %r12, %rax movq 24(%rsp), %rcx subq %r15, %rcx imulq $1000000000, %rax, %rax # imm = 0x3B9ACA00 addq %rcx, %rax js .LBB10_17 # %bb.18: # %_Z8get_timev.exit11 cvtsi2ss %rax, %xmm0 jmp .LBB10_19 .LBB10_17: movq %rax, %rcx shrq %rcx andl $1, %eax orq %rcx, %rax cvtsi2ss %rax, %xmm0 addss %xmm0, %xmm0 .LBB10_19: # %_Z8get_timev.exit11 divss .LCPI10_0(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $.L.str.6, %esi movq %r14, %rdi movb $1, %al callq fprintf movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf xorl %r14d, %r14d .p2align 4, 0x90 .LBB10_20: # %.preheader.i12 # =>This Loop Header: Depth=1 # Child Loop BB10_21 Depth 2 xorl %r15d, %r15d .p2align 4, 0x90 .LBB10_21: # Parent Loop BB10_20 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%rbx,%r15,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %r15 cmpq $2, %r15 jne .LBB10_21 # %bb.22: # %._crit_edge.i17 # in Loop: Header=BB10_20 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r14 addq $8, %rbx cmpq $4, %r14 jne .LBB10_20 # %bb.23: # %_Z12print_matrixPKiii.exit20 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf movq stdout(%rip), %rdi movl $.L.str.7, %esi movl $1, %edx movl $1, %ecx xorl %eax, %eax callq fprintf addq $104, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB10_12: .cfi_def_cfa_offset 144 movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str, %esi jmp .LBB10_2 .LBB10_1: movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str.1, %esi .LBB10_2: movq %rbx, %rdi movq %rax, %rdx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end10: .size _Z8trasposev, .Lfunc_end10-_Z8trasposev .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 callq _Z8trasposev xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end11: .size main, .Lfunc_end11-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB12_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB12_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4copyPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11copy_sharedPiS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8trasposePiS_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z15traspose_sharedPiS_, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14matrixAddPitchPiS_S_i, %esi movl $.L__unnamed_5, %edx movl $.L__unnamed_5, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end12: .size __hip_module_ctor, .Lfunc_end12-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB13_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB13_2: retq .Lfunc_end13: .size __hip_module_dtor, .Lfunc_end13-__hip_module_dtor .cfi_endproc # -- End function .type _Z4copyPiS_,@object # @_Z4copyPiS_ .section .rodata,"a",@progbits .globl _Z4copyPiS_ .p2align 3, 0x0 _Z4copyPiS_: .quad _Z19__device_stub__copyPiS_ .size _Z4copyPiS_, 8 .type _Z11copy_sharedPiS_,@object # @_Z11copy_sharedPiS_ .globl _Z11copy_sharedPiS_ .p2align 3, 0x0 _Z11copy_sharedPiS_: .quad _Z26__device_stub__copy_sharedPiS_ .size _Z11copy_sharedPiS_, 8 .type _Z8trasposePiS_,@object # @_Z8trasposePiS_ .globl _Z8trasposePiS_ .p2align 3, 0x0 _Z8trasposePiS_: .quad _Z23__device_stub__trasposePiS_ .size _Z8trasposePiS_, 8 .type _Z15traspose_sharedPiS_,@object # @_Z15traspose_sharedPiS_ .globl _Z15traspose_sharedPiS_ .p2align 3, 0x0 _Z15traspose_sharedPiS_: .quad _Z30__device_stub__traspose_sharedPiS_ .size _Z15traspose_sharedPiS_, 8 .type _Z14matrixAddPitchPiS_S_i,@object # @_Z14matrixAddPitchPiS_S_i .globl _Z14matrixAddPitchPiS_S_i .p2align 3, 0x0 _Z14matrixAddPitchPiS_S_i: .quad _Z29__device_stub__matrixAddPitchPiS_S_i .size _Z14matrixAddPitchPiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Error calc time... %s\n" .size .L.str, 23 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Error malloc %s\n" .size .L.str.1, 17 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%s\n" .size .L.str.2, 4 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Print matrix..." .size .L.str.3, 16 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "%5d" .size .L.str.4, 4 .type .L.str.5,@object # @.str.5 .L.str.5: .zero 1 .size .L.str.5, 1 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Time : %lf ms\n" .size .L.str.6, 15 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "Num Blocks (x:%d y:%d)\n" .size .L.str.7, 24 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4copyPiS_" .size .L__unnamed_1, 12 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z11copy_sharedPiS_" .size .L__unnamed_2, 20 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z8trasposePiS_" .size .L__unnamed_3, 16 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z15traspose_sharedPiS_" .size .L__unnamed_4, 24 .type .L__unnamed_5,@object # @4 .L__unnamed_5: .asciz "_Z14matrixAddPitchPiS_S_i" .size .L__unnamed_5, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__copyPiS_ .addrsig_sym _Z26__device_stub__copy_sharedPiS_ .addrsig_sym _Z23__device_stub__trasposePiS_ .addrsig_sym _Z30__device_stub__traspose_sharedPiS_ .addrsig_sym _Z29__device_stub__matrixAddPitchPiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4copyPiS_ .addrsig_sym _Z11copy_sharedPiS_ .addrsig_sym _Z8trasposePiS_ .addrsig_sym _Z15traspose_sharedPiS_ .addrsig_sym _Z14matrixAddPitchPiS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z14matrixAddPitchPiS_S_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e280000002100 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e680000002600 */ /*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */ /* 0x001fca00078e0200 */ /*0060*/ ISETP.GT.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GT.OR P0, PT, R3, 0x2, P0 ; /* 0x000000020300780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */ /* 0x000fe200078e0200 */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc800078e0207 */ /*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x0c0fe400078e0207 */ /*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fe200078e0207 */ /*0120*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */ /* 0x004fca0007ffe0ff */ /*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0140*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0150*/ BRA 0x150; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z15traspose_sharedPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e280000002600 */ /*0020*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R5, R5, c[0x0][0x4], R4 ; /* 0x0000010005057a24 */ /* 0x001fca00078e0204 */ /*0060*/ ISETP.GT.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0207 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R2, R0, R5, 0x1 ; /* 0x0000000500027211 */ /* 0x000fe200078e08ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R2, R6, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0206 */ /*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD R9, R7, 0x21, R4 ; /* 0x0000002107097824 */ /* 0x000fe200078e0204 */ /*0100*/ LEA R5, R5, R0, 0x2 ; /* 0x0000000005057211 */ /* 0x000fca00078e10ff */ /*0110*/ IMAD.WIDE R4, R5, R6, c[0x0][0x168] ; /* 0x00005a0005047625 */ /* 0x000fe200078e0206 */ /*0120*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */ /* 0x004fe80000004800 */ /*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0140*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000e280000004800 */ /*0150*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x001fe2000c101904 */ /*0160*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0170*/ BRA 0x170; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z8trasposePiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GT.AND P0, PT, R5, 0x1, PT ; /* 0x000000010500780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0203 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R2, R5, R0, 0x2 ; /* 0x0000000005027211 */ /* 0x000fe200078e10ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R2, R4, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fcc00078e0204 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ LEA R5, R0, R5, 0x1 ; /* 0x0000000500057211 */ /* 0x000fca00078e08ff */ /*0100*/ IMAD.WIDE R4, R5, R4, c[0x0][0x168] ; /* 0x00005a0005047625 */ /* 0x000fca00078e0204 */ /*0110*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z11copy_sharedPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R4, SR_TID.Y ; /* 0x0000000000047919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R4 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0204 */ /*0060*/ ISETP.GT.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R0, R3, R0, 0x2 ; /* 0x0000000003007211 */ /* 0x000fe200078e10ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e020b */ /*00e0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD R9, R5, 0x21, R4 ; /* 0x0000002105097824 */ /* 0x000fe400078e0204 */ /*0100*/ IMAD.WIDE R4, R0, R11, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc600078e020b */ /*0110*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */ /* 0x004fe80000004800 */ /*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0130*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */ /* 0x000e280000004800 */ /*0140*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */ /* 0x001fe2000c101904 */ /*0150*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0160*/ BRA 0x160; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z4copyPiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002600 */ /*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002200 */ /*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e680000002500 */ /*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x001fca00078e0202 */ /*0060*/ ISETP.GT.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */ /* 0x000fe20003f04270 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0080*/ ISETP.GT.OR P0, PT, R0, 0x3, P0 ; /* 0x000000030000780c */ /* 0x000fda0000704670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*00b0*/ LEA R0, R3, R0, 0x2 ; /* 0x0000000003007211 */ /* 0x000fe200078e10ff */ /*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*00d0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0205 */ /*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fca00078e0205 */ /*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*0110*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0120*/ BRA 0x120; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z4copyPiS_ .globl _Z4copyPiS_ .p2align 8 .type _Z4copyPiS_,@function _Z4copyPiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_gt_i32_e32 vcc_lo, 4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i32_e64 s2, 2, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v0, v1, 2, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z4copyPiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z4copyPiS_, .Lfunc_end0-_Z4copyPiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z11copy_sharedPiS_ .globl _Z11copy_sharedPiS_ .p2align 8 .type _Z11copy_sharedPiS_,@function _Z11copy_sharedPiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, 4, v2 v_cmp_gt_i32_e64 s2, 2, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB1_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v2, v3, 2, v2 v_lshlrev_b32_e32 v0, 2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_mad_u32_u24 v0, v1, 0x84, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s1, v3, vcc_lo global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) ds_store_b32 v0, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v4, v0 v_add_co_u32 v0, vcc_lo, s2, v2 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v3, vcc_lo s_waitcnt lgkmcnt(0) global_store_b32 v[0:1], v4, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11copy_sharedPiS_ .amdhsa_group_segment_fixed_size 4224 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z11copy_sharedPiS_, .Lfunc_end1-_Z11copy_sharedPiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z8trasposePiS_ .globl _Z8trasposePiS_ .p2align 8 .type _Z8trasposePiS_,@function _Z8trasposePiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_gt_i32_e32 vcc_lo, 4, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i32_e64 s2, 2, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB2_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v2, v1, 2, v0 v_lshl_add_u32 v0, v0, 1, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v3, 31, v2 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[2:3], 2, v[2:3] v_lshlrev_b64 v[0:1], 2, v[0:1] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v2, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) global_store_b32 v[0:1], v2, off .LBB2_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z8trasposePiS_ .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z8trasposePiS_, .Lfunc_end2-_Z8trasposePiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z15traspose_sharedPiS_ .globl _Z15traspose_sharedPiS_ .p2align 8 .type _Z15traspose_sharedPiS_,@function _Z15traspose_sharedPiS_: s_load_b32 s2, s[0:1], 0x1c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v0, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, 4, v2 v_cmp_gt_i32_e64 s2, 2, v3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB3_2 s_load_b128 s[0:3], s[0:1], 0x0 v_lshl_add_u32 v4, v2, 1, v3 v_lshlrev_b32_e32 v0, 2, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_ashrrev_i32_e32 v5, 31, v4 v_mad_u32_u24 v1, v1, 0x84, v0 v_lshl_add_u32 v0, v3, 2, v2 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[4:5], 2, v[4:5] s_waitcnt lgkmcnt(0) v_add_co_u32 v4, vcc_lo, s0, v4 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo global_load_b32 v4, v[4:5], off s_waitcnt vmcnt(0) ds_store_b32 v1, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv ds_load_b32 v2, v1 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add_co_u32 v0, vcc_lo, s2, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_waitcnt lgkmcnt(0) global_store_b32 v[0:1], v2, off .LBB3_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z15traspose_sharedPiS_ .amdhsa_group_segment_fixed_size 4224 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end3: .size _Z15traspose_sharedPiS_, .Lfunc_end3-_Z15traspose_sharedPiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z14matrixAddPitchPiS_S_i .globl _Z14matrixAddPitchPiS_S_i .p2align 8 .type _Z14matrixAddPitchPiS_S_i,@function _Z14matrixAddPitchPiS_S_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b32 s3, s[0:1], 0x18 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s4, s2, 0xffff s_lshr_b32 s2, s2, 16 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4] v_cmp_ge_i32_e32 vcc_lo, s3, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_gt_i32_e64 s2, 3, v1 s_and_b32 s2, vcc_lo, s2 s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s4, s2 s_cbranch_execz .LBB4_2 s_load_b128 s[4:7], s[0:1], 0x0 v_mad_u64_u32 v[2:3], null, v1, s3, v[0:1] s_load_b64 s[0:1], s[0:1], 0x10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[0:1], 2, v[2:3] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v0 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo v_add_co_u32 v4, vcc_lo, s6, v0 v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo v_add_co_u32 v0, vcc_lo, s0, v0 global_load_b32 v2, v[2:3], off global_load_b32 v3, v[4:5], off v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off .LBB4_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14matrixAddPitchPiS_S_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end4: .size _Z14matrixAddPitchPiS_S_i, .Lfunc_end4-_Z14matrixAddPitchPiS_S_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z4copyPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z4copyPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 4224 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11copy_sharedPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z11copy_sharedPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z8trasposePiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z8trasposePiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 4224 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z15traspose_sharedPiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z15traspose_sharedPiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14matrixAddPitchPiS_S_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14matrixAddPitchPiS_S_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00114244_00000000-6_traspose.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Error calc time... %s\n" .text .globl _Z8get_timev .type _Z8get_timev, @function _Z8get_timev: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 24(%rsp) xorl %eax, %eax movq %rsp, %rsi movl $0, %edi call clock_gettime@PLT testl %eax, %eax js .L7 imulq $1000000000, (%rsp), %rax addq 8(%rsp), %rax movq 24(%rsp), %rdx subq %fs:40, %rdx jne .L8 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state call __errno_location@PLT movl (%rax), %edi call strerror@PLT movq %rax, %rcx leaq .LC0(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z8get_timev, .-_Z8get_timev .section .rodata.str1.1 .LC1: .string "Error malloc %s\n" .text .globl _Z13mi_malloc_intPPii .type _Z13mi_malloc_intPPii, @function _Z13mi_malloc_intPPii: .LFB2058: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $8, %rsp .cfi_def_cfa_offset 32 movq %rdi, %rbp movslq %esi, %rbx salq $2, %rbx movq %rbx, %rdi call malloc@PLT movq %rax, 0(%rbp) testq %rax, %rax je .L12 movq %rax, %rdi movq %rbx, %rcx movq %rbx, %rdx movl $0, %esi call __memset_chk@PLT addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L12: .cfi_restore_state call __errno_location@PLT movl (%rax), %edi call strerror@PLT movq %rax, %rcx leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z13mi_malloc_intPPii, .-_Z13mi_malloc_intPPii .globl _Z4initPi .type _Z4initPi, @function _Z4initPi: .LFB2059: .cfi_startproc endbr64 movl $0, (%rdi) movl $1, 4(%rdi) movl $2, 8(%rdi) movl $3, 12(%rdi) movl $0, %eax .L14: leal 4(%rax), %edx movl %edx, 16(%rdi,%rax,4) addq $1, %rax cmpq $4, %rax jne .L14 ret .cfi_endproc .LFE2059: .size _Z4initPi, .-_Z4initPi .section .rodata.str1.1 .LC2: .string "Print matrix..." .LC3: .string "%s\n" .LC4: .string "%5d" .LC5: .string "" .text .globl _Z12print_matrixPKiii .type _Z12print_matrixPKiii, @function _Z12print_matrixPKiii: .LFB2060: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $40, %rsp .cfi_def_cfa_offset 96 movq %rdi, 16(%rsp) movl %esi, %r15d movl %edx, %ebx movl %edx, 12(%rsp) leaq .LC2(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT testl %ebx, %ebx jle .L17 movl $0, %r14d movl $0, %r13d movslq %r15d, %rax movq %rax, 24(%rsp) leaq .LC4(%rip), %r12 jmp .L18 .L20: movslq %r14d, %rax movq 16(%rsp), %rsi leaq (%rsi,%rax,4), %rbx movq 24(%rsp), %rdi addq %rdi, %rax leaq (%rsi,%rax,4), %rbp .L19: movl (%rbx), %ecx movq %r12, %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L19 .L21: leaq .LC5(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT addl $1, %r13d addl %r15d, %r14d cmpl %r13d, 12(%rsp) je .L17 .L18: testl %r15d, %r15d jg .L20 jmp .L21 .L17: leaq .LC5(%rip), %rcx leaq .LC3(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT addq $40, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _Z12print_matrixPKiii, .-_Z12print_matrixPKiii .globl _Z25__device_stub__Z4copyPiS_PiS_ .type _Z25__device_stub__Z4copyPiS_PiS_, @function _Z25__device_stub__Z4copyPiS_PiS_: .LFB2088: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 104(%rsp), %rax subq %fs:40, %rax jne .L29 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z4copyPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z25__device_stub__Z4copyPiS_PiS_, .-_Z25__device_stub__Z4copyPiS_PiS_ .globl _Z4copyPiS_ .type _Z4copyPiS_, @function _Z4copyPiS_: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z25__device_stub__Z4copyPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z4copyPiS_, .-_Z4copyPiS_ .globl _Z33__device_stub__Z11copy_sharedPiS_PiS_ .type _Z33__device_stub__Z11copy_sharedPiS_PiS_, @function _Z33__device_stub__Z11copy_sharedPiS_PiS_: .LFB2090: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L36 .L32: movq 104(%rsp), %rax subq %fs:40, %rax jne .L37 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z11copy_sharedPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L32 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2090: .size _Z33__device_stub__Z11copy_sharedPiS_PiS_, .-_Z33__device_stub__Z11copy_sharedPiS_PiS_ .globl _Z11copy_sharedPiS_ .type _Z11copy_sharedPiS_, @function _Z11copy_sharedPiS_: .LFB2091: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z11copy_sharedPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _Z11copy_sharedPiS_, .-_Z11copy_sharedPiS_ .globl _Z29__device_stub__Z8trasposePiS_PiS_ .type _Z29__device_stub__Z8trasposePiS_PiS_, @function _Z29__device_stub__Z8trasposePiS_PiS_: .LFB2092: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L44 .L40: movq 104(%rsp), %rax subq %fs:40, %rax jne .L45 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z8trasposePiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L40 .L45: call __stack_chk_fail@PLT .cfi_endproc .LFE2092: .size _Z29__device_stub__Z8trasposePiS_PiS_, .-_Z29__device_stub__Z8trasposePiS_PiS_ .globl _Z8trasposePiS_ .type _Z8trasposePiS_, @function _Z8trasposePiS_: .LFB2093: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z8trasposePiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2093: .size _Z8trasposePiS_, .-_Z8trasposePiS_ .section .rodata.str1.1 .LC7: .string "Time : %lf ms\n" .LC8: .string "Num Blocks (x:%d y:%d)\n" .text .globl _Z8trasposev .type _Z8trasposev, @function _Z8trasposev: .LFB2062: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $72, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movq $0, 16(%rsp) movq $0, 24(%rsp) leaq 8(%rsp), %rdi movl $8, %esi call _Z13mi_malloc_intPPii movq 8(%rsp), %rbx movq %rbx, %rdi call _Z4initPi movl $2, %edx movl $4, %esi movq %rbx, %rdi call _Z12print_matrixPKiii leaq 16(%rsp), %rdi movl $32, %esi call cudaMalloc@PLT movl $1, %ecx movl $32, %edx movq %rbx, %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq 24(%rsp), %rdi movl $32, %esi call cudaMalloc@PLT movl $32, %edx movl $0, %esi movq 24(%rsp), %rdi call cudaMemset@PLT movl $1, 40(%rsp) movl $1, 52(%rsp) call _Z8get_timev movq %rax, %rbp movl $1, 44(%rsp) movl $1, 48(%rsp) movl $32, 32(%rsp) movl $32, 36(%rsp) movl 40(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movq 44(%rsp), %rdi movl 52(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L54 .L49: movl $2, %ecx movl $32, %edx movq 24(%rsp), %rsi movq %rbx, %rdi call cudaMemcpy@PLT call _Z8get_timev subq %rbp, %rax js .L50 pxor %xmm0, %xmm0 cvtsi2ssq %rax, %xmm0 .L51: divss .LC6(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 leaq .LC7(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $1, %eax call __fprintf_chk@PLT movl $4, %edx movl $2, %esi movq %rbx, %rdi call _Z12print_matrixPKiii movl $1, %r8d movl $1, %ecx leaq .LC8(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L55 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L54: .cfi_restore_state movq 24(%rsp), %rsi movq 16(%rsp), %rdi call _Z29__device_stub__Z8trasposePiS_PiS_ jmp .L49 .L50: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm0, %xmm0 cvtsi2ssq %rdx, %xmm0 addss %xmm0, %xmm0 jmp .L51 .L55: call __stack_chk_fail@PLT .cfi_endproc .LFE2062: .size _Z8trasposev, .-_Z8trasposev .globl main .type main, @function main: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z8trasposev movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size main, .-main .globl _Z37__device_stub__Z15traspose_sharedPiS_PiS_ .type _Z37__device_stub__Z15traspose_sharedPiS_PiS_, @function _Z37__device_stub__Z15traspose_sharedPiS_PiS_: .LFB2094: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L62 .L58: movq 104(%rsp), %rax subq %fs:40, %rax jne .L63 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L62: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z15traspose_sharedPiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L58 .L63: call __stack_chk_fail@PLT .cfi_endproc .LFE2094: .size _Z37__device_stub__Z15traspose_sharedPiS_PiS_, .-_Z37__device_stub__Z15traspose_sharedPiS_PiS_ .globl _Z15traspose_sharedPiS_ .type _Z15traspose_sharedPiS_, @function _Z15traspose_sharedPiS_: .LFB2095: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z15traspose_sharedPiS_PiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2095: .size _Z15traspose_sharedPiS_, .-_Z15traspose_sharedPiS_ .globl _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i .type _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i, @function _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i: .LFB2096: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L70 .L66: movq 136(%rsp), %rax subq %fs:40, %rax jne .L71 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L70: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14matrixAddPitchPiS_S_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L66 .L71: call __stack_chk_fail@PLT .cfi_endproc .LFE2096: .size _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i, .-_Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i .globl _Z14matrixAddPitchPiS_S_i .type _Z14matrixAddPitchPiS_S_i, @function _Z14matrixAddPitchPiS_S_i: .LFB2097: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2097: .size _Z14matrixAddPitchPiS_S_i, .-_Z14matrixAddPitchPiS_S_i .globl _Z8addPitchv .type _Z8addPitchv, @function _Z8addPitchv: .LFB2061: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $72, %rsp .cfi_def_cfa_offset 112 movq %fs:40, %rax movq %rax, 56(%rsp) xorl %eax, %eax movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $32, %edi call malloc@PLT movq %rax, %rbp movl $32, %edi call malloc@PLT movq %rax, %rbx movl $32, %edi call malloc@PLT movq %rax, %r12 movl $0, %eax .L75: movl %eax, 0(%rbp,%rax,4) movl %eax, (%rbx,%rax,4) addq $1, %rax cmpq $8, %rax jne .L75 leaq 24(%rsp), %r13 movq %rsp, %rdi movl $2, %ecx movl $16, %edx movq %r13, %rsi call cudaMallocPitch@PLT leaq 8(%rsp), %rdi movl $2, %ecx movl $16, %edx movq %r13, %rsi call cudaMallocPitch@PLT leaq 16(%rsp), %rdi movl $2, %ecx movl $16, %edx movq %r13, %rsi call cudaMallocPitch@PLT subq $8, %rsp .cfi_def_cfa_offset 120 pushq $1 .cfi_def_cfa_offset 128 movl $2, %r9d movl $16, %r8d movl $16, %ecx movq %rbp, %rdx movq 40(%rsp), %rsi movq 16(%rsp), %rdi call cudaMemcpy2D@PLT movl $1, (%rsp) movl $2, %r9d movl $16, %r8d movl $16, %ecx movq %rbx, %rdx movq 40(%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpy2D@PLT movl $16, 48(%rsp) movl $16, 52(%rsp) addq $16, %rsp .cfi_def_cfa_offset 112 movl 40(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 32(%rsp), %rdx movq 44(%rsp), %rdi movl 52(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L80 .L76: subq $8, %rsp .cfi_def_cfa_offset 120 pushq $2 .cfi_def_cfa_offset 128 movl $2, %r9d movl $16, %r8d movq 40(%rsp), %rcx movq 32(%rsp), %rdx movl $16, %esi movq %r12, %rdi call cudaMemcpy2D@PLT addq $16, %rsp .cfi_def_cfa_offset 112 movl $4, %edx movl $2, %esi movq %r12, %rdi call _Z12print_matrixPKiii movq %rbp, %rdi call free@PLT movq %rbx, %rdi call free@PLT movq %r12, %rdi call free@PLT movq (%rsp), %rdi call cudaFree@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 16(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rax subq %fs:40, %rax jne .L81 addq $72, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L80: .cfi_restore_state movq 24(%rsp), %rcx shrq $2, %rcx movq 16(%rsp), %rdx movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z39__device_stub__Z14matrixAddPitchPiS_S_iPiS_S_i jmp .L76 .L81: call __stack_chk_fail@PLT .cfi_endproc .LFE2061: .size _Z8addPitchv, .-_Z8addPitchv .section .rodata.str1.1 .LC9: .string "_Z14matrixAddPitchPiS_S_i" .LC10: .string "_Z15traspose_sharedPiS_" .LC11: .string "_Z8trasposePiS_" .LC12: .string "_Z11copy_sharedPiS_" .LC13: .string "_Z4copyPiS_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2099: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z14matrixAddPitchPiS_S_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z15traspose_sharedPiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), %rdx movq %rdx, %rcx leaq _Z8trasposePiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC12(%rip), %rdx movq %rdx, %rcx leaq _Z11copy_sharedPiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC13(%rip), %rdx movq %rdx, %rcx leaq _Z4copyPiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2099: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC6: .long 1232348160 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "traspose.hip" .globl _Z19__device_stub__copyPiS_ # -- Begin function _Z19__device_stub__copyPiS_ .p2align 4, 0x90 .type _Z19__device_stub__copyPiS_,@function _Z19__device_stub__copyPiS_: # @_Z19__device_stub__copyPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z4copyPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z19__device_stub__copyPiS_, .Lfunc_end0-_Z19__device_stub__copyPiS_ .cfi_endproc # -- End function .globl _Z26__device_stub__copy_sharedPiS_ # -- Begin function _Z26__device_stub__copy_sharedPiS_ .p2align 4, 0x90 .type _Z26__device_stub__copy_sharedPiS_,@function _Z26__device_stub__copy_sharedPiS_: # @_Z26__device_stub__copy_sharedPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z11copy_sharedPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end1: .size _Z26__device_stub__copy_sharedPiS_, .Lfunc_end1-_Z26__device_stub__copy_sharedPiS_ .cfi_endproc # -- End function .globl _Z23__device_stub__trasposePiS_ # -- Begin function _Z23__device_stub__trasposePiS_ .p2align 4, 0x90 .type _Z23__device_stub__trasposePiS_,@function _Z23__device_stub__trasposePiS_: # @_Z23__device_stub__trasposePiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z8trasposePiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end2: .size _Z23__device_stub__trasposePiS_, .Lfunc_end2-_Z23__device_stub__trasposePiS_ .cfi_endproc # -- End function .globl _Z30__device_stub__traspose_sharedPiS_ # -- Begin function _Z30__device_stub__traspose_sharedPiS_ .p2align 4, 0x90 .type _Z30__device_stub__traspose_sharedPiS_,@function _Z30__device_stub__traspose_sharedPiS_: # @_Z30__device_stub__traspose_sharedPiS_ .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z15traspose_sharedPiS_, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end3: .size _Z30__device_stub__traspose_sharedPiS_, .Lfunc_end3-_Z30__device_stub__traspose_sharedPiS_ .cfi_endproc # -- End function .globl _Z29__device_stub__matrixAddPitchPiS_S_i # -- Begin function _Z29__device_stub__matrixAddPitchPiS_S_i .p2align 4, 0x90 .type _Z29__device_stub__matrixAddPitchPiS_S_i,@function _Z29__device_stub__matrixAddPitchPiS_S_i: # @_Z29__device_stub__matrixAddPitchPiS_S_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14matrixAddPitchPiS_S_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end4: .size _Z29__device_stub__matrixAddPitchPiS_S_i, .Lfunc_end4-_Z29__device_stub__matrixAddPitchPiS_S_i .cfi_endproc # -- End function .globl _Z8get_timev # -- Begin function _Z8get_timev .p2align 4, 0x90 .type _Z8get_timev,@function _Z8get_timev: # @_Z8get_timev .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $16, %rsp .cfi_def_cfa_offset 32 .cfi_offset %rbx, -16 movq %rsp, %rsi xorl %edi, %edi callq clock_gettime testl %eax, %eax js .LBB5_2 # %bb.1: imulq $1000000000, (%rsp), %rax # imm = 0x3B9ACA00 addq 8(%rsp), %rax addq $16, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 retq .LBB5_2: .cfi_def_cfa_offset 32 movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str, %esi movq %rbx, %rdi movq %rax, %rdx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end5: .size _Z8get_timev, .Lfunc_end5-_Z8get_timev .cfi_endproc # -- End function .globl _Z13mi_malloc_intPPii # -- Begin function _Z13mi_malloc_intPPii .p2align 4, 0x90 .type _Z13mi_malloc_intPPii,@function _Z13mi_malloc_intPPii: # @_Z13mi_malloc_intPPii .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 pushq %rax .cfi_def_cfa_offset 32 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movq %rdi, %r14 movslq %esi, %rbx shlq $2, %rbx movq %rbx, %rdi callq malloc movq %rax, (%r14) testq %rax, %rax je .LBB6_1 # %bb.2: movq %rax, %rdi xorl %esi, %esi movq %rbx, %rdx addq $8, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 jmp memset@PLT # TAILCALL .LBB6_1: .cfi_def_cfa_offset 32 movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str.1, %esi movq %rbx, %rdi movq %rax, %rdx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end6: .size _Z13mi_malloc_intPPii, .Lfunc_end6-_Z13mi_malloc_intPPii .cfi_endproc # -- End function .globl _Z4initPi # -- Begin function _Z4initPi .p2align 4, 0x90 .type _Z4initPi,@function _Z4initPi: # @_Z4initPi .cfi_startproc # %bb.0: xorl %eax, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB7_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB7_2 Depth 2 xorl %edx, %edx .p2align 4, 0x90 .LBB7_2: # Parent Loop BB7_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rax,%rdx), %esi movl %esi, (%rdi,%rdx,4) incq %rdx cmpq $4, %rdx jne .LBB7_2 # %bb.3: # in Loop: Header=BB7_1 Depth=1 leaq 1(%rcx), %rdx addq $4, %rax addq $16, %rdi testq %rcx, %rcx movq %rdx, %rcx je .LBB7_1 # %bb.4: retq .Lfunc_end7: .size _Z4initPi, .Lfunc_end7-_Z4initPi .cfi_endproc # -- End function .globl _Z12print_matrixPKiii # -- Begin function _Z12print_matrixPKiii .p2align 4, 0x90 .type _Z12print_matrixPKiii,@function _Z12print_matrixPKiii: # @_Z12print_matrixPKiii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $24, %rsp .cfi_def_cfa_offset 80 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edx, %ebp movl %esi, %ebx movq %rdi, 8(%rsp) # 8-byte Spill movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf testl %ebp, %ebp jle .LBB8_6 # %bb.1: # %.preheader.lr.ph movl %ebp, %eax movq %rax, 16(%rsp) # 8-byte Spill movl %ebx, %r12d xorl %ebp, %ebp xorl %r13d, %r13d jmp .LBB8_2 .p2align 4, 0x90 .LBB8_5: # %._crit_edge # in Loop: Header=BB8_2 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r13 addl %ebx, %ebp cmpq 16(%rsp), %r13 # 8-byte Folded Reload je .LBB8_6 .LBB8_2: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB8_4 Depth 2 testl %ebx, %ebx jle .LBB8_5 # %bb.3: # %.lr.ph # in Loop: Header=BB8_2 Depth=1 movl %ebp, %eax movq 8(%rsp), %rcx # 8-byte Reload leaq (%rcx,%rax,4), %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB8_4: # Parent Loop BB8_2 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%r14,%r15,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %r15 cmpq %r15, %r12 jne .LBB8_4 jmp .LBB8_5 .LBB8_6: # %._crit_edge13 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 jmp fprintf # TAILCALL .Lfunc_end8: .size _Z12print_matrixPKiii, .Lfunc_end8-_Z12print_matrixPKiii .cfi_endproc # -- End function .globl _Z8addPitchv # -- Begin function _Z8addPitchv .p2align 4, 0x90 .type _Z8addPitchv,@function _Z8addPitchv: # @_Z8addPitchv .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $168, %rsp .cfi_def_cfa_offset 224 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $32, %edi callq malloc movq %rax, %rbx movl $32, %edi callq malloc movq %rax, %r14 movl $32, %edi callq malloc movq %rax, %r15 xorl %eax, %eax .p2align 4, 0x90 .LBB9_1: # =>This Inner Loop Header: Depth=1 movl %eax, (%rbx,%rax,4) movl %eax, (%r14,%rax,4) incq %rax cmpq $8, %rax jne .LBB9_1 # %bb.2: leaq 40(%rsp), %rdi leaq 16(%rsp), %r12 movl $16, %edx movl $2, %ecx movq %r12, %rsi callq hipMallocPitch leaq 32(%rsp), %rdi movl $16, %edx movl $2, %ecx movq %r12, %rsi callq hipMallocPitch leaq 24(%rsp), %rdi movl $16, %edx movl $2, %ecx movq %r12, %rsi callq hipMallocPitch movq 40(%rsp), %rdi movq 16(%rsp), %rsi movl $1, (%rsp) movl $16, %ecx movl $16, %r8d movl $2, %r9d movq %rbx, %rdx callq hipMemcpy2D movq 32(%rsp), %rdi movq 16(%rsp), %rsi movl $1, (%rsp) movl $16, %ecx movl $16, %r8d movl $2, %r9d movq %r14, %rdx callq hipMemcpy2D movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB9_4 # %bb.3: movq 40(%rsp), %rax movq 32(%rsp), %rcx movq 24(%rsp), %rdx movq 16(%rsp), %rsi shrq $2, %rsi movq %rax, 120(%rsp) movq %rcx, 112(%rsp) movq %rdx, 104(%rsp) movl %esi, 52(%rsp) leaq 120(%rsp), %rax movq %rax, 128(%rsp) leaq 112(%rsp), %rax movq %rax, 136(%rsp) leaq 104(%rsp), %rax movq %rax, 144(%rsp) leaq 52(%rsp), %rax movq %rax, 152(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 64(%rsp), %rax movq 56(%rsp), %rdi movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d movq %rdi, 8(%rsp) movq %rax, (%rsp) leaq 128(%rsp), %r9 movl $_Z14matrixAddPitchPiS_S_i, %edi callq hipLaunchKernel .LBB9_4: movq 24(%rsp), %rdx movq 16(%rsp), %rcx movl $2, (%rsp) movl $16, %esi movl $16, %r8d movl $2, %r9d movq %r15, %rdi callq hipMemcpy2D movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf movq %r15, %r12 xorl %r13d, %r13d .p2align 4, 0x90 .LBB9_5: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB9_6 Depth 2 xorl %ebp, %ebp .p2align 4, 0x90 .LBB9_6: # Parent Loop BB9_5 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%r12,%rbp,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %rbp cmpq $2, %rbp jne .LBB9_6 # %bb.7: # %._crit_edge.i # in Loop: Header=BB9_5 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r13 addq $8, %r12 cmpq $4, %r13 jne .LBB9_5 # %bb.8: # %_Z12print_matrixPKiii.exit movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf movq %rbx, %rdi callq free movq %r14, %rdi callq free movq %r15, %rdi callq free movq 40(%rsp), %rdi callq hipFree movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree addq $168, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end9: .size _Z8addPitchv, .Lfunc_end9-_Z8addPitchv .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z8trasposev .LCPI10_0: .long 0x49742400 # float 1.0E+6 .text .globl _Z8trasposev .p2align 4, 0x90 .type _Z8trasposev,@function _Z8trasposev: # @_Z8trasposev .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $104, %rsp .cfi_def_cfa_offset 144 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq $0, 32(%rsp) movq $0, 8(%rsp) movl $32, %edi callq malloc testq %rax, %rax je .LBB10_1 # %bb.3: # %_Z13mi_malloc_intPPii.exit movq %rax, %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rax) movups %xmm0, (%rax) xorl %eax, %eax xorl %ecx, %ecx .p2align 4, 0x90 .LBB10_4: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB10_5 Depth 2 movl $4, %edx movq %rax, %rsi .p2align 4, 0x90 .LBB10_5: # Parent Loop BB10_4 Depth=1 # => This Inner Loop Header: Depth=2 movl %esi, (%rbx,%rsi,4) incq %rsi decq %rdx jne .LBB10_5 # %bb.6: # in Loop: Header=BB10_4 Depth=1 leaq 1(%rcx), %rdx addq $4, %rax testq %rcx, %rcx movq %rdx, %rcx je .LBB10_4 # %bb.7: # %_Z4initPi.exit movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf movq %rbx, %r14 xorl %r15d, %r15d .p2align 4, 0x90 .LBB10_8: # %.preheader.i7 # =>This Loop Header: Depth=1 # Child Loop BB10_9 Depth 2 xorl %r12d, %r12d .p2align 4, 0x90 .LBB10_9: # Parent Loop BB10_8 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%r14,%r12,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %r12 cmpq $4, %r12 jne .LBB10_9 # %bb.10: # %._crit_edge.i # in Loop: Header=BB10_8 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r15 addq $16, %r14 cmpq $2, %r15 jne .LBB10_8 # %bb.11: # %_Z12print_matrixPKiii.exit movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf leaq 32(%rsp), %rdi movl $32, %esi callq hipMalloc movq 32(%rsp), %rdi movl $32, %edx movq %rbx, %rsi movl $1, %ecx callq hipMemcpy leaq 8(%rsp), %rdi movl $32, %esi callq hipMalloc movq 8(%rsp), %rdi movl $32, %edx xorl %esi, %esi callq hipMemset leaq 16(%rsp), %rsi xorl %edi, %edi callq clock_gettime testl %eax, %eax js .LBB10_12 # %bb.13: # %_Z8get_timev.exit movq 16(%rsp), %r12 movq 24(%rsp), %r15 movabsq $4294967297, %rdi # imm = 0x100000001 movabsq $137438953504, %rdx # imm = 0x2000000020 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB10_15 # %bb.14: movq 32(%rsp), %rax movq 8(%rsp), %rcx movq %rax, 96(%rsp) movq %rcx, 88(%rsp) leaq 96(%rsp), %rax movq %rax, 16(%rsp) leaq 88(%rsp), %rax movq %rax, 24(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 16(%rsp), %r9 movl $_Z8trasposePiS_, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB10_15: movq 8(%rsp), %rsi movl $32, %edx movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movq stdout(%rip), %r14 leaq 16(%rsp), %rsi xorl %edi, %edi callq clock_gettime testl %eax, %eax js .LBB10_12 # %bb.16: # %_Z8get_timev.exit11 movq 16(%rsp), %rax subq %r12, %rax movq 24(%rsp), %rcx subq %r15, %rcx imulq $1000000000, %rax, %rax # imm = 0x3B9ACA00 addq %rcx, %rax js .LBB10_17 # %bb.18: # %_Z8get_timev.exit11 cvtsi2ss %rax, %xmm0 jmp .LBB10_19 .LBB10_17: movq %rax, %rcx shrq %rcx andl $1, %eax orq %rcx, %rax cvtsi2ss %rax, %xmm0 addss %xmm0, %xmm0 .LBB10_19: # %_Z8get_timev.exit11 divss .LCPI10_0(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 movl $.L.str.6, %esi movq %r14, %rdi movb $1, %al callq fprintf movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.3, %edx xorl %eax, %eax callq fprintf xorl %r14d, %r14d .p2align 4, 0x90 .LBB10_20: # %.preheader.i12 # =>This Loop Header: Depth=1 # Child Loop BB10_21 Depth 2 xorl %r15d, %r15d .p2align 4, 0x90 .LBB10_21: # Parent Loop BB10_20 Depth=1 # => This Inner Loop Header: Depth=2 movq stdout(%rip), %rdi movl (%rbx,%r15,4), %edx movl $.L.str.4, %esi xorl %eax, %eax callq fprintf incq %r15 cmpq $2, %r15 jne .LBB10_21 # %bb.22: # %._crit_edge.i17 # in Loop: Header=BB10_20 Depth=1 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf incq %r14 addq $8, %rbx cmpq $4, %r14 jne .LBB10_20 # %bb.23: # %_Z12print_matrixPKiii.exit20 movq stdout(%rip), %rdi movl $.L.str.2, %esi movl $.L.str.5, %edx xorl %eax, %eax callq fprintf movq stdout(%rip), %rdi movl $.L.str.7, %esi movl $1, %edx movl $1, %ecx xorl %eax, %eax callq fprintf addq $104, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB10_12: .cfi_def_cfa_offset 144 movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str, %esi jmp .LBB10_2 .LBB10_1: movq stderr(%rip), %rbx callq __errno_location movl (%rax), %edi callq strerror movl $.L.str.1, %esi .LBB10_2: movq %rbx, %rdi movq %rax, %rdx xorl %eax, %eax callq fprintf movl $1, %edi callq exit .Lfunc_end10: .size _Z8trasposev, .Lfunc_end10-_Z8trasposev .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 callq _Z8trasposev xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end11: .size main, .Lfunc_end11-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB12_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB12_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z4copyPiS_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11copy_sharedPiS_, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8trasposePiS_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z15traspose_sharedPiS_, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14matrixAddPitchPiS_S_i, %esi movl $.L__unnamed_5, %edx movl $.L__unnamed_5, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end12: .size __hip_module_ctor, .Lfunc_end12-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB13_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB13_2: retq .Lfunc_end13: .size __hip_module_dtor, .Lfunc_end13-__hip_module_dtor .cfi_endproc # -- End function .type _Z4copyPiS_,@object # @_Z4copyPiS_ .section .rodata,"a",@progbits .globl _Z4copyPiS_ .p2align 3, 0x0 _Z4copyPiS_: .quad _Z19__device_stub__copyPiS_ .size _Z4copyPiS_, 8 .type _Z11copy_sharedPiS_,@object # @_Z11copy_sharedPiS_ .globl _Z11copy_sharedPiS_ .p2align 3, 0x0 _Z11copy_sharedPiS_: .quad _Z26__device_stub__copy_sharedPiS_ .size _Z11copy_sharedPiS_, 8 .type _Z8trasposePiS_,@object # @_Z8trasposePiS_ .globl _Z8trasposePiS_ .p2align 3, 0x0 _Z8trasposePiS_: .quad _Z23__device_stub__trasposePiS_ .size _Z8trasposePiS_, 8 .type _Z15traspose_sharedPiS_,@object # @_Z15traspose_sharedPiS_ .globl _Z15traspose_sharedPiS_ .p2align 3, 0x0 _Z15traspose_sharedPiS_: .quad _Z30__device_stub__traspose_sharedPiS_ .size _Z15traspose_sharedPiS_, 8 .type _Z14matrixAddPitchPiS_S_i,@object # @_Z14matrixAddPitchPiS_S_i .globl _Z14matrixAddPitchPiS_S_i .p2align 3, 0x0 _Z14matrixAddPitchPiS_S_i: .quad _Z29__device_stub__matrixAddPitchPiS_S_i .size _Z14matrixAddPitchPiS_S_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Error calc time... %s\n" .size .L.str, 23 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Error malloc %s\n" .size .L.str.1, 17 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%s\n" .size .L.str.2, 4 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Print matrix..." .size .L.str.3, 16 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "%5d" .size .L.str.4, 4 .type .L.str.5,@object # @.str.5 .L.str.5: .zero 1 .size .L.str.5, 1 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz "Time : %lf ms\n" .size .L.str.6, 15 .type .L.str.7,@object # @.str.7 .L.str.7: .asciz "Num Blocks (x:%d y:%d)\n" .size .L.str.7, 24 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z4copyPiS_" .size .L__unnamed_1, 12 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z11copy_sharedPiS_" .size .L__unnamed_2, 20 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z8trasposePiS_" .size .L__unnamed_3, 16 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z15traspose_sharedPiS_" .size .L__unnamed_4, 24 .type .L__unnamed_5,@object # @4 .L__unnamed_5: .asciz "_Z14matrixAddPitchPiS_S_i" .size .L__unnamed_5, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z19__device_stub__copyPiS_ .addrsig_sym _Z26__device_stub__copy_sharedPiS_ .addrsig_sym _Z23__device_stub__trasposePiS_ .addrsig_sym _Z30__device_stub__traspose_sharedPiS_ .addrsig_sym _Z29__device_stub__matrixAddPitchPiS_S_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z4copyPiS_ .addrsig_sym _Z11copy_sharedPiS_ .addrsig_sym _Z8trasposePiS_ .addrsig_sym _Z15traspose_sharedPiS_ .addrsig_sym _Z14matrixAddPitchPiS_S_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> __global__ void init_numbers(int *d_numbers, int value, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { d_numbers[index] = index & 1; } } __global__ void local_blelloch_sum( int *d_input, int input_size, int *d_output, int inclusive ) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = tid + blockIdx.x * blockDim.x; // Copy to shared memory. // If index exceeds the input size use zero, so we keep a length which is // a power of two. temp[tid] = (index < input_size) ? d_input[index] : 0; __syncthreads(); // Create a binary tree, that reduces the (local) elements for (int k = 2; k <= blockDim.x; k <<= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; temp[tid] = temp[tid] + temp[tid - offset]; } __syncthreads(); } // Set the last (local) element to zero if (tid == (blockDim.x-1)) { temp[tid] = 0; } __syncthreads(); // Perform downsweep for (int k = blockDim.x; k > 1; k >>= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; float old_value = temp[tid]; int left_child_index = tid - offset; temp[tid] = temp[tid] + temp[left_child_index]; temp[left_child_index] = old_value; } __syncthreads(); } // Copy the results if the index does not execeed the input size if (index < input_size) { if (inclusive) { d_output[index] = (tid == blockDim.x-1) ? temp[tid] + d_input[index] : temp[tid+1]; } else { d_output[index] = temp[tid]; } } } __global__ void add_block_sums( int *d_input, int input_size, int *const d_block_sums ) { // Load the corresponding sum into shared memory __shared__ int block_sum; if (threadIdx.x == 0) { block_sum = d_block_sums[blockIdx.x]; } __syncthreads(); // Assert that the index does not execeed the input size int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < input_size) { // Increment all elements in this block by the corresponding block sum d_input[index] = d_input[index] + block_sum; } } __global__ void gather_every_nth( int *const d_input, int *d_output, int n ) { int index = threadIdx.x + blockIdx.x * blockDim.x; int element_index = (n-1) + (index * n); d_output[index] = d_input[element_index]; } /** * A two staged sum scan that can handle an arbitrary number of * elements between 0 and 1024^2. * * Stage 1: 1024 single Blelloch scans with 1024 elements each * Stage 2: 1 Blelloch scan of the 1024 maximums (as required) */ void sum_scan( int *d_input, int input_size, int *d_output, int inclusive ) { // Always use 1024 threads due to Blelloch only works on lenghts that are a // power of two. const int THREAD_COUNT = 1024; const int BLOCK_COUNT = ceil((double) input_size / (double) THREAD_COUNT); // Execute stage 1 int shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<BLOCK_COUNT, THREAD_COUNT, shared_size>>>(d_input, input_size, d_output, inclusive); // Execute state 2 (if necessary) if (BLOCK_COUNT > 1) { // Allocate memory for the block sums int *d_block_sums; cudaMalloc((void **) &d_block_sums, sizeof(int) * BLOCK_COUNT); // Gather local maximums gather_every_nth<<<1, BLOCK_COUNT>>>(d_output, d_block_sums, THREAD_COUNT); // Scan the final sums of the blocks shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<1, THREAD_COUNT, shared_size>>>(d_block_sums, BLOCK_COUNT, d_block_sums, 0); // Add the block sums to the input items add_block_sums<<<BLOCK_COUNT, THREAD_COUNT>>>(d_output, input_size, d_block_sums); // Free block sums cudaFree(d_block_sums); } } int main(int argc, char **argv) { int ELEMENT_COUNT = 4100; // Initialization int *d_numbers, *d_scan_result; cudaMalloc((void **) &d_numbers, ELEMENT_COUNT * sizeof(int)); cudaMalloc((void **) &d_scan_result, ELEMENT_COUNT * sizeof(int)); init_numbers<<<5, 1024>>>(d_numbers, 1, ELEMENT_COUNT); int h_numbers[ELEMENT_COUNT]; cudaMemcpy(h_numbers, d_numbers, ELEMENT_COUNT * sizeof(int), cudaMemcpyDeviceToHost); // Scan // simple_blelloch_scan<<<1, ELEMENT_COUNT, ELEMENT_COUNT * sizeof(float)>>>(d_numbers, d_scan_result, ELEMENT_COUNT); sum_scan(d_numbers, ELEMENT_COUNT, d_scan_result, 1); // Copy result int result[ELEMENT_COUNT]; cudaMemcpy(result, d_scan_result, ELEMENT_COUNT * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < ELEMENT_COUNT; i++) { printf("%i%s", result[i], (i % 20 == 19) ? "\n" : "\t"); } printf("\n"); cudaFree(d_numbers); cudaFree(d_scan_result); cudaDeviceSynchronize(); return 0; }
code for sm_80 Function : _Z16gather_every_nthPiS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ MOV R5, c[0x0][0x170] ; /* 0x00005c0000057a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R4, R3, c[0x0][0x0], R4 ; /* 0x0000000003047a24 */ /* 0x001fc800078e0204 */ /*0060*/ IMAD R0, R4, R5, c[0x0][0x170] ; /* 0x00005c0004007624 */ /* 0x000fe200078e0205 */ /*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc800000001ff */ /*0080*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */ /* 0x000fcc0007ffe0ff */ /*0090*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fcc00078e0205 */ /*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*00b0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fca00078e0205 */ /*00c0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */ /* 0x004fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z14add_block_sumsPiiS_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */ /* 0x000e220000002100 */ /*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0030*/ BSSY B0, 0xf0 ; /* 0x000000b000007945 */ /* 0x000fe40003800000 */ /*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e620000002500 */ /*0050*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */ /* 0x001fe20003f05270 */ /*0060*/ IMAD R0, R3, c[0x0][0x0], R2 ; /* 0x0000000003007a24 */ /* 0x002fca00078e0202 */ /*0070*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */ /* 0x000fce0003f26270 */ /*0080*/ @P0 BRA 0xe0 ; /* 0x0000005000000947 */ /* 0x000fea0003800000 */ /*0090*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */ /* 0x000e220000002500 */ /*00a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fd400000001ff */ /*00b0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x001fcc00078e0003 */ /*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */ /* 0x000ea8000c1e1900 */ /*00d0*/ STS [RZ], R2 ; /* 0x00000002ff007388 */ /* 0x0041e40000000800 */ /*00e0*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0100*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*0110*/ MOV R3, 0x4 ; /* 0x0000000400037802 */ /* 0x000fca0000000f00 */ /*0120*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x001fe400078e0203 */ /*0130*/ LDS R0, [RZ] ; /* 0x00000000ff007984 */ /* 0x000e280000000800 */ /*0140*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */ /* 0x000e24000c1e1900 */ /*0150*/ IADD3 R5, R0, R5, RZ ; /* 0x0000000500057210 */ /* 0x001fca0007ffe0ff */ /*0160*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0170*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0180*/ BRA 0x180; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z18local_blelloch_sumPiiS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e220000002500 */ /*0020*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0e7624 */ /* 0x000fe200078e00ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0040*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff027624 */ /* 0x000fe200078e00ff */ /*0050*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e220000002100 */ /*0060*/ BSSY B0, 0x120 ; /* 0x000000b000007945 */ /* 0x000fe20003800000 */ /*0070*/ IADD3 R5, R14, -0x1, RZ ; /* 0xffffffff0e057810 */ /* 0x000fe20007ffe0ff */ /*0080*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */ /* 0x000fe200078e00ff */ /*0090*/ ISETP.GE.U32.AND P2, PT, R2, 0x2, PT ; /* 0x000000020200780c */ /* 0x000fe20003f46070 */ /*00a0*/ IMAD R4, R3, c[0x0][0x0], R0 ; /* 0x0000000003047a24 */ /* 0x001fe200078e0200 */ /*00b0*/ ISETP.NE.AND P1, PT, R0, R5, PT ; /* 0x000000050000720c */ /* 0x000fe20003f25270 */ /*00c0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */ /* 0x000fc600078e00ff */ /*00d0*/ ISETP.GE.AND P0, PT, R4.reuse, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */ /* 0x040fe20003f06270 */ /*00e0*/ IMAD.WIDE R2, R4, R3, c[0x0][0x160] ; /* 0x0000580004027625 */ /* 0x000fd800078e0203 */ /*00f0*/ @P0 BRA 0x110 ; /* 0x0000001000000947 */ /* 0x000fea0003800000 */ /*0100*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */ /* 0x000164000c1e1900 */ /*0110*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0120*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */ /* 0x0203e80000004800 */ /*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fec0000010000 */ /*0140*/ @!P2 BRA 0x390 ; /* 0x000002400000a947 */ /* 0x000fea0003800000 */ /*0150*/ IADD3 R15, R0, 0x1, RZ ; /* 0x00000001000f7810 */ /* 0x002fe20007ffe0ff */ /*0160*/ IMAD.MOV.U32 R6, RZ, RZ, 0x2 ; /* 0x00000002ff067424 */ /* 0x000fc600078e00ff */ /*0170*/ IABS R7, R15 ; /* 0x0000000f00077213 */ /* 0x000fe40000000000 */ /*0180*/ IABS R11, R6.reuse ; /* 0x00000006000b7213 */ /* 0x080fe40000000000 */ /*0190*/ IABS R16, R6 ; /* 0x0000000600107213 */ /* 0x000fe40000000000 */ /*01a0*/ I2F.RP R10, R11 ; /* 0x0000000b000a7306 */ /* 0x000e620000209400 */ /*01b0*/ ISETP.GE.AND P4, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */ /* 0x000fce0003f86270 */ /*01c0*/ MUFU.RCP R10, R10 ; /* 0x0000000a000a7308 */ /* 0x002e640000001000 */ /*01d0*/ IADD3 R8, R10, 0xffffffe, RZ ; /* 0x0ffffffe0a087810 */ /* 0x002fe40007ffe0ff */ /*01e0*/ IABS R10, R15 ; /* 0x0000000f000a7213 */ /* 0x000fc80000000000 */ /*01f0*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */ /* 0x0002a4000021f000 */ /*0200*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */ /* 0x002fe400078e00ff */ /*0210*/ IMAD.MOV R12, RZ, RZ, -R9 ; /* 0x000000ffff0c7224 */ /* 0x004fc800078e0a09 */ /*0220*/ IMAD R13, R12, R11, RZ ; /* 0x0000000b0c0d7224 */ /* 0x000fc800078e02ff */ /*0230*/ IMAD.HI.U32 R12, R9, R13, R8 ; /* 0x0000000d090c7227 */ /* 0x000fc800078e0008 */ /*0240*/ IMAD.MOV R9, RZ, RZ, -R16 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0a10 */ /*0250*/ IMAD.HI.U32 R12, R12, R7, RZ ; /* 0x000000070c0c7227 */ /* 0x000fc800078e00ff */ /*0260*/ IMAD R12, R12, R9, R10 ; /* 0x000000090c0c7224 */ /* 0x000fca00078e020a */ /*0270*/ ISETP.GT.U32.AND P2, PT, R11, R12, PT ; /* 0x0000000c0b00720c */ /* 0x000fda0003f44070 */ /*0280*/ @!P2 IMAD.IADD R12, R12, 0x1, -R11 ; /* 0x000000010c0ca824 */ /* 0x000fe200078e0a0b */ /*0290*/ ISETP.NE.AND P2, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fc80003f45270 */ /*02a0*/ ISETP.GT.U32.AND P3, PT, R11, R12, PT ; /* 0x0000000c0b00720c */ /* 0x000fda0003f64070 */ /*02b0*/ @!P3 IMAD.IADD R12, R12, 0x1, -R11 ; /* 0x000000010c0cb824 */ /* 0x000fc800078e0a0b */ /*02c0*/ @!P4 IMAD.MOV R12, RZ, RZ, -R12 ; /* 0x000000ffff0cc224 */ /* 0x000fe200078e0a0c */ /*02d0*/ @!P2 LOP3.LUT R12, RZ, R6, RZ, 0x33, !PT ; /* 0x00000006ff0ca212 */ /* 0x000fc800078e33ff */ /*02e0*/ ISETP.NE.AND P2, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fda0003f45270 */ /*02f0*/ @!P2 SHF.R.U32.HI R9, RZ, 0x1, R6 ; /* 0x00000001ff09a819 */ /* 0x000fe20000011606 */ /*0300*/ @!P2 LDS R8, [R0.X4] ; /* 0x000000000008a984 */ /* 0x000fe20000004800 */ /*0310*/ IMAD.SHL.U32 R6, R6, 0x2, RZ ; /* 0x0000000206067824 */ /* 0x000fc600078e00ff */ /*0320*/ @!P2 IMAD.IADD R9, R0, 0x1, -R9 ; /* 0x000000010009a824 */ /* 0x000fcc00078e0a09 */ /*0330*/ @!P2 LDS R9, [R9.X4] ; /* 0x000000000909a984 */ /* 0x000e640000004800 */ /*0340*/ @!P2 IADD3 R11, R9, R8, RZ ; /* 0x00000008090ba210 */ /* 0x002fca0007ffe0ff */ /*0350*/ @!P2 STS [R0.X4], R11 ; /* 0x0000000b0000a388 */ /* 0x0003e80000004800 */ /*0360*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0370*/ ISETP.GT.U32.AND P2, PT, R6, c[0x0][0x0], PT ; /* 0x0000000006007a0c */ /* 0x000fda0003f44070 */ /*0380*/ @!P2 BRA 0x180 ; /* 0xfffffdf00000a947 */ /* 0x002fea000383ffff */ /*0390*/ @!P1 STS [R0.X4], RZ ; /* 0x000000ff00009388 */ /* 0x0023e80000004800 */ /*03a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*03b0*/ ISETP.GE.AND P1, PT, R14, 0x2, PT ; /* 0x000000020e00780c */ /* 0x000fe20003f26270 */ /*03c0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff067624 */ /* 0x000fd800078e00ff */ /*03d0*/ @!P1 BRA 0x670 ; /* 0x0000029000009947 */ /* 0x000fea0003800000 */ /*03e0*/ IADD3 R14, R0, 0x1, RZ ; /* 0x00000001000e7810 */ /* 0x002fc80007ffe0ff */ /*03f0*/ IABS R7, R14 ; /* 0x0000000e00077213 */ /* 0x000fe40000000000 */ /*0400*/ IABS R11, R6.reuse ; /* 0x00000006000b7213 */ /* 0x084fe20000000000 */ /*0410*/ BSSY B0, 0x630 ; /* 0x0000021000007945 */ /* 0x000fe20003800000 */ /*0420*/ IABS R15, R6 ; /* 0x00000006000f7213 */ /* 0x000fe40000000000 */ /*0430*/ I2F.RP R10, R11 ; /* 0x0000000b000a7306 */ /* 0x000e620000209400 */ /*0440*/ ISETP.GE.AND P3, PT, R14, RZ, PT ; /* 0x000000ff0e00720c */ /* 0x000fce0003f66270 */ /*0450*/ MUFU.RCP R10, R10 ; /* 0x0000000a000a7308 */ /* 0x002e640000001000 */ /*0460*/ IADD3 R8, R10, 0xffffffe, RZ ; /* 0x0ffffffe0a087810 */ /* 0x002fe40007ffe0ff */ /*0470*/ IABS R10, R14 ; /* 0x0000000e000a7213 */ /* 0x000fc80000000000 */ /*0480*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */ /* 0x0002a4000021f000 */ /*0490*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */ /* 0x002fe400078e00ff */ /*04a0*/ IMAD.MOV R12, RZ, RZ, -R9 ; /* 0x000000ffff0c7224 */ /* 0x004fc800078e0a09 */ /*04b0*/ IMAD R13, R12, R11, RZ ; /* 0x0000000b0c0d7224 */ /* 0x000fc800078e02ff */ /*04c0*/ IMAD.HI.U32 R12, R9, R13, R8 ; /* 0x0000000d090c7227 */ /* 0x000fc800078e0008 */ /*04d0*/ IMAD.MOV R9, RZ, RZ, -R15 ; /* 0x000000ffff097224 */ /* 0x000fe400078e0a0f */ /*04e0*/ IMAD.HI.U32 R12, R12, R7, RZ ; /* 0x000000070c0c7227 */ /* 0x000fc800078e00ff */ /*04f0*/ IMAD R12, R12, R9, R10 ; /* 0x000000090c0c7224 */ /* 0x000fca00078e020a */ /*0500*/ ISETP.GT.U32.AND P1, PT, R11, R12, PT ; /* 0x0000000c0b00720c */ /* 0x000fda0003f24070 */ /*0510*/ @!P1 IMAD.IADD R12, R12, 0x1, -R11 ; /* 0x000000010c0c9824 */ /* 0x000fe200078e0a0b */ /*0520*/ ISETP.NE.AND P1, PT, R6, RZ, PT ; /* 0x000000ff0600720c */ /* 0x000fc80003f25270 */ /*0530*/ ISETP.GT.U32.AND P2, PT, R11, R12, PT ; /* 0x0000000c0b00720c */ /* 0x000fda0003f44070 */ /*0540*/ @!P2 IADD3 R12, R12, -R11, RZ ; /* 0x8000000b0c0ca210 */ /* 0x000fca0007ffe0ff */ /*0550*/ @!P3 IMAD.MOV R12, RZ, RZ, -R12 ; /* 0x000000ffff0cb224 */ /* 0x000fe200078e0a0c */ /*0560*/ @!P1 LOP3.LUT R12, RZ, R6, RZ, 0x33, !PT ; /* 0x00000006ff0c9212 */ /* 0x000fc800078e33ff */ /*0570*/ ISETP.NE.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */ /* 0x000fda0003f25270 */ /*0580*/ @P1 BRA 0x620 ; /* 0x0000009000001947 */ /* 0x000fea0003800000 */ /*0590*/ LDS R8, [R0.X4] ; /* 0x0000000000087984 */ /* 0x000e620000004800 */ /*05a0*/ SHF.R.U32.HI R9, RZ, 0x1, R6 ; /* 0x00000001ff097819 */ /* 0x000fca0000011606 */ /*05b0*/ IMAD.IADD R10, R0, 0x1, -R9 ; /* 0x00000001000a7824 */ /* 0x000fca00078e0a09 */ /*05c0*/ LDS R11, [R10.X4] ; /* 0x000000000a0b7984 */ /* 0x000ea20000004800 */ /*05d0*/ I2F R9, R8 ; /* 0x0000000800097306 */ /* 0x002e620000201400 */ /*05e0*/ IMAD.IADD R11, R8, 0x1, R11 ; /* 0x00000001080b7824 */ /* 0x004fce00078e020b */ /*05f0*/ F2I.TRUNC.NTZ R9, R9 ; /* 0x0000000900097305 */ /* 0x002e62000020f100 */ /*0600*/ STS [R0.X4], R11 ; /* 0x0000000b00007388 */ /* 0x0005e80000004800 */ /*0610*/ STS [R10.X4], R9 ; /* 0x000000090a007388 */ /* 0x0025e40000004800 */ /*0620*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0630*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*0640*/ ISETP.GT.U32.AND P1, PT, R6, 0x3, PT ; /* 0x000000030600780c */ /* 0x000fe40003f24070 */ /*0650*/ SHF.R.U32.HI R6, RZ, 0x1, R6 ; /* 0x00000001ff067819 */ /* 0x000fd60000011606 */ /*0660*/ @P1 BRA 0x400 ; /* 0xfffffd9000001947 */ /* 0x000fea000383ffff */ /*0670*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x002fea0003800000 */ /*0680*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */ /* 0x000fe20003f05270 */ /*0690*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */ /* 0x000fc800078e00ff */ /*06a0*/ IMAD.WIDE R6, R4, R7, c[0x0][0x170] ; /* 0x00005c0004067625 */ /* 0x000fd000078e0207 */ /*06b0*/ @!P0 BRA 0x750 ; /* 0x0000009000008947 */ /* 0x000fea0003800000 */ /*06c0*/ ISETP.NE.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */ /* 0x000fda0003f05270 */ /*06d0*/ @P0 LDS R5, [R0.X4+0x4] ; /* 0x0000040000050984 */ /* 0x000e680000004800 */ /*06e0*/ @P0 STG.E [R6.64], R5 ; /* 0x0000000506000986 */ /* 0x0023e2000c101904 */ /*06f0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0700*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x001ee8000c1e1900 */ /*0710*/ LDS R0, [R0.X4] ; /* 0x0000000000007984 */ /* 0x004ee40000004800 */ /*0720*/ IMAD.IADD R5, R0, 0x1, R3 ; /* 0x0000000100057824 */ /* 0x00afca00078e0203 */ /*0730*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */ /* 0x000fe2000c101904 */ /*0740*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0750*/ LDS R3, [R0.X4] ; /* 0x0000000000037984 */ /* 0x001e280000004800 */ /*0760*/ STG.E [R6.64], R3 ; /* 0x0000000306007986 */ /* 0x001fe2000c101904 */ /*0770*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0780*/ BRA 0x780; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0790*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*07f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0800*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0810*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0820*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0830*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0840*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0850*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0860*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0870*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z12init_numbersPiii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */ /* 0x000e280000002100 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e240000002500 */ /*0030*/ IMAD R2, R3, c[0x0][0x0], R2 ; /* 0x0000000003027a24 */ /* 0x001fca00078e0202 */ /*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x16c], PT ; /* 0x00005b0002007a0c */ /* 0x000fda0003f06270 */ /*0050*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x000fe200000001ff */ /*0070*/ LOP3.LUT R5, R2, 0x1, RZ, 0xc0, !PT ; /* 0x0000000102057812 */ /* 0x000fe200078ec0ff */ /*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd00000000a00 */ /*0090*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x000fca00078e0203 */ /*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*00b0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> __global__ void init_numbers(int *d_numbers, int value, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { d_numbers[index] = index & 1; } } __global__ void local_blelloch_sum( int *d_input, int input_size, int *d_output, int inclusive ) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = tid + blockIdx.x * blockDim.x; // Copy to shared memory. // If index exceeds the input size use zero, so we keep a length which is // a power of two. temp[tid] = (index < input_size) ? d_input[index] : 0; __syncthreads(); // Create a binary tree, that reduces the (local) elements for (int k = 2; k <= blockDim.x; k <<= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; temp[tid] = temp[tid] + temp[tid - offset]; } __syncthreads(); } // Set the last (local) element to zero if (tid == (blockDim.x-1)) { temp[tid] = 0; } __syncthreads(); // Perform downsweep for (int k = blockDim.x; k > 1; k >>= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; float old_value = temp[tid]; int left_child_index = tid - offset; temp[tid] = temp[tid] + temp[left_child_index]; temp[left_child_index] = old_value; } __syncthreads(); } // Copy the results if the index does not execeed the input size if (index < input_size) { if (inclusive) { d_output[index] = (tid == blockDim.x-1) ? temp[tid] + d_input[index] : temp[tid+1]; } else { d_output[index] = temp[tid]; } } } __global__ void add_block_sums( int *d_input, int input_size, int *const d_block_sums ) { // Load the corresponding sum into shared memory __shared__ int block_sum; if (threadIdx.x == 0) { block_sum = d_block_sums[blockIdx.x]; } __syncthreads(); // Assert that the index does not execeed the input size int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < input_size) { // Increment all elements in this block by the corresponding block sum d_input[index] = d_input[index] + block_sum; } } __global__ void gather_every_nth( int *const d_input, int *d_output, int n ) { int index = threadIdx.x + blockIdx.x * blockDim.x; int element_index = (n-1) + (index * n); d_output[index] = d_input[element_index]; } /** * A two staged sum scan that can handle an arbitrary number of * elements between 0 and 1024^2. * * Stage 1: 1024 single Blelloch scans with 1024 elements each * Stage 2: 1 Blelloch scan of the 1024 maximums (as required) */ void sum_scan( int *d_input, int input_size, int *d_output, int inclusive ) { // Always use 1024 threads due to Blelloch only works on lenghts that are a // power of two. const int THREAD_COUNT = 1024; const int BLOCK_COUNT = ceil((double) input_size / (double) THREAD_COUNT); // Execute stage 1 int shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<BLOCK_COUNT, THREAD_COUNT, shared_size>>>(d_input, input_size, d_output, inclusive); // Execute state 2 (if necessary) if (BLOCK_COUNT > 1) { // Allocate memory for the block sums int *d_block_sums; cudaMalloc((void **) &d_block_sums, sizeof(int) * BLOCK_COUNT); // Gather local maximums gather_every_nth<<<1, BLOCK_COUNT>>>(d_output, d_block_sums, THREAD_COUNT); // Scan the final sums of the blocks shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<1, THREAD_COUNT, shared_size>>>(d_block_sums, BLOCK_COUNT, d_block_sums, 0); // Add the block sums to the input items add_block_sums<<<BLOCK_COUNT, THREAD_COUNT>>>(d_output, input_size, d_block_sums); // Free block sums cudaFree(d_block_sums); } } int main(int argc, char **argv) { int ELEMENT_COUNT = 4100; // Initialization int *d_numbers, *d_scan_result; cudaMalloc((void **) &d_numbers, ELEMENT_COUNT * sizeof(int)); cudaMalloc((void **) &d_scan_result, ELEMENT_COUNT * sizeof(int)); init_numbers<<<5, 1024>>>(d_numbers, 1, ELEMENT_COUNT); int h_numbers[ELEMENT_COUNT]; cudaMemcpy(h_numbers, d_numbers, ELEMENT_COUNT * sizeof(int), cudaMemcpyDeviceToHost); // Scan // simple_blelloch_scan<<<1, ELEMENT_COUNT, ELEMENT_COUNT * sizeof(float)>>>(d_numbers, d_scan_result, ELEMENT_COUNT); sum_scan(d_numbers, ELEMENT_COUNT, d_scan_result, 1); // Copy result int result[ELEMENT_COUNT]; cudaMemcpy(result, d_scan_result, ELEMENT_COUNT * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < ELEMENT_COUNT; i++) { printf("%i%s", result[i], (i % 20 == 19) ? "\n" : "\t"); } printf("\n"); cudaFree(d_numbers); cudaFree(d_scan_result); cudaDeviceSynchronize(); return 0; }
.file "tmpxft_00174acb_00000000-6_scan.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z34__device_stub__Z12init_numbersPiiiPiii .type _Z34__device_stub__Z12init_numbersPiiiPiii, @function _Z34__device_stub__Z12init_numbersPiiiPiii: .LFB2083: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z12init_numbersPiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z34__device_stub__Z12init_numbersPiiiPiii, .-_Z34__device_stub__Z12init_numbersPiiiPiii .globl _Z12init_numbersPiii .type _Z12init_numbersPiii, @function _Z12init_numbersPiii: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z34__device_stub__Z12init_numbersPiiiPiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z12init_numbersPiii, .-_Z12init_numbersPiii .globl _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i .type _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i, @function _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i: .LFB2085: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movl %ecx, 16(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 16(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18local_blelloch_sumPiiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i, .-_Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i .globl _Z18local_blelloch_sumPiiS_i .type _Z18local_blelloch_sumPiiS_i, @function _Z18local_blelloch_sumPiiS_i: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z18local_blelloch_sumPiiS_i, .-_Z18local_blelloch_sumPiiS_i .globl _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ .type _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_, @function _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_: .LFB2087: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 120(%rsp), %rax subq %fs:40, %rax jne .L24 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14add_block_sumsPiiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_, .-_Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ .globl _Z14add_block_sumsPiiS_ .type _Z14add_block_sumsPiiS_, @function _Z14add_block_sumsPiiS_: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z14add_block_sumsPiiS_, .-_Z14add_block_sumsPiiS_ .globl _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i .type _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i, @function _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i: .LFB2089: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movq 120(%rsp), %rax subq %fs:40, %rax jne .L32 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z16gather_every_nthPiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L27 .L32: call __stack_chk_fail@PLT .cfi_endproc .LFE2089: .size _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i, .-_Z39__device_stub__Z16gather_every_nthPiS_iPiS_i .globl _Z16gather_every_nthPiS_i .type _Z16gather_every_nthPiS_i, @function _Z16gather_every_nthPiS_i: .LFB2090: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _Z16gather_every_nthPiS_i, .-_Z16gather_every_nthPiS_i .globl _Z8sum_scanPiiS_i .type _Z8sum_scanPiiS_i, @function _Z8sum_scanPiiS_i: .LFB2057: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %rdi, %r12 movl %esi, %ebx movq %rdx, %rbp movl %ecx, %r13d movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax pxor %xmm0, %xmm0 cvtsi2sdl %esi, %xmm0 mulsd .LC0(%rip), %xmm0 movapd %xmm0, %xmm3 movsd .LC4(%rip), %xmm2 movapd %xmm0, %xmm1 andpd %xmm2, %xmm1 movsd .LC1(%rip), %xmm4 ucomisd %xmm1, %xmm4 jbe .L36 cvttsd2siq %xmm0, %rax pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 cmpnlesd %xmm1, %xmm3 movsd .LC3(%rip), %xmm4 andpd %xmm4, %xmm3 addsd %xmm1, %xmm3 andnpd %xmm0, %xmm2 orpd %xmm2, %xmm3 .L36: cvttsd2sil %xmm3, %r14d movl $1024, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %r14d, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $4096, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L44 .L37: cmpl $1, %r14d jg .L45 .L35: movq 40(%rsp), %rax subq %fs:40, %rax jne .L46 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state movl %r13d, %ecx movq %rbp, %rdx movl %ebx, %esi movq %r12, %rdi call _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i jmp .L37 .L45: movslq %r14d, %rsi salq $2, %rsi leaq 8(%rsp), %rdi call cudaMalloc@PLT movl %r14d, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L47 .L39: movl $1024, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $4096, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L48 .L40: movl $1024, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %r14d, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L49 .L41: movq 8(%rsp), %rdi call cudaFree@PLT jmp .L35 .L47: movl $1024, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i jmp .L39 .L48: movq 8(%rsp), %rdi movl $0, %ecx movq %rdi, %rdx movl %r14d, %esi call _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i jmp .L40 .L49: movq 8(%rsp), %rdx movl %ebx, %esi movq %rbp, %rdi call _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ jmp .L41 .L46: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z8sum_scanPiiS_i, .-_Z8sum_scanPiiS_i .section .rodata.str1.1,"aMS",@progbits,1 .LC5: .string "\n" .LC6: .string "\t" .LC7: .string "%i%s" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $56, %rsp .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 .cfi_offset 3, -56 movq %fs:40, %rax movq %rax, -56(%rbp) xorl %eax, %eax leaq -96(%rbp), %rdi movl $16400, %esi call cudaMalloc@PLT leaq -88(%rbp), %rdi movl $16400, %esi call cudaMalloc@PLT movl $1024, -68(%rbp) movl $1, -64(%rbp) movl $1, -60(%rbp) movl $5, -80(%rbp) movl $1, -76(%rbp) movl $1, -72(%rbp) movl $0, %r9d movl $0, %r8d movq -68(%rbp), %rdx movl $1, %ecx movq -80(%rbp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L64 .L51: leaq -16384(%rsp), %rax .L52: cmpq %rax, %rsp je .L53 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L52 .L64: movl $4100, %edx movl $1, %esi movq -96(%rbp), %rdi call _Z34__device_stub__Z12init_numbersPiiiPiii jmp .L51 .L53: subq $16, %rsp orq $0, 8(%rsp) movq %rsp, %rdi movl $2, %ecx movl $16400, %edx movq -96(%rbp), %rsi call cudaMemcpy@PLT movl $1, %ecx movq -88(%rbp), %rdx movl $4100, %esi movq -96(%rbp), %rdi call _Z8sum_scanPiiS_i leaq -16384(%rsp), %rax .L55: cmpq %rax, %rsp je .L56 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L55 .L56: subq $16, %rsp orq $0, 8(%rsp) movq %rsp, %r12 movl $2, %ecx movl $16400, %edx movq -88(%rbp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl $0, %ebx leaq .LC6(%rip), %r15 leaq .LC5(%rip), %r14 leaq .LC7(%rip), %r13 .L59: movslq %ebx, %rax imulq $1717986919, %rax, %rax sarq $35, %rax movl %ebx, %edx sarl $31, %edx subl %edx, %eax leal (%rax,%rax,4), %eax sall $2, %eax movl %ebx, %edx subl %eax, %edx cmpl $19, %edx movq %r14, %rcx cmovne %r15, %rcx movl (%r12,%rbx,4), %edx movq %r13, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $1, %rbx cmpq $4100, %rbx jne .L59 leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq -96(%rbp), %rdi call cudaFree@PLT movq -88(%rbp), %rdi call cudaFree@PLT call cudaDeviceSynchronize@PLT movq -56(%rbp), %rax subq %fs:40, %rax jne .L65 movl $0, %eax leaq -40(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .L65: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC8: .string "_Z16gather_every_nthPiS_i" .LC9: .string "_Z14add_block_sumsPiiS_" .LC10: .string "_Z18local_blelloch_sumPiiS_i" .LC11: .string "_Z12init_numbersPiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2092: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z16gather_every_nthPiS_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z14add_block_sumsPiiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z18local_blelloch_sumPiiS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), %rdx movq %rdx, %rcx leaq _Z12init_numbersPiii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1062207488 .align 8 .LC1: .long 0 .long 1127219200 .align 8 .LC3: .long 0 .long 1072693248 .align 8 .LC4: .long -1 .long 2147483647 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> __global__ void init_numbers(int *d_numbers, int value, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { d_numbers[index] = index & 1; } } __global__ void local_blelloch_sum( int *d_input, int input_size, int *d_output, int inclusive ) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = tid + blockIdx.x * blockDim.x; // Copy to shared memory. // If index exceeds the input size use zero, so we keep a length which is // a power of two. temp[tid] = (index < input_size) ? d_input[index] : 0; __syncthreads(); // Create a binary tree, that reduces the (local) elements for (int k = 2; k <= blockDim.x; k <<= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; temp[tid] = temp[tid] + temp[tid - offset]; } __syncthreads(); } // Set the last (local) element to zero if (tid == (blockDim.x-1)) { temp[tid] = 0; } __syncthreads(); // Perform downsweep for (int k = blockDim.x; k > 1; k >>= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; float old_value = temp[tid]; int left_child_index = tid - offset; temp[tid] = temp[tid] + temp[left_child_index]; temp[left_child_index] = old_value; } __syncthreads(); } // Copy the results if the index does not execeed the input size if (index < input_size) { if (inclusive) { d_output[index] = (tid == blockDim.x-1) ? temp[tid] + d_input[index] : temp[tid+1]; } else { d_output[index] = temp[tid]; } } } __global__ void add_block_sums( int *d_input, int input_size, int *const d_block_sums ) { // Load the corresponding sum into shared memory __shared__ int block_sum; if (threadIdx.x == 0) { block_sum = d_block_sums[blockIdx.x]; } __syncthreads(); // Assert that the index does not execeed the input size int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < input_size) { // Increment all elements in this block by the corresponding block sum d_input[index] = d_input[index] + block_sum; } } __global__ void gather_every_nth( int *const d_input, int *d_output, int n ) { int index = threadIdx.x + blockIdx.x * blockDim.x; int element_index = (n-1) + (index * n); d_output[index] = d_input[element_index]; } /** * A two staged sum scan that can handle an arbitrary number of * elements between 0 and 1024^2. * * Stage 1: 1024 single Blelloch scans with 1024 elements each * Stage 2: 1 Blelloch scan of the 1024 maximums (as required) */ void sum_scan( int *d_input, int input_size, int *d_output, int inclusive ) { // Always use 1024 threads due to Blelloch only works on lenghts that are a // power of two. const int THREAD_COUNT = 1024; const int BLOCK_COUNT = ceil((double) input_size / (double) THREAD_COUNT); // Execute stage 1 int shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<BLOCK_COUNT, THREAD_COUNT, shared_size>>>(d_input, input_size, d_output, inclusive); // Execute state 2 (if necessary) if (BLOCK_COUNT > 1) { // Allocate memory for the block sums int *d_block_sums; cudaMalloc((void **) &d_block_sums, sizeof(int) * BLOCK_COUNT); // Gather local maximums gather_every_nth<<<1, BLOCK_COUNT>>>(d_output, d_block_sums, THREAD_COUNT); // Scan the final sums of the blocks shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<1, THREAD_COUNT, shared_size>>>(d_block_sums, BLOCK_COUNT, d_block_sums, 0); // Add the block sums to the input items add_block_sums<<<BLOCK_COUNT, THREAD_COUNT>>>(d_output, input_size, d_block_sums); // Free block sums cudaFree(d_block_sums); } } int main(int argc, char **argv) { int ELEMENT_COUNT = 4100; // Initialization int *d_numbers, *d_scan_result; cudaMalloc((void **) &d_numbers, ELEMENT_COUNT * sizeof(int)); cudaMalloc((void **) &d_scan_result, ELEMENT_COUNT * sizeof(int)); init_numbers<<<5, 1024>>>(d_numbers, 1, ELEMENT_COUNT); int h_numbers[ELEMENT_COUNT]; cudaMemcpy(h_numbers, d_numbers, ELEMENT_COUNT * sizeof(int), cudaMemcpyDeviceToHost); // Scan // simple_blelloch_scan<<<1, ELEMENT_COUNT, ELEMENT_COUNT * sizeof(float)>>>(d_numbers, d_scan_result, ELEMENT_COUNT); sum_scan(d_numbers, ELEMENT_COUNT, d_scan_result, 1); // Copy result int result[ELEMENT_COUNT]; cudaMemcpy(result, d_scan_result, ELEMENT_COUNT * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < ELEMENT_COUNT; i++) { printf("%i%s", result[i], (i % 20 == 19) ? "\n" : "\t"); } printf("\n"); cudaFree(d_numbers); cudaFree(d_scan_result); cudaDeviceSynchronize(); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void init_numbers(int *d_numbers, int value, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { d_numbers[index] = index & 1; } } __global__ void local_blelloch_sum( int *d_input, int input_size, int *d_output, int inclusive ) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = tid + blockIdx.x * blockDim.x; // Copy to shared memory. // If index exceeds the input size use zero, so we keep a length which is // a power of two. temp[tid] = (index < input_size) ? d_input[index] : 0; __syncthreads(); // Create a binary tree, that reduces the (local) elements for (int k = 2; k <= blockDim.x; k <<= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; temp[tid] = temp[tid] + temp[tid - offset]; } __syncthreads(); } // Set the last (local) element to zero if (tid == (blockDim.x-1)) { temp[tid] = 0; } __syncthreads(); // Perform downsweep for (int k = blockDim.x; k > 1; k >>= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; float old_value = temp[tid]; int left_child_index = tid - offset; temp[tid] = temp[tid] + temp[left_child_index]; temp[left_child_index] = old_value; } __syncthreads(); } // Copy the results if the index does not execeed the input size if (index < input_size) { if (inclusive) { d_output[index] = (tid == blockDim.x-1) ? temp[tid] + d_input[index] : temp[tid+1]; } else { d_output[index] = temp[tid]; } } } __global__ void add_block_sums( int *d_input, int input_size, int *const d_block_sums ) { // Load the corresponding sum into shared memory __shared__ int block_sum; if (threadIdx.x == 0) { block_sum = d_block_sums[blockIdx.x]; } __syncthreads(); // Assert that the index does not execeed the input size int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < input_size) { // Increment all elements in this block by the corresponding block sum d_input[index] = d_input[index] + block_sum; } } __global__ void gather_every_nth( int *const d_input, int *d_output, int n ) { int index = threadIdx.x + blockIdx.x * blockDim.x; int element_index = (n-1) + (index * n); d_output[index] = d_input[element_index]; } /** * A two staged sum scan that can handle an arbitrary number of * elements between 0 and 1024^2. * * Stage 1: 1024 single Blelloch scans with 1024 elements each * Stage 2: 1 Blelloch scan of the 1024 maximums (as required) */ void sum_scan( int *d_input, int input_size, int *d_output, int inclusive ) { // Always use 1024 threads due to Blelloch only works on lenghts that are a // power of two. const int THREAD_COUNT = 1024; const int BLOCK_COUNT = ceil((double) input_size / (double) THREAD_COUNT); // Execute stage 1 int shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<BLOCK_COUNT, THREAD_COUNT, shared_size>>>(d_input, input_size, d_output, inclusive); // Execute state 2 (if necessary) if (BLOCK_COUNT > 1) { // Allocate memory for the block sums int *d_block_sums; hipMalloc((void **) &d_block_sums, sizeof(int) * BLOCK_COUNT); // Gather local maximums gather_every_nth<<<1, BLOCK_COUNT>>>(d_output, d_block_sums, THREAD_COUNT); // Scan the final sums of the blocks shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<1, THREAD_COUNT, shared_size>>>(d_block_sums, BLOCK_COUNT, d_block_sums, 0); // Add the block sums to the input items add_block_sums<<<BLOCK_COUNT, THREAD_COUNT>>>(d_output, input_size, d_block_sums); // Free block sums hipFree(d_block_sums); } } int main(int argc, char **argv) { int ELEMENT_COUNT = 4100; // Initialization int *d_numbers, *d_scan_result; hipMalloc((void **) &d_numbers, ELEMENT_COUNT * sizeof(int)); hipMalloc((void **) &d_scan_result, ELEMENT_COUNT * sizeof(int)); init_numbers<<<5, 1024>>>(d_numbers, 1, ELEMENT_COUNT); int h_numbers[ELEMENT_COUNT]; hipMemcpy(h_numbers, d_numbers, ELEMENT_COUNT * sizeof(int), hipMemcpyDeviceToHost); // Scan // simple_blelloch_scan<<<1, ELEMENT_COUNT, ELEMENT_COUNT * sizeof(float)>>>(d_numbers, d_scan_result, ELEMENT_COUNT); sum_scan(d_numbers, ELEMENT_COUNT, d_scan_result, 1); // Copy result int result[ELEMENT_COUNT]; hipMemcpy(result, d_scan_result, ELEMENT_COUNT * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < ELEMENT_COUNT; i++) { printf("%i%s", result[i], (i % 20 == 19) ? "\n" : "\t"); } printf("\n"); hipFree(d_numbers); hipFree(d_scan_result); hipDeviceSynchronize(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void init_numbers(int *d_numbers, int value, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { d_numbers[index] = index & 1; } } __global__ void local_blelloch_sum( int *d_input, int input_size, int *d_output, int inclusive ) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = tid + blockIdx.x * blockDim.x; // Copy to shared memory. // If index exceeds the input size use zero, so we keep a length which is // a power of two. temp[tid] = (index < input_size) ? d_input[index] : 0; __syncthreads(); // Create a binary tree, that reduces the (local) elements for (int k = 2; k <= blockDim.x; k <<= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; temp[tid] = temp[tid] + temp[tid - offset]; } __syncthreads(); } // Set the last (local) element to zero if (tid == (blockDim.x-1)) { temp[tid] = 0; } __syncthreads(); // Perform downsweep for (int k = blockDim.x; k > 1; k >>= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; float old_value = temp[tid]; int left_child_index = tid - offset; temp[tid] = temp[tid] + temp[left_child_index]; temp[left_child_index] = old_value; } __syncthreads(); } // Copy the results if the index does not execeed the input size if (index < input_size) { if (inclusive) { d_output[index] = (tid == blockDim.x-1) ? temp[tid] + d_input[index] : temp[tid+1]; } else { d_output[index] = temp[tid]; } } } __global__ void add_block_sums( int *d_input, int input_size, int *const d_block_sums ) { // Load the corresponding sum into shared memory __shared__ int block_sum; if (threadIdx.x == 0) { block_sum = d_block_sums[blockIdx.x]; } __syncthreads(); // Assert that the index does not execeed the input size int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < input_size) { // Increment all elements in this block by the corresponding block sum d_input[index] = d_input[index] + block_sum; } } __global__ void gather_every_nth( int *const d_input, int *d_output, int n ) { int index = threadIdx.x + blockIdx.x * blockDim.x; int element_index = (n-1) + (index * n); d_output[index] = d_input[element_index]; } /** * A two staged sum scan that can handle an arbitrary number of * elements between 0 and 1024^2. * * Stage 1: 1024 single Blelloch scans with 1024 elements each * Stage 2: 1 Blelloch scan of the 1024 maximums (as required) */ void sum_scan( int *d_input, int input_size, int *d_output, int inclusive ) { // Always use 1024 threads due to Blelloch only works on lenghts that are a // power of two. const int THREAD_COUNT = 1024; const int BLOCK_COUNT = ceil((double) input_size / (double) THREAD_COUNT); // Execute stage 1 int shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<BLOCK_COUNT, THREAD_COUNT, shared_size>>>(d_input, input_size, d_output, inclusive); // Execute state 2 (if necessary) if (BLOCK_COUNT > 1) { // Allocate memory for the block sums int *d_block_sums; hipMalloc((void **) &d_block_sums, sizeof(int) * BLOCK_COUNT); // Gather local maximums gather_every_nth<<<1, BLOCK_COUNT>>>(d_output, d_block_sums, THREAD_COUNT); // Scan the final sums of the blocks shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<1, THREAD_COUNT, shared_size>>>(d_block_sums, BLOCK_COUNT, d_block_sums, 0); // Add the block sums to the input items add_block_sums<<<BLOCK_COUNT, THREAD_COUNT>>>(d_output, input_size, d_block_sums); // Free block sums hipFree(d_block_sums); } } int main(int argc, char **argv) { int ELEMENT_COUNT = 4100; // Initialization int *d_numbers, *d_scan_result; hipMalloc((void **) &d_numbers, ELEMENT_COUNT * sizeof(int)); hipMalloc((void **) &d_scan_result, ELEMENT_COUNT * sizeof(int)); init_numbers<<<5, 1024>>>(d_numbers, 1, ELEMENT_COUNT); int h_numbers[ELEMENT_COUNT]; hipMemcpy(h_numbers, d_numbers, ELEMENT_COUNT * sizeof(int), hipMemcpyDeviceToHost); // Scan // simple_blelloch_scan<<<1, ELEMENT_COUNT, ELEMENT_COUNT * sizeof(float)>>>(d_numbers, d_scan_result, ELEMENT_COUNT); sum_scan(d_numbers, ELEMENT_COUNT, d_scan_result, 1); // Copy result int result[ELEMENT_COUNT]; hipMemcpy(result, d_scan_result, ELEMENT_COUNT * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < ELEMENT_COUNT; i++) { printf("%i%s", result[i], (i % 20 == 19) ? "\n" : "\t"); } printf("\n"); hipFree(d_numbers); hipFree(d_scan_result); hipDeviceSynchronize(); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12init_numbersPiii .globl _Z12init_numbersPiii .p2align 8 .type _Z12init_numbersPiii,@function _Z12init_numbersPiii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b32 s3, s[0:1], 0xc s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s3, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 v_and_b32_e32 v4, 1, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo global_store_b32 v[0:1], v4, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12init_numbersPiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12init_numbersPiii, .Lfunc_end0-_Z12init_numbersPiii .section .AMDGPU.csdata,"",@progbits .text .protected _Z18local_blelloch_sumPiiS_i .globl _Z18local_blelloch_sumPiiS_i .p2align 8 .type _Z18local_blelloch_sumPiiS_i,@function _Z18local_blelloch_sumPiiS_i: s_clause 0x2 s_load_b32 s3, s[0:1], 0x2c s_load_b32 s2, s[0:1], 0x8 s_load_b64 s[4:5], s[0:1], 0x0 v_mov_b32_e32 v4, 0 s_waitcnt lgkmcnt(0) s_and_b32 s6, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s6, v[0:1] v_cmp_gt_i32_e64 s2, s2, v1 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_2) s_and_saveexec_b32 s7, s2 s_cbranch_execz .LBB1_2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[1:2] v_add_co_u32 v3, vcc_lo, s4, v3 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo global_load_b32 v4, v[3:4], off .LBB1_2: s_or_b32 exec_lo, exec_lo, s7 v_cmp_lt_u16_e64 s7, s3, 2 v_lshl_add_u32 v3, v0, 2, 0 s_mov_b32 s3, 2 s_delay_alu instid0(VALU_DEP_2) s_and_b32 vcc_lo, exec_lo, s7 s_waitcnt vmcnt(0) ds_store_b32 v3, v4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_vccnz .LBB1_7 v_add_nc_u32_e32 v4, 1, v0 s_branch .LBB1_5 .p2align 6 .LBB1_4: s_or_b32 exec_lo, exec_lo, s7 s_lshl_b32 s3, s3, 1 s_waitcnt lgkmcnt(0) s_cmp_gt_u32 s3, s6 s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB1_7 .LBB1_5: s_add_i32 s7, s3, -1 s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_and_b32_e32 v5, s7, v4 s_mov_b32 s7, exec_lo s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v5 s_cbranch_execz .LBB1_4 s_lshr_b32 s8, s3, 1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v5, s8, v0 v_lshl_add_u32 v5, v5, 2, 0 ds_load_b32 v6, v3 ds_load_b32 v5, v5 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v5, v5, v6 ds_store_b32 v3, v5 s_branch .LBB1_4 .LBB1_7: s_add_i32 s3, s6, -1 s_mov_b32 s7, exec_lo v_cmp_ne_u32_e32 vcc_lo, s3, v0 v_cmpx_eq_u32_e64 s3, v0 s_cbranch_execz .LBB1_9 v_mov_b32_e32 v4, 0 ds_store_b32 v3, v4 .LBB1_9: s_or_b32 exec_lo, exec_lo, s7 s_cmp_lt_u32 s6, 2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB1_14 v_add_nc_u32_e32 v4, 1, v0 s_delay_alu instid0(VALU_DEP_1) v_cvt_f32_u32_e32 v5, v4 s_set_inst_prefetch_distance 0x1 s_branch .LBB1_12 .p2align 6 .LBB1_11: s_or_b32 exec_lo, exec_lo, s7 s_lshr_b32 s3, s6, 1 s_cmp_lt_u32 s6, 4 s_mov_b32 s6, s3 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB1_14 .LBB1_12: v_cvt_f32_u32_e32 v6, s6 s_mov_b32 s7, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_rcp_iflag_f32_e32 v7, v6 s_waitcnt_depctr 0xfff v_mul_f32_e32 v7, v5, v7 v_trunc_f32_e32 v7, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_fma_f32 v8, -v7, v6, v5 v_cvt_u32_f32_e32 v7, v7 v_cmp_ge_f32_e64 s3, |v8|, |v6| s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_co_ci_u32_e64 v6, s3, 0, v7, s3 v_mul_lo_u32 v6, v6, s6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_sub_nc_u32_e32 v6, v4, v6 v_and_b32_e32 v6, 0xffff, v6 s_delay_alu instid0(VALU_DEP_1) v_cmpx_eq_u32_e32 0, v6 s_cbranch_execz .LBB1_11 s_lshr_b32 s3, s6, 1 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_subrev_nc_u32_e32 v6, s3, v0 v_lshl_add_u32 v6, v6, 2, 0 ds_load_b32 v7, v3 ds_load_b32 v8, v6 s_waitcnt lgkmcnt(1) v_cvt_f32_i32_e32 v9, v7 s_waitcnt lgkmcnt(0) v_add_nc_u32_e32 v7, v8, v7 s_delay_alu instid0(VALU_DEP_2) v_cvt_i32_f32_e32 v8, v9 ds_store_b32 v3, v7 ds_store_b32 v6, v8 s_branch .LBB1_11 .LBB1_14: s_set_inst_prefetch_distance 0x2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB1_23 s_load_b32 s2, s[0:1], 0x18 s_waitcnt lgkmcnt(0) s_cmp_eq_u32 s2, 0 s_cbranch_scc1 .LBB1_24 s_and_saveexec_b32 s2, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s2, exec_lo, s2 s_cbranch_execz .LBB1_18 v_lshl_add_u32 v0, v0, 2, 0 ds_load_b32 v4, v0 offset:4 .LBB1_18: s_and_not1_saveexec_b32 s2, s2 s_cbranch_execz .LBB1_20 s_waitcnt lgkmcnt(0) v_lshlrev_b64 v[4:5], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v4, vcc_lo, s4, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo global_load_b32 v0, v[4:5], off ds_load_b32 v4, v3 s_waitcnt vmcnt(0) lgkmcnt(0) v_add_nc_u32_e32 v4, v0, v4 .LBB1_20: s_or_b32 exec_lo, exec_lo, s2 s_cbranch_execnz .LBB1_22 .LBB1_21: s_waitcnt lgkmcnt(0) ds_load_b32 v4, v3 .LBB1_22: s_load_b64 s[0:1], s[0:1], 0x10 v_lshlrev_b64 v[0:1], 2, v[1:2] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v4, off .LBB1_23: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .LBB1_24: s_branch .LBB1_21 .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z18local_blelloch_sumPiiS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z18local_blelloch_sumPiiS_i, .Lfunc_end1-_Z18local_blelloch_sumPiiS_i .section .AMDGPU.csdata,"",@progbits .text .protected _Z14add_block_sumsPiiS_ .globl _Z14add_block_sumsPiiS_ .p2align 8 .type _Z14add_block_sumsPiiS_,@function _Z14add_block_sumsPiiS_: s_mov_b32 s2, s15 s_mov_b32 s3, 0 s_mov_b32 s4, exec_lo v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB2_2 s_load_b64 s[6:7], s[0:1], 0x10 s_lshl_b64 s[8:9], s[2:3], 2 s_waitcnt lgkmcnt(0) s_add_u32 s6, s6, s8 s_addc_u32 s7, s7, s9 s_load_b32 s3, s[6:7], 0x0 s_waitcnt lgkmcnt(0) v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s3 ds_store_b32 v1, v2 .LBB2_2: s_or_b32 exec_lo, exec_lo, s4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_clause 0x1 s_load_b32 s3, s[0:1], 0x24 s_load_b32 s4, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s4, v1 s_cbranch_execz .LBB2_4 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_2) v_lshlrev_b64 v[0:1], 2, v[1:2] ds_load_b32 v3, v3 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b32 v2, v[0:1], off s_waitcnt vmcnt(0) v_add_nc_u32_e32 v2, v3, v2 global_store_b32 v[0:1], v2, off .LBB2_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z14add_block_sumsPiiS_ .amdhsa_group_segment_fixed_size 4 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z14add_block_sumsPiiS_, .Lfunc_end2-_Z14add_block_sumsPiiS_ .section .AMDGPU.csdata,"",@progbits .text .protected _Z16gather_every_nthPiS_i .globl _Z16gather_every_nthPiS_i .p2align 8 .type _Z16gather_every_nthPiS_i,@function _Z16gather_every_nthPiS_i: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b32 s4, s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] s_load_b128 s[0:3], s[0:1], 0x0 v_mul_lo_u32 v0, v1, s4 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add3_u32 v2, s4, -1, v0 v_ashrrev_i32_e32 v3, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[2:3] s_waitcnt lgkmcnt(0) v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_load_b32 v3, v[2:3], off v_ashrrev_i32_e32 v2, 31, v1 v_lshlrev_b64 v[0:1], 2, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s2, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo s_waitcnt vmcnt(0) global_store_b32 v[0:1], v3, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16gather_every_nthPiS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end3: .size _Z16gather_every_nthPiS_i, .Lfunc_end3-_Z16gather_every_nthPiS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12init_numbersPiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12init_numbersPiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims - .offset: 152 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z18local_blelloch_sumPiiS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z18local_blelloch_sumPiiS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 4 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z14add_block_sumsPiiS_ .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z14add_block_sumsPiiS_.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z16gather_every_nthPiS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16gather_every_nthPiS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void init_numbers(int *d_numbers, int value, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { d_numbers[index] = index & 1; } } __global__ void local_blelloch_sum( int *d_input, int input_size, int *d_output, int inclusive ) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = tid + blockIdx.x * blockDim.x; // Copy to shared memory. // If index exceeds the input size use zero, so we keep a length which is // a power of two. temp[tid] = (index < input_size) ? d_input[index] : 0; __syncthreads(); // Create a binary tree, that reduces the (local) elements for (int k = 2; k <= blockDim.x; k <<= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; temp[tid] = temp[tid] + temp[tid - offset]; } __syncthreads(); } // Set the last (local) element to zero if (tid == (blockDim.x-1)) { temp[tid] = 0; } __syncthreads(); // Perform downsweep for (int k = blockDim.x; k > 1; k >>= 1) { if ((tid+1) % k == 0) { int offset = k >> 1; float old_value = temp[tid]; int left_child_index = tid - offset; temp[tid] = temp[tid] + temp[left_child_index]; temp[left_child_index] = old_value; } __syncthreads(); } // Copy the results if the index does not execeed the input size if (index < input_size) { if (inclusive) { d_output[index] = (tid == blockDim.x-1) ? temp[tid] + d_input[index] : temp[tid+1]; } else { d_output[index] = temp[tid]; } } } __global__ void add_block_sums( int *d_input, int input_size, int *const d_block_sums ) { // Load the corresponding sum into shared memory __shared__ int block_sum; if (threadIdx.x == 0) { block_sum = d_block_sums[blockIdx.x]; } __syncthreads(); // Assert that the index does not execeed the input size int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < input_size) { // Increment all elements in this block by the corresponding block sum d_input[index] = d_input[index] + block_sum; } } __global__ void gather_every_nth( int *const d_input, int *d_output, int n ) { int index = threadIdx.x + blockIdx.x * blockDim.x; int element_index = (n-1) + (index * n); d_output[index] = d_input[element_index]; } /** * A two staged sum scan that can handle an arbitrary number of * elements between 0 and 1024^2. * * Stage 1: 1024 single Blelloch scans with 1024 elements each * Stage 2: 1 Blelloch scan of the 1024 maximums (as required) */ void sum_scan( int *d_input, int input_size, int *d_output, int inclusive ) { // Always use 1024 threads due to Blelloch only works on lenghts that are a // power of two. const int THREAD_COUNT = 1024; const int BLOCK_COUNT = ceil((double) input_size / (double) THREAD_COUNT); // Execute stage 1 int shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<BLOCK_COUNT, THREAD_COUNT, shared_size>>>(d_input, input_size, d_output, inclusive); // Execute state 2 (if necessary) if (BLOCK_COUNT > 1) { // Allocate memory for the block sums int *d_block_sums; hipMalloc((void **) &d_block_sums, sizeof(int) * BLOCK_COUNT); // Gather local maximums gather_every_nth<<<1, BLOCK_COUNT>>>(d_output, d_block_sums, THREAD_COUNT); // Scan the final sums of the blocks shared_size = THREAD_COUNT * sizeof(int); local_blelloch_sum<<<1, THREAD_COUNT, shared_size>>>(d_block_sums, BLOCK_COUNT, d_block_sums, 0); // Add the block sums to the input items add_block_sums<<<BLOCK_COUNT, THREAD_COUNT>>>(d_output, input_size, d_block_sums); // Free block sums hipFree(d_block_sums); } } int main(int argc, char **argv) { int ELEMENT_COUNT = 4100; // Initialization int *d_numbers, *d_scan_result; hipMalloc((void **) &d_numbers, ELEMENT_COUNT * sizeof(int)); hipMalloc((void **) &d_scan_result, ELEMENT_COUNT * sizeof(int)); init_numbers<<<5, 1024>>>(d_numbers, 1, ELEMENT_COUNT); int h_numbers[ELEMENT_COUNT]; hipMemcpy(h_numbers, d_numbers, ELEMENT_COUNT * sizeof(int), hipMemcpyDeviceToHost); // Scan // simple_blelloch_scan<<<1, ELEMENT_COUNT, ELEMENT_COUNT * sizeof(float)>>>(d_numbers, d_scan_result, ELEMENT_COUNT); sum_scan(d_numbers, ELEMENT_COUNT, d_scan_result, 1); // Copy result int result[ELEMENT_COUNT]; hipMemcpy(result, d_scan_result, ELEMENT_COUNT * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < ELEMENT_COUNT; i++) { printf("%i%s", result[i], (i % 20 == 19) ? "\n" : "\t"); } printf("\n"); hipFree(d_numbers); hipFree(d_scan_result); hipDeviceSynchronize(); return 0; }
.text .file "scan.hip" .globl _Z27__device_stub__init_numbersPiii # -- Begin function _Z27__device_stub__init_numbersPiii .p2align 4, 0x90 .type _Z27__device_stub__init_numbersPiii,@function _Z27__device_stub__init_numbersPiii: # @_Z27__device_stub__init_numbersPiii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z12init_numbersPiii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z27__device_stub__init_numbersPiii, .Lfunc_end0-_Z27__device_stub__init_numbersPiii .cfi_endproc # -- End function .globl _Z33__device_stub__local_blelloch_sumPiiS_i # -- Begin function _Z33__device_stub__local_blelloch_sumPiiS_i .p2align 4, 0x90 .type _Z33__device_stub__local_blelloch_sumPiiS_i,@function _Z33__device_stub__local_blelloch_sumPiiS_i: # @_Z33__device_stub__local_blelloch_sumPiiS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18local_blelloch_sumPiiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z33__device_stub__local_blelloch_sumPiiS_i, .Lfunc_end1-_Z33__device_stub__local_blelloch_sumPiiS_i .cfi_endproc # -- End function .globl _Z29__device_stub__add_block_sumsPiiS_ # -- Begin function _Z29__device_stub__add_block_sumsPiiS_ .p2align 4, 0x90 .type _Z29__device_stub__add_block_sumsPiiS_,@function _Z29__device_stub__add_block_sumsPiiS_: # @_Z29__device_stub__add_block_sumsPiiS_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14add_block_sumsPiiS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z29__device_stub__add_block_sumsPiiS_, .Lfunc_end2-_Z29__device_stub__add_block_sumsPiiS_ .cfi_endproc # -- End function .globl _Z31__device_stub__gather_every_nthPiS_i # -- Begin function _Z31__device_stub__gather_every_nthPiS_i .p2align 4, 0x90 .type _Z31__device_stub__gather_every_nthPiS_i,@function _Z31__device_stub__gather_every_nthPiS_i: # @_Z31__device_stub__gather_every_nthPiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16gather_every_nthPiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z31__device_stub__gather_every_nthPiS_i, .Lfunc_end3-_Z31__device_stub__gather_every_nthPiS_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z8sum_scanPiiS_i .LCPI4_0: .quad 0x3f50000000000000 # double 9.765625E-4 .text .globl _Z8sum_scanPiiS_i .p2align 4, 0x90 .type _Z8sum_scanPiiS_i,@function _Z8sum_scanPiiS_i: # @_Z8sum_scanPiiS_i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $136, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movq %rdx, 120(%rsp) # 8-byte Spill movl %esi, %ebx movq %rdi, %r13 movabsq $4294968320, %r15 # imm = 0x100000400 cvtsi2sd %esi, %xmm0 mulsd .LCPI4_0(%rip), %xmm0 callq ceil@PLT cvttsd2si %xmm0, %r14d leaq (%r14,%r15), %r12 addq $-1024, %r12 # imm = 0xFC00 movl $4096, %r8d # imm = 0x1000 movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax je .LBB4_1 # %bb.2: cmpl $2, %r14d jge .LBB4_3 jmp .LBB4_10 .LBB4_1: movq %r13, 64(%rsp) movl %ebx, 72(%rsp) movq 120(%rsp), %rax # 8-byte Reload movq %rax, 56(%rsp) movl %ebp, 4(%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18local_blelloch_sumPiiS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 cmpl $2, %r14d jl .LBB4_10 .LBB4_3: leaq (,%r14,4), %rsi leaq 72(%rsp), %rdi callq hipMalloc leaq -1023(%r15), %r13 movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_5 # %bb.4: movq 72(%rsp), %rax movq 120(%rsp), %rcx # 8-byte Reload movq %rcx, 64(%rsp) movq %rax, 56(%rsp) movl $1024, 4(%rsp) # imm = 0x400 leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 56(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16gather_every_nthPiS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_5: movl $4096, %r8d # imm = 0x1000 movq %r13, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_7 # %bb.6: movq 72(%rsp), %rax movq %rax, 64(%rsp) movl %r14d, 4(%rsp) movq %rax, 56(%rsp) movl $0, 132(%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 132(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18local_blelloch_sumPiiS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_7: movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_9 # %bb.8: movq 72(%rsp), %rax movq 120(%rsp), %rcx # 8-byte Reload movq %rcx, 64(%rsp) movl %ebx, 4(%rsp) movq %rax, 56(%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14add_block_sumsPiiS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_9: movq 72(%rsp), %rdi callq hipFree .LBB4_10: addq $136, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z8sum_scanPiiS_i, .Lfunc_end4-_Z8sum_scanPiiS_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset %rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $112, %rsp .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 leaq -40(%rbp), %rdi movl $16400, %esi # imm = 0x4010 callq hipMalloc leaq -48(%rbp), %rdi movl $16400, %esi # imm = 0x4010 callq hipMalloc movabsq $4294967301, %rdi # imm = 0x100000005 leaq 1019(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_2 # %bb.1: movq -40(%rbp), %rax movq %rax, -112(%rbp) movl $1, -56(%rbp) movl $4100, -52(%rbp) # imm = 0x1004 leaq -112(%rbp), %rax movq %rax, -144(%rbp) leaq -56(%rbp), %rax movq %rax, -136(%rbp) leaq -52(%rbp), %rax movq %rax, -128(%rbp) leaq -104(%rbp), %rdi leaq -88(%rbp), %rsi leaq -72(%rbp), %rdx leaq -64(%rbp), %rcx callq __hipPopCallConfiguration movq -104(%rbp), %rsi movl -96(%rbp), %edx movq -88(%rbp), %rcx movl -80(%rbp), %r8d leaq -144(%rbp), %r9 movl $_Z12init_numbersPiii, %edi pushq -64(%rbp) pushq -72(%rbp) callq hipLaunchKernel addq $16, %rsp .LBB5_2: movq %rsp, %r15 leaq -16400(%rsp), %rdi movq %rdi, %rsp movq -40(%rbp), %rsi movl $16400, %edx # imm = 0x4010 movl $2, %ecx callq hipMemcpy movq -40(%rbp), %rdi movq -48(%rbp), %rdx movl $4100, %esi # imm = 0x1004 movl $1, %ecx callq _Z8sum_scanPiiS_i movq %rsp, %rbx addq $-16400, %rbx # imm = 0xBFF0 movq %rbx, %rsp movq -48(%rbp), %rsi movl $16400, %edx # imm = 0x4010 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movabsq $-3689348814741910323, %r12 # imm = 0xCCCCCCCCCCCCCCCD xorl %r14d, %r14d jmp .LBB5_3 .p2align 4, 0x90 .LBB5_5: # in Loop: Header=BB5_3 Depth=1 movl (%rbx,%r14,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %r14 cmpq $4100, %r14 # imm = 0x1004 je .LBB5_6 .LBB5_3: # =>This Inner Loop Header: Depth=1 movq %r14, %rax mulq %r12 shrq $4, %rdx leal (%rdx,%rdx,4), %eax leal 19(,%rax,4), %eax movl $.L.str.1, %edx cmpl %r14d, %eax je .LBB5_5 # %bb.4: # in Loop: Header=BB5_3 Depth=1 movl $.L.str.2, %edx jmp .LBB5_5 .LBB5_6: movl $10, %edi callq putchar@PLT movq -40(%rbp), %rdi callq hipFree movq -48(%rbp), %rdi callq hipFree callq hipDeviceSynchronize movq %r15, %rsp xorl %eax, %eax leaq -32(%rbp), %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp .cfi_def_cfa %rsp, 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12init_numbersPiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18local_blelloch_sumPiiS_i, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14add_block_sumsPiiS_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16gather_every_nthPiS_i, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z12init_numbersPiii,@object # @_Z12init_numbersPiii .section .rodata,"a",@progbits .globl _Z12init_numbersPiii .p2align 3, 0x0 _Z12init_numbersPiii: .quad _Z27__device_stub__init_numbersPiii .size _Z12init_numbersPiii, 8 .type _Z18local_blelloch_sumPiiS_i,@object # @_Z18local_blelloch_sumPiiS_i .globl _Z18local_blelloch_sumPiiS_i .p2align 3, 0x0 _Z18local_blelloch_sumPiiS_i: .quad _Z33__device_stub__local_blelloch_sumPiiS_i .size _Z18local_blelloch_sumPiiS_i, 8 .type _Z14add_block_sumsPiiS_,@object # @_Z14add_block_sumsPiiS_ .globl _Z14add_block_sumsPiiS_ .p2align 3, 0x0 _Z14add_block_sumsPiiS_: .quad _Z29__device_stub__add_block_sumsPiiS_ .size _Z14add_block_sumsPiiS_, 8 .type _Z16gather_every_nthPiS_i,@object # @_Z16gather_every_nthPiS_i .globl _Z16gather_every_nthPiS_i .p2align 3, 0x0 _Z16gather_every_nthPiS_i: .quad _Z31__device_stub__gather_every_nthPiS_i .size _Z16gather_every_nthPiS_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%i%s" .size .L.str, 5 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "\n" .size .L.str.1, 2 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "\t" .size .L.str.2, 2 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12init_numbersPiii" .size .L__unnamed_1, 21 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z18local_blelloch_sumPiiS_i" .size .L__unnamed_2, 29 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z14add_block_sumsPiiS_" .size .L__unnamed_3, 24 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z16gather_every_nthPiS_i" .size .L__unnamed_4, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__init_numbersPiii .addrsig_sym _Z33__device_stub__local_blelloch_sumPiiS_i .addrsig_sym _Z29__device_stub__add_block_sumsPiiS_ .addrsig_sym _Z31__device_stub__gather_every_nthPiS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12init_numbersPiii .addrsig_sym _Z18local_blelloch_sumPiiS_i .addrsig_sym _Z14add_block_sumsPiiS_ .addrsig_sym _Z16gather_every_nthPiS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00174acb_00000000-6_scan.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z34__device_stub__Z12init_numbersPiiiPiii .type _Z34__device_stub__Z12init_numbersPiiiPiii, @function _Z34__device_stub__Z12init_numbersPiiiPiii: .LFB2083: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movq %rsp, %rax movq %rax, 96(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z12init_numbersPiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z34__device_stub__Z12init_numbersPiiiPiii, .-_Z34__device_stub__Z12init_numbersPiiiPiii .globl _Z12init_numbersPiii .type _Z12init_numbersPiii, @function _Z12init_numbersPiii: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z34__device_stub__Z12init_numbersPiiiPiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z12init_numbersPiii, .-_Z12init_numbersPiii .globl _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i .type _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i, @function _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i: .LFB2085: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movl %ecx, 16(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 16(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z18local_blelloch_sumPiiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2085: .size _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i, .-_Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i .globl _Z18local_blelloch_sumPiiS_i .type _Z18local_blelloch_sumPiiS_i, @function _Z18local_blelloch_sumPiiS_i: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _Z18local_blelloch_sumPiiS_i, .-_Z18local_blelloch_sumPiiS_i .globl _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ .type _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_, @function _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_: .LFB2087: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L23 .L19: movq 120(%rsp), %rax subq %fs:40, %rax jne .L24 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L23: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z14add_block_sumsPiiS_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L19 .L24: call __stack_chk_fail@PLT .cfi_endproc .LFE2087: .size _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_, .-_Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ .globl _Z14add_block_sumsPiiS_ .type _Z14add_block_sumsPiiS_, @function _Z14add_block_sumsPiiS_: .LFB2088: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2088: .size _Z14add_block_sumsPiiS_, .-_Z14add_block_sumsPiiS_ .globl _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i .type _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i, @function _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i: .LFB2089: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L31 .L27: movq 120(%rsp), %rax subq %fs:40, %rax jne .L32 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z16gather_every_nthPiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L27 .L32: call __stack_chk_fail@PLT .cfi_endproc .LFE2089: .size _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i, .-_Z39__device_stub__Z16gather_every_nthPiS_iPiS_i .globl _Z16gather_every_nthPiS_i .type _Z16gather_every_nthPiS_i, @function _Z16gather_every_nthPiS_i: .LFB2090: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2090: .size _Z16gather_every_nthPiS_i, .-_Z16gather_every_nthPiS_i .globl _Z8sum_scanPiiS_i .type _Z8sum_scanPiiS_i, @function _Z8sum_scanPiiS_i: .LFB2057: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $48, %rsp .cfi_def_cfa_offset 96 movq %rdi, %r12 movl %esi, %ebx movq %rdx, %rbp movl %ecx, %r13d movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax pxor %xmm0, %xmm0 cvtsi2sdl %esi, %xmm0 mulsd .LC0(%rip), %xmm0 movapd %xmm0, %xmm3 movsd .LC4(%rip), %xmm2 movapd %xmm0, %xmm1 andpd %xmm2, %xmm1 movsd .LC1(%rip), %xmm4 ucomisd %xmm1, %xmm4 jbe .L36 cvttsd2siq %xmm0, %rax pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 cmpnlesd %xmm1, %xmm3 movsd .LC3(%rip), %xmm4 andpd %xmm4, %xmm3 addsd %xmm1, %xmm3 andnpd %xmm0, %xmm2 orpd %xmm2, %xmm3 .L36: cvttsd2sil %xmm3, %r14d movl $1024, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %r14d, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $4096, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L44 .L37: cmpl $1, %r14d jg .L45 .L35: movq 40(%rsp), %rax subq %fs:40, %rax jne .L46 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L44: .cfi_restore_state movl %r13d, %ecx movq %rbp, %rdx movl %ebx, %esi movq %r12, %rdi call _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i jmp .L37 .L45: movslq %r14d, %rsi salq $2, %rsi leaq 8(%rsp), %rdi call cudaMalloc@PLT movl %r14d, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L47 .L39: movl $1024, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $4096, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L48 .L40: movl $1024, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl %r14d, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L49 .L41: movq 8(%rsp), %rdi call cudaFree@PLT jmp .L35 .L47: movl $1024, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call _Z39__device_stub__Z16gather_every_nthPiS_iPiS_i jmp .L39 .L48: movq 8(%rsp), %rdi movl $0, %ecx movq %rdi, %rdx movl %r14d, %esi call _Z42__device_stub__Z18local_blelloch_sumPiiS_iPiiS_i jmp .L40 .L49: movq 8(%rsp), %rdx movl %ebx, %esi movq %rbp, %rdi call _Z37__device_stub__Z14add_block_sumsPiiS_PiiS_ jmp .L41 .L46: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z8sum_scanPiiS_i, .-_Z8sum_scanPiiS_i .section .rodata.str1.1,"aMS",@progbits,1 .LC5: .string "\n" .LC6: .string "\t" .LC7: .string "%i%s" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $56, %rsp .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 .cfi_offset 3, -56 movq %fs:40, %rax movq %rax, -56(%rbp) xorl %eax, %eax leaq -96(%rbp), %rdi movl $16400, %esi call cudaMalloc@PLT leaq -88(%rbp), %rdi movl $16400, %esi call cudaMalloc@PLT movl $1024, -68(%rbp) movl $1, -64(%rbp) movl $1, -60(%rbp) movl $5, -80(%rbp) movl $1, -76(%rbp) movl $1, -72(%rbp) movl $0, %r9d movl $0, %r8d movq -68(%rbp), %rdx movl $1, %ecx movq -80(%rbp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L64 .L51: leaq -16384(%rsp), %rax .L52: cmpq %rax, %rsp je .L53 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L52 .L64: movl $4100, %edx movl $1, %esi movq -96(%rbp), %rdi call _Z34__device_stub__Z12init_numbersPiiiPiii jmp .L51 .L53: subq $16, %rsp orq $0, 8(%rsp) movq %rsp, %rdi movl $2, %ecx movl $16400, %edx movq -96(%rbp), %rsi call cudaMemcpy@PLT movl $1, %ecx movq -88(%rbp), %rdx movl $4100, %esi movq -96(%rbp), %rdi call _Z8sum_scanPiiS_i leaq -16384(%rsp), %rax .L55: cmpq %rax, %rsp je .L56 subq $4096, %rsp orq $0, 4088(%rsp) jmp .L55 .L56: subq $16, %rsp orq $0, 8(%rsp) movq %rsp, %r12 movl $2, %ecx movl $16400, %edx movq -88(%rbp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl $0, %ebx leaq .LC6(%rip), %r15 leaq .LC5(%rip), %r14 leaq .LC7(%rip), %r13 .L59: movslq %ebx, %rax imulq $1717986919, %rax, %rax sarq $35, %rax movl %ebx, %edx sarl $31, %edx subl %edx, %eax leal (%rax,%rax,4), %eax sall $2, %eax movl %ebx, %edx subl %eax, %edx cmpl $19, %edx movq %r14, %rcx cmovne %r15, %rcx movl (%r12,%rbx,4), %edx movq %r13, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $1, %rbx cmpq $4100, %rbx jne .L59 leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq -96(%rbp), %rdi call cudaFree@PLT movq -88(%rbp), %rdi call cudaFree@PLT call cudaDeviceSynchronize@PLT movq -56(%rbp), %rax subq %fs:40, %rax jne .L65 movl $0, %eax leaq -40(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .L65: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC8: .string "_Z16gather_every_nthPiS_i" .LC9: .string "_Z14add_block_sumsPiiS_" .LC10: .string "_Z18local_blelloch_sumPiiS_i" .LC11: .string "_Z12init_numbersPiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2092: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z16gather_every_nthPiS_i(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z14add_block_sumsPiiS_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z18local_blelloch_sumPiiS_i(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), %rdx movq %rdx, %rcx leaq _Z12init_numbersPiii(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2092: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1062207488 .align 8 .LC1: .long 0 .long 1127219200 .align 8 .LC3: .long 0 .long 1072693248 .align 8 .LC4: .long -1 .long 2147483647 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "scan.hip" .globl _Z27__device_stub__init_numbersPiii # -- Begin function _Z27__device_stub__init_numbersPiii .p2align 4, 0x90 .type _Z27__device_stub__init_numbersPiii,@function _Z27__device_stub__init_numbersPiii: # @_Z27__device_stub__init_numbersPiii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movl %edx, (%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) movq %rsp, %rax movq %rax, 80(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z12init_numbersPiii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z27__device_stub__init_numbersPiii, .Lfunc_end0-_Z27__device_stub__init_numbersPiii .cfi_endproc # -- End function .globl _Z33__device_stub__local_blelloch_sumPiiS_i # -- Begin function _Z33__device_stub__local_blelloch_sumPiiS_i .p2align 4, 0x90 .type _Z33__device_stub__local_blelloch_sumPiiS_i,@function _Z33__device_stub__local_blelloch_sumPiiS_i: # @_Z33__device_stub__local_blelloch_sumPiiS_i .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) movl %ecx, 8(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18local_blelloch_sumPiiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end1: .size _Z33__device_stub__local_blelloch_sumPiiS_i, .Lfunc_end1-_Z33__device_stub__local_blelloch_sumPiiS_i .cfi_endproc # -- End function .globl _Z29__device_stub__add_block_sumsPiiS_ # -- Begin function _Z29__device_stub__add_block_sumsPiiS_ .p2align 4, 0x90 .type _Z29__device_stub__add_block_sumsPiiS_,@function _Z29__device_stub__add_block_sumsPiiS_: # @_Z29__device_stub__add_block_sumsPiiS_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movq %rdx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 64(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14add_block_sumsPiiS_, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z29__device_stub__add_block_sumsPiiS_, .Lfunc_end2-_Z29__device_stub__add_block_sumsPiiS_ .cfi_endproc # -- End function .globl _Z31__device_stub__gather_every_nthPiS_i # -- Begin function _Z31__device_stub__gather_every_nthPiS_i .p2align 4, 0x90 .type _Z31__device_stub__gather_every_nthPiS_i,@function _Z31__device_stub__gather_every_nthPiS_i: # @_Z31__device_stub__gather_every_nthPiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16gather_every_nthPiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end3: .size _Z31__device_stub__gather_every_nthPiS_i, .Lfunc_end3-_Z31__device_stub__gather_every_nthPiS_i .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z8sum_scanPiiS_i .LCPI4_0: .quad 0x3f50000000000000 # double 9.765625E-4 .text .globl _Z8sum_scanPiiS_i .p2align 4, 0x90 .type _Z8sum_scanPiiS_i,@function _Z8sum_scanPiiS_i: # @_Z8sum_scanPiiS_i .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $136, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %ecx, %ebp movq %rdx, 120(%rsp) # 8-byte Spill movl %esi, %ebx movq %rdi, %r13 movabsq $4294968320, %r15 # imm = 0x100000400 cvtsi2sd %esi, %xmm0 mulsd .LCPI4_0(%rip), %xmm0 callq ceil@PLT cvttsd2si %xmm0, %r14d leaq (%r14,%r15), %r12 addq $-1024, %r12 # imm = 0xFC00 movl $4096, %r8d # imm = 0x1000 movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax je .LBB4_1 # %bb.2: cmpl $2, %r14d jge .LBB4_3 jmp .LBB4_10 .LBB4_1: movq %r13, 64(%rsp) movl %ebx, 72(%rsp) movq 120(%rsp), %rax # 8-byte Reload movq %rax, 56(%rsp) movl %ebp, 4(%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 72(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18local_blelloch_sumPiiS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 cmpl $2, %r14d jl .LBB4_10 .LBB4_3: leaq (,%r14,4), %rsi leaq 72(%rsp), %rdi callq hipMalloc leaq -1023(%r15), %r13 movq %r13, %rdi movl $1, %esi movq %r12, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_5 # %bb.4: movq 72(%rsp), %rax movq 120(%rsp), %rcx # 8-byte Reload movq %rcx, 64(%rsp) movq %rax, 56(%rsp) movl $1024, 4(%rsp) # imm = 0x400 leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 56(%rsp), %rax movq %rax, 88(%rsp) leaq 4(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16gather_every_nthPiS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_5: movl $4096, %r8d # imm = 0x1000 movq %r13, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_7 # %bb.6: movq 72(%rsp), %rax movq %rax, 64(%rsp) movl %r14d, 4(%rsp) movq %rax, 56(%rsp) movl $0, 132(%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 132(%rsp), %rax movq %rax, 104(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z18local_blelloch_sumPiiS_i, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_7: movq %r12, %rdi movl $1, %esi movq %r15, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB4_9 # %bb.8: movq 72(%rsp), %rax movq 120(%rsp), %rcx # 8-byte Reload movq %rcx, 64(%rsp) movl %ebx, 4(%rsp) movq %rax, 56(%rsp) leaq 64(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z14add_block_sumsPiiS_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB4_9: movq 72(%rsp), %rdi callq hipFree .LBB4_10: addq $136, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z8sum_scanPiiS_i, .Lfunc_end4-_Z8sum_scanPiiS_i .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset %rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $112, %rsp .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 leaq -40(%rbp), %rdi movl $16400, %esi # imm = 0x4010 callq hipMalloc leaq -48(%rbp), %rdi movl $16400, %esi # imm = 0x4010 callq hipMalloc movabsq $4294967301, %rdi # imm = 0x100000005 leaq 1019(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_2 # %bb.1: movq -40(%rbp), %rax movq %rax, -112(%rbp) movl $1, -56(%rbp) movl $4100, -52(%rbp) # imm = 0x1004 leaq -112(%rbp), %rax movq %rax, -144(%rbp) leaq -56(%rbp), %rax movq %rax, -136(%rbp) leaq -52(%rbp), %rax movq %rax, -128(%rbp) leaq -104(%rbp), %rdi leaq -88(%rbp), %rsi leaq -72(%rbp), %rdx leaq -64(%rbp), %rcx callq __hipPopCallConfiguration movq -104(%rbp), %rsi movl -96(%rbp), %edx movq -88(%rbp), %rcx movl -80(%rbp), %r8d leaq -144(%rbp), %r9 movl $_Z12init_numbersPiii, %edi pushq -64(%rbp) pushq -72(%rbp) callq hipLaunchKernel addq $16, %rsp .LBB5_2: movq %rsp, %r15 leaq -16400(%rsp), %rdi movq %rdi, %rsp movq -40(%rbp), %rsi movl $16400, %edx # imm = 0x4010 movl $2, %ecx callq hipMemcpy movq -40(%rbp), %rdi movq -48(%rbp), %rdx movl $4100, %esi # imm = 0x1004 movl $1, %ecx callq _Z8sum_scanPiiS_i movq %rsp, %rbx addq $-16400, %rbx # imm = 0xBFF0 movq %rbx, %rsp movq -48(%rbp), %rsi movl $16400, %edx # imm = 0x4010 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy movabsq $-3689348814741910323, %r12 # imm = 0xCCCCCCCCCCCCCCCD xorl %r14d, %r14d jmp .LBB5_3 .p2align 4, 0x90 .LBB5_5: # in Loop: Header=BB5_3 Depth=1 movl (%rbx,%r14,4), %esi movl $.L.str, %edi xorl %eax, %eax callq printf incq %r14 cmpq $4100, %r14 # imm = 0x1004 je .LBB5_6 .LBB5_3: # =>This Inner Loop Header: Depth=1 movq %r14, %rax mulq %r12 shrq $4, %rdx leal (%rdx,%rdx,4), %eax leal 19(,%rax,4), %eax movl $.L.str.1, %edx cmpl %r14d, %eax je .LBB5_5 # %bb.4: # in Loop: Header=BB5_3 Depth=1 movl $.L.str.2, %edx jmp .LBB5_5 .LBB5_6: movl $10, %edi callq putchar@PLT movq -40(%rbp), %rdi callq hipFree movq -48(%rbp), %rdi callq hipFree callq hipDeviceSynchronize movq %r15, %rsp xorl %eax, %eax leaq -32(%rbp), %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp .cfi_def_cfa %rsp, 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12init_numbersPiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z18local_blelloch_sumPiiS_i, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z14add_block_sumsPiiS_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16gather_every_nthPiS_i, %esi movl $.L__unnamed_4, %edx movl $.L__unnamed_4, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z12init_numbersPiii,@object # @_Z12init_numbersPiii .section .rodata,"a",@progbits .globl _Z12init_numbersPiii .p2align 3, 0x0 _Z12init_numbersPiii: .quad _Z27__device_stub__init_numbersPiii .size _Z12init_numbersPiii, 8 .type _Z18local_blelloch_sumPiiS_i,@object # @_Z18local_blelloch_sumPiiS_i .globl _Z18local_blelloch_sumPiiS_i .p2align 3, 0x0 _Z18local_blelloch_sumPiiS_i: .quad _Z33__device_stub__local_blelloch_sumPiiS_i .size _Z18local_blelloch_sumPiiS_i, 8 .type _Z14add_block_sumsPiiS_,@object # @_Z14add_block_sumsPiiS_ .globl _Z14add_block_sumsPiiS_ .p2align 3, 0x0 _Z14add_block_sumsPiiS_: .quad _Z29__device_stub__add_block_sumsPiiS_ .size _Z14add_block_sumsPiiS_, 8 .type _Z16gather_every_nthPiS_i,@object # @_Z16gather_every_nthPiS_i .globl _Z16gather_every_nthPiS_i .p2align 3, 0x0 _Z16gather_every_nthPiS_i: .quad _Z31__device_stub__gather_every_nthPiS_i .size _Z16gather_every_nthPiS_i, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "%i%s" .size .L.str, 5 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "\n" .size .L.str.1, 2 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "\t" .size .L.str.2, 2 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12init_numbersPiii" .size .L__unnamed_1, 21 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z18local_blelloch_sumPiiS_i" .size .L__unnamed_2, 29 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z14add_block_sumsPiiS_" .size .L__unnamed_3, 24 .type .L__unnamed_4,@object # @3 .L__unnamed_4: .asciz "_Z16gather_every_nthPiS_i" .size .L__unnamed_4, 26 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__init_numbersPiii .addrsig_sym _Z33__device_stub__local_blelloch_sumPiiS_i .addrsig_sym _Z29__device_stub__add_block_sumsPiiS_ .addrsig_sym _Z31__device_stub__gather_every_nthPiS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12init_numbersPiii .addrsig_sym _Z18local_blelloch_sumPiiS_i .addrsig_sym _Z14add_block_sumsPiiS_ .addrsig_sym _Z16gather_every_nthPiS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* kernel.cu Holds the kernel for the main program */ #include <iostream> #define BLOCK_WIDTH 32 #define cuda_check_errors(val) check( (val), #val, __FILE__, __LINE__) using namespace std; /* Reports the location of the occured error and exits the program */ template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != cudaSuccess) { cerr << "CUDA error at: " << file << ":" << line << endl; cerr << cudaGetErrorString(err) << " " << func << endl; exit(1); } } /* The primary kernel (heart of the program!) Each pixel p in the RGBA image is a struct of four unsigned chars: - p.x Which is the red channel number. - p.y The green channel. - p.z The blue channel. - p.w The alpha channel (which we ignore). For each greyscale pixel to be created we calculate this formula: p = .299*p.x + .587*p.y + .114*p.z The output is a single char because we only have one channel. In the kernel each thread is responsible for calculating the mentioned formula for each pixel and the put the result into the greyscale image placehold. First we find out where the thread (pixel) is and then we do the above. */ __global__ void rgba_to_grey(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { size_t j = blockIdx.y * blockDim.y + threadIdx.y; size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= rows || j >= cols) return; uchar4 p = d_rgba[i * cols + j]; d_grey[i * cols + j] = (unsigned char) (0.299f * p.x + 0.587f * p.y + 0.114f * p.z); } /* The image is divided into number of blocks. Each block holds BLOCK_WIDTH*BLOCK_WIDTH threads and in total we have (rows/BLOCK_WIDTH)*(cols/BLOCK_WIDTH) blocks. */ void rgba_to_grey_launcher(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { const dim3 block_size (BLOCK_WIDTH, BLOCK_WIDTH, 1); unsigned int grid_x = (unsigned int) (rows / BLOCK_WIDTH + 1); unsigned int grid_y = (unsigned int) (cols / BLOCK_WIDTH + 1); const dim3 grid_size (grid_x, grid_y, 1); rgba_to_grey<<<grid_size, block_size>>>(d_rgba, d_grey, rows, cols); cudaDeviceSynchronize(); cuda_check_errors(cudaGetLastError()); }
code for sm_80 Function : _Z12rgba_to_greyP6uchar4Phmm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002600 */ /*0020*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002200 */ /*0030*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e680000002500 */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R2, R2, c[0x0][0x4], R3 ; /* 0x0000010002027a24 */ /* 0x001fca00078e0203 */ /*0060*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */ /* 0x000fe20003f26070 */ /*0070*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */ /* 0x002fc600078e0200 */ /*0080*/ ISETP.GE.U32.AND.EX P1, PT, RZ, c[0x0][0x17c], PT, P1 ; /* 0x00005f00ff007a0c */ /* 0x000fe40003f26110 */ /*0090*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x170], PT ; /* 0x00005c0005007a0c */ /* 0x000fc80003f06070 */ /*00a0*/ ISETP.GE.U32.OR.EX P0, PT, RZ, c[0x0][0x174], P1, P0 ; /* 0x00005d00ff007a0c */ /* 0x000fda0000f06500 */ /*00b0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00c0*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */ /* 0x000fe200000001ff */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*00e0*/ IMAD.WIDE.U32 R2, R5, c[0x0][0x178], R2 ; /* 0x00005e0005027a25 */ /* 0x000fc800078e0002 */ /*00f0*/ IMAD R9, R5, c[0x0][0x17c], R3 ; /* 0x00005f0005097a24 */ /* 0x000fe200078e0203 */ /*0100*/ LEA R4, P0, R2, c[0x0][0x160], 0x2 ; /* 0x0000580002047a11 */ /* 0x000fc800078010ff */ /*0110*/ LEA.HI.X R5, R2, c[0x0][0x164], R9, 0x2, P0 ; /* 0x0000590002057a11 */ /* 0x000fca00000f1409 */ /*0120*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0130*/ IADD3 R2, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002027a10 */ /* 0x000fe20007f1e0ff */ /*0140*/ I2F.U8 R3, R4.B1 ; /* 0x1000000400037306 */ /* 0x004e300000001000 */ /*0150*/ I2F.U8 R0, R4 ; /* 0x0000000400007306 */ /* 0x000e700000001000 */ /*0160*/ I2F.U8 R7, R4.B2 ; /* 0x2000000400077306 */ /* 0x000ea20000001000 */ /*0170*/ FMUL R3, R3, 0.58700001239776611328 ; /* 0x3f1645a203037820 */ /* 0x001fc80000400000 */ /*0180*/ FFMA R0, R0, 0.29899999499320983887, R3 ; /* 0x3e99168700007823 */ /* 0x002fe20000000003 */ /*0190*/ IADD3.X R3, R9, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0009037a10 */ /* 0x000fc600007fe4ff */ /*01a0*/ FFMA R0, R7, 0.11400000005960464478, R0 ; /* 0x3de978d507007823 */ /* 0x004fc80000000000 */ /*01b0*/ F2I.U32.TRUNC.NTZ R7, R0 ; /* 0x0000000000077305 */ /* 0x000e24000020f000 */ /*01c0*/ STG.E.U8 [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101104 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* kernel.cu Holds the kernel for the main program */ #include <iostream> #define BLOCK_WIDTH 32 #define cuda_check_errors(val) check( (val), #val, __FILE__, __LINE__) using namespace std; /* Reports the location of the occured error and exits the program */ template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != cudaSuccess) { cerr << "CUDA error at: " << file << ":" << line << endl; cerr << cudaGetErrorString(err) << " " << func << endl; exit(1); } } /* The primary kernel (heart of the program!) Each pixel p in the RGBA image is a struct of four unsigned chars: - p.x Which is the red channel number. - p.y The green channel. - p.z The blue channel. - p.w The alpha channel (which we ignore). For each greyscale pixel to be created we calculate this formula: p = .299*p.x + .587*p.y + .114*p.z The output is a single char because we only have one channel. In the kernel each thread is responsible for calculating the mentioned formula for each pixel and the put the result into the greyscale image placehold. First we find out where the thread (pixel) is and then we do the above. */ __global__ void rgba_to_grey(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { size_t j = blockIdx.y * blockDim.y + threadIdx.y; size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= rows || j >= cols) return; uchar4 p = d_rgba[i * cols + j]; d_grey[i * cols + j] = (unsigned char) (0.299f * p.x + 0.587f * p.y + 0.114f * p.z); } /* The image is divided into number of blocks. Each block holds BLOCK_WIDTH*BLOCK_WIDTH threads and in total we have (rows/BLOCK_WIDTH)*(cols/BLOCK_WIDTH) blocks. */ void rgba_to_grey_launcher(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { const dim3 block_size (BLOCK_WIDTH, BLOCK_WIDTH, 1); unsigned int grid_x = (unsigned int) (rows / BLOCK_WIDTH + 1); unsigned int grid_y = (unsigned int) (cols / BLOCK_WIDTH + 1); const dim3 grid_size (grid_x, grid_y, 1); rgba_to_grey<<<grid_size, block_size>>>(d_rgba, d_grey, rows, cols); cudaDeviceSynchronize(); cuda_check_errors(cudaGetLastError()); }
.file "tmpxft_0004a10d_00000000-6_kernel.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm .type _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm, @function _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm: .LFB3695: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12rgba_to_greyP6uchar4Phmm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3695: .size _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm, .-_Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm .globl _Z12rgba_to_greyP6uchar4Phmm .type _Z12rgba_to_greyP6uchar4Phmm, @function _Z12rgba_to_greyP6uchar4Phmm: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _Z12rgba_to_greyP6uchar4Phmm, .-_Z12rgba_to_greyP6uchar4Phmm .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "CUDA error at: " .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/mhezarei/CUDA-RGB-grey/master/kernel.cu" .section .rodata.str1.1 .LC2: .string ":" .LC3: .string " " .LC4: .string "cudaGetLastError()" .text .globl _Z21rgba_to_grey_launcherP6uchar4Phmm .type _Z21rgba_to_grey_launcherP6uchar4Phmm, @function _Z21rgba_to_grey_launcherP6uchar4Phmm: .LFB3670: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $40, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movq %rsi, %r13 movq %rdx, %rbx movq %rcx, %rbp movl $32, 8(%rsp) movl $32, 12(%rsp) movq %rdx, %rax shrq $5, %rax addl $1, %eax movl %eax, 20(%rsp) movq %rcx, %rax shrq $5, %rax addl $1, %eax movl %eax, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: call cudaDeviceSynchronize@PLT call cudaGetLastError@PLT movl %eax, %ebx testl %eax, %eax jne .L16 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq %rbp, %rcx movq %rbx, %rdx movq %r13, %rsi movq %r12, %rdi call _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm jmp .L12 .L16: leaq .LC0(%rip), %rsi leaq _ZSt4cerr(%rip), %rbp movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC1(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC2(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $68, %esi call _ZNSolsEi@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %rsi movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC3(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC4(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE3670: .size _Z21rgba_to_grey_launcherP6uchar4Phmm, .-_Z21rgba_to_grey_launcherP6uchar4Phmm .section .rodata.str1.1 .LC5: .string "_Z12rgba_to_greyP6uchar4Phmm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3698: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z12rgba_to_greyP6uchar4Phmm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3698: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* kernel.cu Holds the kernel for the main program */ #include <iostream> #define BLOCK_WIDTH 32 #define cuda_check_errors(val) check( (val), #val, __FILE__, __LINE__) using namespace std; /* Reports the location of the occured error and exits the program */ template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != cudaSuccess) { cerr << "CUDA error at: " << file << ":" << line << endl; cerr << cudaGetErrorString(err) << " " << func << endl; exit(1); } } /* The primary kernel (heart of the program!) Each pixel p in the RGBA image is a struct of four unsigned chars: - p.x Which is the red channel number. - p.y The green channel. - p.z The blue channel. - p.w The alpha channel (which we ignore). For each greyscale pixel to be created we calculate this formula: p = .299*p.x + .587*p.y + .114*p.z The output is a single char because we only have one channel. In the kernel each thread is responsible for calculating the mentioned formula for each pixel and the put the result into the greyscale image placehold. First we find out where the thread (pixel) is and then we do the above. */ __global__ void rgba_to_grey(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { size_t j = blockIdx.y * blockDim.y + threadIdx.y; size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= rows || j >= cols) return; uchar4 p = d_rgba[i * cols + j]; d_grey[i * cols + j] = (unsigned char) (0.299f * p.x + 0.587f * p.y + 0.114f * p.z); } /* The image is divided into number of blocks. Each block holds BLOCK_WIDTH*BLOCK_WIDTH threads and in total we have (rows/BLOCK_WIDTH)*(cols/BLOCK_WIDTH) blocks. */ void rgba_to_grey_launcher(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { const dim3 block_size (BLOCK_WIDTH, BLOCK_WIDTH, 1); unsigned int grid_x = (unsigned int) (rows / BLOCK_WIDTH + 1); unsigned int grid_y = (unsigned int) (cols / BLOCK_WIDTH + 1); const dim3 grid_size (grid_x, grid_y, 1); rgba_to_grey<<<grid_size, block_size>>>(d_rgba, d_grey, rows, cols); cudaDeviceSynchronize(); cuda_check_errors(cudaGetLastError()); }
/* kernel.cu Holds the kernel for the main program */ #include <hip/hip_runtime.h> #include <iostream> #define BLOCK_WIDTH 32 #define cuda_check_errors(val) check( (val), #val, __FILE__, __LINE__) using namespace std; /* Reports the location of the occured error and exits the program */ template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { cerr << "CUDA error at: " << file << ":" << line << endl; cerr << hipGetErrorString(err) << " " << func << endl; exit(1); } } /* The primary kernel (heart of the program!) Each pixel p in the RGBA image is a struct of four unsigned chars: - p.x Which is the red channel number. - p.y The green channel. - p.z The blue channel. - p.w The alpha channel (which we ignore). For each greyscale pixel to be created we calculate this formula: p = .299*p.x + .587*p.y + .114*p.z The output is a single char because we only have one channel. In the kernel each thread is responsible for calculating the mentioned formula for each pixel and the put the result into the greyscale image placehold. First we find out where the thread (pixel) is and then we do the above. */ __global__ void rgba_to_grey(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { size_t j = blockIdx.y * blockDim.y + threadIdx.y; size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= rows || j >= cols) return; uchar4 p = d_rgba[i * cols + j]; d_grey[i * cols + j] = (unsigned char) (0.299f * p.x + 0.587f * p.y + 0.114f * p.z); } /* The image is divided into number of blocks. Each block holds BLOCK_WIDTH*BLOCK_WIDTH threads and in total we have (rows/BLOCK_WIDTH)*(cols/BLOCK_WIDTH) blocks. */ void rgba_to_grey_launcher(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { const dim3 block_size (BLOCK_WIDTH, BLOCK_WIDTH, 1); unsigned int grid_x = (unsigned int) (rows / BLOCK_WIDTH + 1); unsigned int grid_y = (unsigned int) (cols / BLOCK_WIDTH + 1); const dim3 grid_size (grid_x, grid_y, 1); rgba_to_grey<<<grid_size, block_size>>>(d_rgba, d_grey, rows, cols); hipDeviceSynchronize(); cuda_check_errors(hipGetLastError()); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* kernel.cu Holds the kernel for the main program */ #include <hip/hip_runtime.h> #include <iostream> #define BLOCK_WIDTH 32 #define cuda_check_errors(val) check( (val), #val, __FILE__, __LINE__) using namespace std; /* Reports the location of the occured error and exits the program */ template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { cerr << "CUDA error at: " << file << ":" << line << endl; cerr << hipGetErrorString(err) << " " << func << endl; exit(1); } } /* The primary kernel (heart of the program!) Each pixel p in the RGBA image is a struct of four unsigned chars: - p.x Which is the red channel number. - p.y The green channel. - p.z The blue channel. - p.w The alpha channel (which we ignore). For each greyscale pixel to be created we calculate this formula: p = .299*p.x + .587*p.y + .114*p.z The output is a single char because we only have one channel. In the kernel each thread is responsible for calculating the mentioned formula for each pixel and the put the result into the greyscale image placehold. First we find out where the thread (pixel) is and then we do the above. */ __global__ void rgba_to_grey(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { size_t j = blockIdx.y * blockDim.y + threadIdx.y; size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= rows || j >= cols) return; uchar4 p = d_rgba[i * cols + j]; d_grey[i * cols + j] = (unsigned char) (0.299f * p.x + 0.587f * p.y + 0.114f * p.z); } /* The image is divided into number of blocks. Each block holds BLOCK_WIDTH*BLOCK_WIDTH threads and in total we have (rows/BLOCK_WIDTH)*(cols/BLOCK_WIDTH) blocks. */ void rgba_to_grey_launcher(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { const dim3 block_size (BLOCK_WIDTH, BLOCK_WIDTH, 1); unsigned int grid_x = (unsigned int) (rows / BLOCK_WIDTH + 1); unsigned int grid_y = (unsigned int) (cols / BLOCK_WIDTH + 1); const dim3 grid_size (grid_x, grid_y, 1); rgba_to_grey<<<grid_size, block_size>>>(d_rgba, d_grey, rows, cols); hipDeviceSynchronize(); cuda_check_errors(hipGetLastError()); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .globl _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .p2align 8 .type _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm,@function _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b128 s[4:7], s[0:1], 0x10 v_bfe_u32 v1, v0, 10, 10 v_and_b32_e32 v4, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s2, s2, 0xffff v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2] v_mov_b32_e32 v3, 0 v_mad_u64_u32 v[0:1], null, s14, s2, v[4:5] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mov_b32_e32 v1, v3 v_cmp_gt_u64_e64 s2, s[6:7], v[2:3] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[0:1] s_and_b32 s2, s2, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 v_mad_u64_u32 v[4:5], null, v0, s6, v[2:3] s_load_b128 s[0:3], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mov_b32_e32 v1, v5 v_mad_u64_u32 v[2:3], null, v0, s7, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mov_b32_e32 v5, v2 v_lshlrev_b64 v[0:1], 2, v[4:5] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_clause 0x2 global_load_u8 v2, v[0:1], off offset:1 global_load_u8 v3, v[0:1], off global_load_u8 v0, v[0:1], off offset:2 s_waitcnt vmcnt(2) v_cvt_f32_ubyte0_e32 v1, v2 s_waitcnt vmcnt(1) v_cvt_f32_ubyte0_e32 v2, v3 s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v0, v0 v_mul_f32_e32 v1, 0x3f1645a2, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmamk_f32 v1, v2, 0x3e991687, v1 v_fmamk_f32 v0, v0, 0x3de978d5, v1 s_delay_alu instid0(VALU_DEP_1) v_cvt_i32_f32_e32 v2, v0 v_add_co_u32 v0, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v5, vcc_lo global_store_b8 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, .Lfunc_end0-_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* kernel.cu Holds the kernel for the main program */ #include <hip/hip_runtime.h> #include <iostream> #define BLOCK_WIDTH 32 #define cuda_check_errors(val) check( (val), #val, __FILE__, __LINE__) using namespace std; /* Reports the location of the occured error and exits the program */ template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { cerr << "CUDA error at: " << file << ":" << line << endl; cerr << hipGetErrorString(err) << " " << func << endl; exit(1); } } /* The primary kernel (heart of the program!) Each pixel p in the RGBA image is a struct of four unsigned chars: - p.x Which is the red channel number. - p.y The green channel. - p.z The blue channel. - p.w The alpha channel (which we ignore). For each greyscale pixel to be created we calculate this formula: p = .299*p.x + .587*p.y + .114*p.z The output is a single char because we only have one channel. In the kernel each thread is responsible for calculating the mentioned formula for each pixel and the put the result into the greyscale image placehold. First we find out where the thread (pixel) is and then we do the above. */ __global__ void rgba_to_grey(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { size_t j = blockIdx.y * blockDim.y + threadIdx.y; size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= rows || j >= cols) return; uchar4 p = d_rgba[i * cols + j]; d_grey[i * cols + j] = (unsigned char) (0.299f * p.x + 0.587f * p.y + 0.114f * p.z); } /* The image is divided into number of blocks. Each block holds BLOCK_WIDTH*BLOCK_WIDTH threads and in total we have (rows/BLOCK_WIDTH)*(cols/BLOCK_WIDTH) blocks. */ void rgba_to_grey_launcher(uchar4 *const d_rgba, unsigned char *const d_grey, size_t rows, size_t cols) { const dim3 block_size (BLOCK_WIDTH, BLOCK_WIDTH, 1); unsigned int grid_x = (unsigned int) (rows / BLOCK_WIDTH + 1); unsigned int grid_y = (unsigned int) (cols / BLOCK_WIDTH + 1); const dim3 grid_size (grid_x, grid_y, 1); rgba_to_grey<<<grid_size, block_size>>>(d_rgba, d_grey, rows, cols); hipDeviceSynchronize(); cuda_check_errors(hipGetLastError()); }
.text .file "kernel.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm # -- Begin function _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .p2align 4, 0x90 .type _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm,@function _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm: # @_Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movq %rcx, 48(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rax movq %rax, 104(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, .Lfunc_end0-_Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .cfi_endproc # -- End function .globl _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm # -- Begin function _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm .p2align 4, 0x90 .type _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm,@function _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm: # @_Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $120, %rsp .cfi_def_cfa_offset 160 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %r12 movq %rdx, %rax shrq $5, %rax incl %eax andq $-32, %rcx shlq $27, %rcx orq %rax, %rcx movabsq $4294967296, %rdi # imm = 0x100000000 addq %rcx, %rdi movabsq $137438953504, %rdx # imm = 0x2000000020 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq %r12, 72(%rsp) movq %r15, 64(%rsp) movq %r14, 56(%rsp) movq %rbx, 48(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rax movq %rax, 104(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize callq hipGetLastError movl $.L.str, %esi movl $.L.str.1, %edx movl %eax, %edi movl $70, %ecx callq _Z5checkI10hipError_tEvT_PKcS3_i addq $120, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm, .Lfunc_end1-_Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm .cfi_endproc # -- End function .section .text._Z5checkI10hipError_tEvT_PKcS3_i,"axG",@progbits,_Z5checkI10hipError_tEvT_PKcS3_i,comdat .weak _Z5checkI10hipError_tEvT_PKcS3_i # -- Begin function _Z5checkI10hipError_tEvT_PKcS3_i .p2align 4, 0x90 .type _Z5checkI10hipError_tEvT_PKcS3_i,@function _Z5checkI10hipError_tEvT_PKcS3_i: # @_Z5checkI10hipError_tEvT_PKcS3_i .cfi_startproc # %bb.0: testl %edi, %edi jne .LBB2_2 # %bb.1: retq .LBB2_2: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebp movl $_ZSt4cerr, %edi movq %rsi, %rbx movl $.L.str.2, %esi movl %ecx, %r14d movq %rdx, %r15 callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi movq %r15, %rsi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movl $.L.str.3, %esi movq %rax, %rdi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi movl %r14d, %esi callq _ZNSolsEi movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl %ebp, %edi callq hipGetErrorString movl $_ZSt4cerr, %edi movq %rax, %rsi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movl $.L.str.4, %esi movq %rax, %rdi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi movq %rbx, %rsi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl $1, %edi callq exit .Lfunc_end2: .size _Z5checkI10hipError_tEvT_PKcS3_i, .Lfunc_end2-_Z5checkI10hipError_tEvT_PKcS3_i .cfi_endproc # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm,@object # @_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .section .rodata,"a",@progbits .globl _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .p2align 3, 0x0 _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm: .quad _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .size _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "hipGetLastError()" .size .L.str, 18 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/mhezarei/CUDA-RGB-grey/master/kernel.hip" .size .L.str.1, 98 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "CUDA error at: " .size .L.str.2, 16 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz ":" .size .L.str.3, 2 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " " .size .L.str.4, 2 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm" .size .L__unnamed_1, 46 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .addrsig_sym _ZSt4cerr .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z12rgba_to_greyP6uchar4Phmm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e280000002600 */ /*0020*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002200 */ /*0030*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */ /* 0x000e680000002500 */ /*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e620000002100 */ /*0050*/ IMAD R2, R2, c[0x0][0x4], R3 ; /* 0x0000010002027a24 */ /* 0x001fca00078e0203 */ /*0060*/ ISETP.GE.U32.AND P1, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */ /* 0x000fe20003f26070 */ /*0070*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */ /* 0x002fc600078e0200 */ /*0080*/ ISETP.GE.U32.AND.EX P1, PT, RZ, c[0x0][0x17c], PT, P1 ; /* 0x00005f00ff007a0c */ /* 0x000fe40003f26110 */ /*0090*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x170], PT ; /* 0x00005c0005007a0c */ /* 0x000fc80003f06070 */ /*00a0*/ ISETP.GE.U32.OR.EX P0, PT, RZ, c[0x0][0x174], P1, P0 ; /* 0x00005d00ff007a0c */ /* 0x000fda0000f06500 */ /*00b0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00c0*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */ /* 0x000fe200000001ff */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*00e0*/ IMAD.WIDE.U32 R2, R5, c[0x0][0x178], R2 ; /* 0x00005e0005027a25 */ /* 0x000fc800078e0002 */ /*00f0*/ IMAD R9, R5, c[0x0][0x17c], R3 ; /* 0x00005f0005097a24 */ /* 0x000fe200078e0203 */ /*0100*/ LEA R4, P0, R2, c[0x0][0x160], 0x2 ; /* 0x0000580002047a11 */ /* 0x000fc800078010ff */ /*0110*/ LEA.HI.X R5, R2, c[0x0][0x164], R9, 0x2, P0 ; /* 0x0000590002057a11 */ /* 0x000fca00000f1409 */ /*0120*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*0130*/ IADD3 R2, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002027a10 */ /* 0x000fe20007f1e0ff */ /*0140*/ I2F.U8 R3, R4.B1 ; /* 0x1000000400037306 */ /* 0x004e300000001000 */ /*0150*/ I2F.U8 R0, R4 ; /* 0x0000000400007306 */ /* 0x000e700000001000 */ /*0160*/ I2F.U8 R7, R4.B2 ; /* 0x2000000400077306 */ /* 0x000ea20000001000 */ /*0170*/ FMUL R3, R3, 0.58700001239776611328 ; /* 0x3f1645a203037820 */ /* 0x001fc80000400000 */ /*0180*/ FFMA R0, R0, 0.29899999499320983887, R3 ; /* 0x3e99168700007823 */ /* 0x002fe20000000003 */ /*0190*/ IADD3.X R3, R9, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0009037a10 */ /* 0x000fc600007fe4ff */ /*01a0*/ FFMA R0, R7, 0.11400000005960464478, R0 ; /* 0x3de978d507007823 */ /* 0x004fc80000000000 */ /*01b0*/ F2I.U32.TRUNC.NTZ R7, R0 ; /* 0x0000000000077305 */ /* 0x000e24000020f000 */ /*01c0*/ STG.E.U8 [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101104 */ /*01d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .globl _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .p2align 8 .type _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm,@function _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm: s_clause 0x1 s_load_b32 s2, s[0:1], 0x2c s_load_b128 s[4:7], s[0:1], 0x10 v_bfe_u32 v1, v0, 10, 10 v_and_b32_e32 v4, 0x3ff, v0 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s2, s2, 0xffff v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2] v_mov_b32_e32 v3, 0 v_mad_u64_u32 v[0:1], null, s14, s2, v[4:5] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mov_b32_e32 v1, v3 v_cmp_gt_u64_e64 s2, s[6:7], v[2:3] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[0:1] s_and_b32 s2, s2, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB0_2 v_mad_u64_u32 v[4:5], null, v0, s6, v[2:3] s_load_b128 s[0:3], s[0:1], 0x0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mov_b32_e32 v1, v5 v_mad_u64_u32 v[2:3], null, v0, s7, v[1:2] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mov_b32_e32 v5, v2 v_lshlrev_b64 v[0:1], 2, v[4:5] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo s_clause 0x2 global_load_u8 v2, v[0:1], off offset:1 global_load_u8 v3, v[0:1], off global_load_u8 v0, v[0:1], off offset:2 s_waitcnt vmcnt(2) v_cvt_f32_ubyte0_e32 v1, v2 s_waitcnt vmcnt(1) v_cvt_f32_ubyte0_e32 v2, v3 s_waitcnt vmcnt(0) v_cvt_f32_ubyte0_e32 v0, v0 v_mul_f32_e32 v1, 0x3f1645a2, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmamk_f32 v1, v2, 0x3e991687, v1 v_fmamk_f32 v0, v0, 0x3de978d5, v1 s_delay_alu instid0(VALU_DEP_1) v_cvt_i32_f32_e32 v2, v0 v_add_co_u32 v0, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v1, vcc_lo, s3, v5, vcc_lo global_store_b8 v[0:1], v2, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 6 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, .Lfunc_end0-_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 8 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 6 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0004a10d_00000000-6_kernel.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3673: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3673: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm .type _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm, @function _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm: .LFB3695: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %rcx, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movq %rsp, %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z12rgba_to_greyP6uchar4Phmm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3695: .size _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm, .-_Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm .globl _Z12rgba_to_greyP6uchar4Phmm .type _Z12rgba_to_greyP6uchar4Phmm, @function _Z12rgba_to_greyP6uchar4Phmm: .LFB3696: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3696: .size _Z12rgba_to_greyP6uchar4Phmm, .-_Z12rgba_to_greyP6uchar4Phmm .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "CUDA error at: " .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC1: .string "/home/ubuntu/Datasets/stackv2/train-structured/mhezarei/CUDA-RGB-grey/master/kernel.cu" .section .rodata.str1.1 .LC2: .string ":" .LC3: .string " " .LC4: .string "cudaGetLastError()" .text .globl _Z21rgba_to_grey_launcherP6uchar4Phmm .type _Z21rgba_to_grey_launcherP6uchar4Phmm, @function _Z21rgba_to_grey_launcherP6uchar4Phmm: .LFB3670: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $40, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r12 movq %rsi, %r13 movq %rdx, %rbx movq %rcx, %rbp movl $32, 8(%rsp) movl $32, 12(%rsp) movq %rdx, %rax shrq $5, %rax addl $1, %eax movl %eax, 20(%rsp) movq %rcx, %rax shrq $5, %rax addl $1, %eax movl %eax, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L12: call cudaDeviceSynchronize@PLT call cudaGetLastError@PLT movl %eax, %ebx testl %eax, %eax jne .L16 addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq %rbp, %rcx movq %rbx, %rdx movq %r13, %rsi movq %r12, %rdi call _Z42__device_stub__Z12rgba_to_greyP6uchar4PhmmP6uchar4Phmm jmp .L12 .L16: leaq .LC0(%rip), %rsi leaq _ZSt4cerr(%rip), %rbp movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC1(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC2(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi movl $68, %esi call _ZNSolsEi@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl %ebx, %edi call cudaGetErrorString@PLT movq %rax, %rsi movq %rbp, %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC3(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi leaq .LC4(%rip), %rsi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movq %rax, %rdi call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT movl $1, %edi call exit@PLT .cfi_endproc .LFE3670: .size _Z21rgba_to_grey_launcherP6uchar4Phmm, .-_Z21rgba_to_grey_launcherP6uchar4Phmm .section .rodata.str1.1 .LC5: .string "_Z12rgba_to_greyP6uchar4Phmm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3698: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC5(%rip), %rdx movq %rdx, %rcx leaq _Z12rgba_to_greyP6uchar4Phmm(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3698: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "kernel.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm # -- Begin function _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .p2align 4, 0x90 .type _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm,@function _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm: # @_Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movq %rcx, 48(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rax movq %rax, 104(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, .Lfunc_end0-_Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .cfi_endproc # -- End function .globl _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm # -- Begin function _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm .p2align 4, 0x90 .type _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm,@function _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm: # @_Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r12 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 subq $120, %rsp .cfi_def_cfa_offset 160 .cfi_offset %rbx, -40 .cfi_offset %r12, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movq %rcx, %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %r12 movq %rdx, %rax shrq $5, %rax incl %eax andq $-32, %rcx shlq $27, %rcx orq %rax, %rcx movabsq $4294967296, %rdi # imm = 0x100000000 addq %rcx, %rdi movabsq $137438953504, %rdx # imm = 0x2000000020 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq %r12, 72(%rsp) movq %r15, 64(%rsp) movq %r14, 56(%rsp) movq %rbx, 48(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rax movq %rax, 104(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize callq hipGetLastError movl $.L.str, %esi movl $.L.str.1, %edx movl %eax, %edi movl $70, %ecx callq _Z5checkI10hipError_tEvT_PKcS3_i addq $120, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm, .Lfunc_end1-_Z21rgba_to_grey_launcherP15HIP_vector_typeIhLj4EEPhmm .cfi_endproc # -- End function .section .text._Z5checkI10hipError_tEvT_PKcS3_i,"axG",@progbits,_Z5checkI10hipError_tEvT_PKcS3_i,comdat .weak _Z5checkI10hipError_tEvT_PKcS3_i # -- Begin function _Z5checkI10hipError_tEvT_PKcS3_i .p2align 4, 0x90 .type _Z5checkI10hipError_tEvT_PKcS3_i,@function _Z5checkI10hipError_tEvT_PKcS3_i: # @_Z5checkI10hipError_tEvT_PKcS3_i .cfi_startproc # %bb.0: testl %edi, %edi jne .LBB2_2 # %bb.1: retq .LBB2_2: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edi, %ebp movl $_ZSt4cerr, %edi movq %rsi, %rbx movl $.L.str.2, %esi movl %ecx, %r14d movq %rdx, %r15 callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi movq %r15, %rsi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movl $.L.str.3, %esi movq %rax, %rdi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi movl %r14d, %esi callq _ZNSolsEi movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl %ebp, %edi callq hipGetErrorString movl $_ZSt4cerr, %edi movq %rax, %rsi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movl $.L.str.4, %esi movq %rax, %rdi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi movq %rbx, %rsi callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc movq %rax, %rdi callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_ movl $1, %edi callq exit .Lfunc_end2: .size _Z5checkI10hipError_tEvT_PKcS3_i, .Lfunc_end2-_Z5checkI10hipError_tEvT_PKcS3_i .cfi_endproc # -- End function .text .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm,@object # @_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .section .rodata,"a",@progbits .globl _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .p2align 3, 0x0 _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm: .quad _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .size _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "hipGetLastError()" .size .L.str, 18 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/mhezarei/CUDA-RGB-grey/master/kernel.hip" .size .L.str.1, 98 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "CUDA error at: " .size .L.str.2, 16 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz ":" .size .L.str.3, 2 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz " " .size .L.str.4, 2 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm" .size .L__unnamed_1, 46 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12rgba_to_greyP15HIP_vector_typeIhLj4EEPhmm .addrsig_sym _ZSt4cerr .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif #ifndef BATCH_SIZE #define BATCH_SIZE 1 #endif #ifndef NUM_ITERATIONS #define NUM_ITERATIONS 1024 #endif /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m1, n1, k1, m2, n2, k2, m3, n3, k3; /* Fixed seed for illustration */ srand(3333); m1=BATCH_SIZE; n1=65536; k1=4096; m2=BATCH_SIZE; n2=4096; k2=1024; m3=BATCH_SIZE; n3=4096; k3=10; // allocate memory in host RAM int *h_a, *h_b, *h_c, *h_d, *h_e, *h_f, *h_g; cudaMallocHost((void **) &h_a, sizeof(int)*m1*n1); cudaMallocHost((void **) &h_b, sizeof(int)*n1*k1); cudaMallocHost((void **) &h_c, sizeof(int)*m1*k1); cudaMallocHost((void **) &h_d, sizeof(int)*n2*k2); cudaMallocHost((void **) &h_e, sizeof(int)*m2*k2); cudaMallocHost((void **) &h_f, sizeof(int)*n3*k3); cudaMallocHost((void **) &h_g, sizeof(int)*m3*k3); // random initialize matrix B for (int i = 0; i < n1; ++i) { for (int j = 0; j < k1; ++j) { h_b[i * k1 + j] = rand() % 1024; } } // random initialize matrix D for (int i = 0; i < n2; ++i) { for (int j = 0; j < k2; ++j) { h_d[i * k2 + j] = rand() % 1024; } } // random initialize matrix F for (int i = 0; i < n3; ++i) { for (int j = 0; j < k3; ++j) { h_f[i * k3 + j] = rand() % 1024; } } float gpu_elapsed_time_ms; // some events to count the execution time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Allocate memory space on the device int *d_a, *d_b, *d_c, *d_d, *d_e, *d_f, *d_g; cudaMalloc((void **) &d_a, sizeof(int)*m1*n1); cudaMalloc((void **) &d_b, sizeof(int)*n1*k1); cudaMalloc((void **) &d_c, sizeof(int)*m1*k1); cudaMalloc((void **) &d_d, sizeof(int)*n2*k2); cudaMalloc((void **) &d_e, sizeof(int)*m2*k2); cudaMalloc((void **) &d_f, sizeof(int)*n3*k3); cudaMalloc((void **) &d_g, sizeof(int)*m3*k3); // copy matrix B,D,F from host to device memory - these are weight matrices cudaMemcpy(d_b, h_b, sizeof(int)*n1*k1, cudaMemcpyHostToDevice); cudaMemcpy(d_d, h_d, sizeof(int)*n2*k2, cudaMemcpyHostToDevice); cudaMemcpy(d_f, h_f, sizeof(int)*n3*k3, cudaMemcpyHostToDevice); int numExamples = 0; double total_time_ms = 0.0; for(int i=0;i<NUM_ITERATIONS;i++) { // random initialize matrix A - this is the input matrix for (int i = 0; i < m1; ++i) { for (int j = 0; j < n1; ++j) { h_a[i * n1 + j] = rand() % 1024; } } cudaEventRecord(start, 0); // copy from host to device cudaMemcpy(d_a, h_a, sizeof(int)*m1*n1, cudaMemcpyHostToDevice); unsigned int grid_rows = (m1 + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k1 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel for multiplication 1 #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid, dimBlock, 0, 0>>>(d_a, d_b, d_c, m1, n1, k1); // execute on default stream #else gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m1, n1, k1); #endif cudaDeviceSynchronize(); // Launch kernel for multiplication 2 grid_rows = (m2 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k2 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(grid_cols, grid_rows); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid2, dimBlock2, 0, 0>>>(d_c, d_d, d_e, m2, n2, k2); // execute on default stream #else gpu_matrix_mult<<<dimGrid2, dimBlock2>>>(d_c, d_d, d_e, m2, n2, k2); #endif // Launch kernel for multiplication 3 - DR model grid_rows = (m3 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(grid_cols, grid_rows); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS cudaStream_t streams[1]; cudaStreamCreate(&streams[0]); gpu_matrix_mult<<<dimGrid3, dimBlock3, 0, streams[0]>>>(d_c, d_f, d_g, m3, n3, k3); // execute on non-default stream #else gpu_matrix_mult<<<dimGrid3, dimBlock3>>>(d_c, d_f, d_g, m3, n3, k3); #endif // Transfer results from device to host - only DR model result cudaMemcpy(h_g, d_g, sizeof(int)*m2*k2, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); // time counting terminate cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // compute time elapse on GPU computing cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); numExamples += BATCH_SIZE; total_time_ms += gpu_elapsed_time_ms; } printf("Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n", total_time_ms/NUM_ITERATIONS, numExamples*1000.0/total_time_ms); // free memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_d); cudaFree(d_e); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); cudaFreeHost(h_d); cudaFreeHost(h_e); return 0; }
code for sm_80 Function : _Z15gpu_matrix_multPiS_S_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e280000002100 */ /*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e680000002600 */ /*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0205 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x002fca00078e0202 */ /*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ MOV R4, c[0x0][0x17c] ; /* 0x00005f0000047a02 */ /* 0x000fe20000000f00 */ /*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00c0*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */ /* 0x000fe400000001ff */ /*00d0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */ /* 0x000fda0003f06270 */ /*00e0*/ @!P0 BRA 0xc30 ; /* 0x00000b4000008947 */ /* 0x000fea0003800000 */ /*00f0*/ IADD3 R2, R4.reuse, -0x1, RZ ; /* 0xffffffff04027810 */ /* 0x040fe40007ffe0ff */ /*0100*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */ /* 0x000fe400078ec0ff */ /*0110*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe40003f06070 */ /*0120*/ MOV R2, RZ ; /* 0x000000ff00027202 */ /* 0x000fe40000000f00 */ /*0130*/ MOV R24, RZ ; /* 0x000000ff00187202 */ /* 0x000fd20000000f00 */ /*0140*/ @!P0 BRA 0xb20 ; /* 0x000009d000008947 */ /* 0x000fea0003800000 */ /*0150*/ IADD3 R5, -R4, c[0x0][0x17c], RZ ; /* 0x00005f0004057a10 */ /* 0x000fe20007ffe1ff */ /*0160*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */ /* 0x000fe200000001ff */ /*0170*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */ /* 0x000fe20000000a00 */ /*0180*/ IMAD R6, R3, c[0x0][0x17c], RZ ; /* 0x00005f0003067a24 */ /* 0x000fe200078e02ff */ /*0190*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe40003f04270 */ /*01a0*/ MOV R2, RZ ; /* 0x000000ff00027202 */ /* 0x000fca0000000f00 */ /*01b0*/ IMAD.WIDE R8, R0, R9, c[0x0][0x168] ; /* 0x00005a0000087625 */ /* 0x000fcc00078e0209 */ /*01c0*/ @!P0 BRA 0x980 ; /* 0x000007b000008947 */ /* 0x000fea0003800000 */ /*01d0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fe40003f24270 */ /*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01f0*/ @!P1 BRA 0x6b0 ; /* 0x000004b000009947 */ /* 0x000fea0003800000 */ /*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */ /* 0x000fe20008000f00 */ /*0220*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */ /* 0x0000a2000c1e1900 */ /*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */ /* 0x000fca0008000f00 */ /*0240*/ IMAD.WIDE R12, R6, 0x4, R12 ; /* 0x00000004060c7825 */ /* 0x000fca00078e020c */ /*0250*/ LDG.E R20, [R12.64] ; /* 0x000000040c147981 */ /* 0x000ea2000c1e1900 */ /*0260*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */ /* 0x000fc60000000f00 */ /*0270*/ LDG.E R14, [R12.64+0x4] ; /* 0x000004040c0e7981 */ /* 0x000ee4000c1e1900 */ /*0280*/ IMAD.WIDE R10, R7.reuse, 0x4, R8 ; /* 0x00000004070a7825 */ /* 0x040fe400078e0208 */ /*0290*/ LDG.E R27, [R12.64+0x8] ; /* 0x000008040c1b7981 */ /* 0x000f28000c1e1900 */ /*02a0*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */ /* 0x0002e2000c1e1900 */ /*02b0*/ IMAD.WIDE R22, R7, 0x4, R10 ; /* 0x0000000407167825 */ /* 0x000fc600078e020a */ /*02c0*/ LDG.E R18, [R12.64+0xc] ; /* 0x00000c040c127981 */ /* 0x000f66000c1e1900 */ /*02d0*/ IMAD.WIDE R28, R7.reuse, 0x4, R22 ; /* 0x00000004071c7825 */ /* 0x040fe200078e0216 */ /*02e0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */ /* 0x000328000c1e1900 */ /*02f0*/ LDG.E R19, [R28.64] ; /* 0x000000041c137981 */ /* 0x000362000c1e1900 */ /*0300*/ IMAD.WIDE R16, R7, 0x4, R28 ; /* 0x0000000407107825 */ /* 0x000fc600078e021c */ /*0310*/ LDG.E R8, [R12.64+0x10] ; /* 0x000010040c087981 */ /* 0x001f68000c1e1900 */ /*0320*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */ /* 0x000168000c1e1900 */ /*0330*/ LDG.E R10, [R12.64+0x14] ; /* 0x000014040c0a7981 */ /* 0x002f68000c1e1900 */ /*0340*/ LDG.E R28, [R12.64+0x1c] ; /* 0x00001c040c1c7981 */ /* 0x000f62000c1e1900 */ /*0350*/ IMAD.WIDE R16, R7, 0x4, R16 ; /* 0x0000000407107825 */ /* 0x001fca00078e0210 */ /*0360*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */ /* 0x000562000c1e1900 */ /*0370*/ IMAD.WIDE R22, R7, 0x4, R16 ; /* 0x0000000407167825 */ /* 0x000fc800078e0210 */ /*0380*/ IMAD R16, R21, R20, R24 ; /* 0x0000001415107224 */ /* 0x004fe400078e0218 */ /*0390*/ LDG.E R20, [R12.64+0x18] ; /* 0x000018040c147981 */ /* 0x000ea2000c1e1900 */ /*03a0*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */ /* 0x000fc600078e0216 */ /*03b0*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */ /* 0x0000a8000c1e1900 */ /*03c0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */ /* 0x0002a2000c1e1900 */ /*03d0*/ IMAD R16, R15, R14, R16 ; /* 0x0000000e0f107224 */ /* 0x008fe400078e0210 */ /*03e0*/ IMAD.WIDE R14, R7.reuse, 0x4, R24 ; /* 0x00000004070e7825 */ /* 0x040fe200078e0218 */ /*03f0*/ LDG.E R23, [R12.64+0x20] ; /* 0x000020040c177981 */ /* 0x001ee6000c1e1900 */ /*0400*/ IMAD R26, R26, R27, R16 ; /* 0x0000001b1a1a7224 */ /* 0x010fe200078e0210 */ /*0410*/ LDG.E R25, [R12.64+0x24] ; /* 0x000024040c197981 */ /* 0x002f22000c1e1900 */ /*0420*/ IMAD.WIDE R16, R7, 0x4, R14 ; /* 0x0000000407107825 */ /* 0x000fc600078e020e */ /*0430*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x0000e2000c1e1900 */ /*0440*/ IMAD R26, R19, R18, R26 ; /* 0x00000012131a7224 */ /* 0x020fe400078e021a */ /*0450*/ IMAD.WIDE R18, R7, 0x4, R16 ; /* 0x0000000407127825 */ /* 0x000fe200078e0210 */ /*0460*/ LDG.E R22, [R12.64+0x28] ; /* 0x000028040c167981 */ /* 0x000f66000c1e1900 */ /*0470*/ IMAD R26, R9, R8, R26 ; /* 0x00000008091a7224 */ /* 0x000fe200078e021a */ /*0480*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000322000c1e1900 */ /*0490*/ IMAD.WIDE R8, R7, 0x4, R18 ; /* 0x0000000407087825 */ /* 0x000fc600078e0212 */ /*04a0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */ /* 0x000368000c1e1900 */ /*04b0*/ LDG.E R24, [R8.64] ; /* 0x0000000408187981 */ /* 0x000568000c1e1900 */ /*04c0*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */ /* 0x001f62000c1e1900 */ /*04d0*/ IMAD R26, R11, R10, R26 ; /* 0x0000000a0b1a7224 */ /* 0x000fe400078e021a */ /*04e0*/ IMAD.WIDE R10, R7, 0x4, R8 ; /* 0x00000004070a7825 */ /* 0x000fe200078e0208 */ /*04f0*/ LDG.E R17, [R12.64+0x30] ; /* 0x000030040c117981 */ /* 0x002f66000c1e1900 */ /*0500*/ IMAD R26, R21, R20, R26 ; /* 0x00000014151a7224 */ /* 0x004fc400078e021a */ /*0510*/ IMAD.WIDE R20, R7, 0x4, R10 ; /* 0x0000000407147825 */ /* 0x000fe400078e020a */ /*0520*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */ /* 0x0000a4000c1e1900 */ /*0530*/ IMAD R28, R29, R28, R26 ; /* 0x0000001c1d1c7224 */ /* 0x000fe400078e021a */ /*0540*/ IMAD.WIDE R26, R7.reuse, 0x4, R20 ; /* 0x00000004071a7825 */ /* 0x040fe200078e0214 */ /*0550*/ LDG.E R29, [R12.64+0x34] ; /* 0x000034040c1d7981 */ /* 0x000ea8000c1e1900 */ /*0560*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */ /* 0x0002a2000c1e1900 */ /*0570*/ IMAD.WIDE R8, R7, 0x4, R26 ; /* 0x0000000407087825 */ /* 0x000fc600078e021a */ /*0580*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */ /* 0x0006a8000c1e1900 */ /*0590*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */ /* 0x0010a8000c1e1900 */ /*05a0*/ LDG.E R21, [R12.64+0x38] ; /* 0x000038040c157981 */ /* 0x002ea8000c1e1900 */ /*05b0*/ LDG.E R26, [R12.64+0x3c] ; /* 0x00003c040c1a7981 */ /* 0x008ee2000c1e1900 */ /*05c0*/ IMAD R14, R14, R23, R28 ; /* 0x000000170e0e7224 */ /* 0x000fc800078e021c */ /*05d0*/ IMAD R25, R16, R25, R14 ; /* 0x0000001910197224 */ /* 0x010fe200078e020e */ /*05e0*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */ /* 0x000fc60007ffe0ff */ /*05f0*/ IMAD R18, R18, R22, R25 ; /* 0x0000001612127224 */ /* 0x020fe200078e0219 */ /*0600*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fc60003f24270 */ /*0610*/ IMAD R15, R24, R15, R18 ; /* 0x0000000f180f7224 */ /* 0x000fe200078e0212 */ /*0620*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */ /* 0x000fe2000ff1e03f */ /*0630*/ IMAD.WIDE R8, R7, 0x4, R8 ; /* 0x0000000407087825 */ /* 0x001fc600078e0208 */ /*0640*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0650*/ IADD3 R2, R2, 0x10, RZ ; /* 0x0000001002027810 */ /* 0x000fe20007ffe0ff */ /*0660*/ IMAD R10, R10, R17, R15 ; /* 0x000000110a0a7224 */ /* 0x004fc800078e020f */ /*0670*/ IMAD R10, R20, R29, R10 ; /* 0x0000001d140a7224 */ /* 0x000fc800078e020a */ /*0680*/ IMAD R10, R19, R21, R10 ; /* 0x00000015130a7224 */ /* 0x000fc800078e020a */ /*0690*/ IMAD R24, R11, R26, R10 ; /* 0x0000001a0b187224 */ /* 0x008fe200078e020a */ /*06a0*/ @P1 BRA 0x210 ; /* 0xfffffb6000001947 */ /* 0x000fea000383ffff */ /*06b0*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */ /* 0x000fda0003f24270 */ /*06c0*/ @!P1 BRA 0x960 ; /* 0x0000029000009947 */ /* 0x000fea0003800000 */ /*06d0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */ /* 0x000fe20000000f00 */ /*06e0*/ LDG.E R23, [R8.64] ; /* 0x0000000408177981 */ /* 0x0000a2000c1e1900 */ /*06f0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */ /* 0x000fe40008000f00 */ /*0700*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */ /* 0x000fe20008000f00 */ /*0710*/ IMAD.WIDE R16, R7, 0x4, R8 ; /* 0x0000000407107825 */ /* 0x000fc800078e0208 */ /*0720*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */ /* 0x000fc800078e020a */ /*0730*/ IMAD.WIDE R12, R7.reuse, 0x4, R16 ; /* 0x00000004070c7825 */ /* 0x040fe200078e0210 */ /*0740*/ LDG.E R22, [R10.64] ; /* 0x000000040a167981 */ /* 0x000ea8000c1e1900 */ /*0750*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x0002e2000c1e1900 */ /*0760*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */ /* 0x000fc600078e020c */ /*0770*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */ /* 0x000ee6000c1e1900 */ /*0780*/ IMAD.WIDE R18, R7.reuse, 0x4, R14 ; /* 0x0000000407127825 */ /* 0x040fe200078e020e */ /*0790*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */ /* 0x000968000c1e1900 */ /*07a0*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */ /* 0x000f62000c1e1900 */ /*07b0*/ IMAD.WIDE R20, R7, 0x4, R18 ; /* 0x0000000407147825 */ /* 0x000fc600078e0212 */ /*07c0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000368000c1e1900 */ /*07d0*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */ /* 0x000f62000c1e1900 */ /*07e0*/ IMAD.WIDE R8, R7, 0x4, R20 ; /* 0x0000000407087825 */ /* 0x001fc600078e0214 */ /*07f0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */ /* 0x000168000c1e1900 */ /*0800*/ LDG.E R28, [R10.64+0x10] ; /* 0x000010040a1c7981 */ /* 0x000f62000c1e1900 */ /*0810*/ IMAD.WIDE R12, R7, 0x4, R8 ; /* 0x00000004070c7825 */ /* 0x010fc600078e0208 */ /*0820*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */ /* 0x000968000c1e1900 */ /*0830*/ LDG.E R15, [R10.64+0x14] ; /* 0x000014040a0f7981 */ /* 0x002f68000c1e1900 */ /*0840*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */ /* 0x000368000c1e1900 */ /*0850*/ LDG.E R21, [R10.64+0x1c] ; /* 0x00001c040a157981 */ /* 0x010f28000c1e1900 */ /*0860*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */ /* 0x001f28000c1e1900 */ /*0870*/ LDG.E R8, [R10.64+0x18] ; /* 0x000018040a087981 */ /* 0x002f22000c1e1900 */ /*0880*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */ /* 0x000fe2000ff1e03f */ /*0890*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fc40003f0e170 */ /*08a0*/ IADD3 R2, R2, 0x8, RZ ; /* 0x0000000802027810 */ /* 0x000fe40007ffe0ff */ /*08b0*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */ /* 0x000fe20007ffe0ff */ /*08c0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*08d0*/ IMAD R22, R23, R22, R24 ; /* 0x0000001617167224 */ /* 0x004fc800078e0218 */ /*08e0*/ IMAD R16, R16, R25, R22 ; /* 0x0000001910107224 */ /* 0x008fc800078e0216 */ /*08f0*/ IMAD R16, R26, R27, R16 ; /* 0x0000001b1a107224 */ /* 0x020fc800078e0210 */ /*0900*/ IMAD R29, R14, R29, R16 ; /* 0x0000001d0e1d7224 */ /* 0x000fc800078e0210 */ /*0910*/ IMAD R18, R18, R28, R29 ; /* 0x0000001c12127224 */ /* 0x000fc800078e021d */ /*0920*/ IMAD R15, R20, R15, R18 ; /* 0x0000000f140f7224 */ /* 0x000fc800078e0212 */ /*0930*/ IMAD R24, R17, R8, R15 ; /* 0x0000000811187224 */ /* 0x010fe400078e020f */ /*0940*/ IMAD.WIDE R8, R7, 0x4, R12 ; /* 0x0000000407087825 */ /* 0x000fc800078e020c */ /*0950*/ IMAD R24, R19, R21, R24 ; /* 0x0000001513187224 */ /* 0x000fe400078e0218 */ /*0960*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */ /* 0x000fda0000705670 */ /*0970*/ @!P0 BRA 0xb20 ; /* 0x000001a000008947 */ /* 0x000fea0003800000 */ /*0980*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */ /* 0x000fe40008000f00 */ /*0990*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */ /* 0x000fe40008000f00 */ /*09a0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */ /* 0x000fc60000000f00 */ /*09b0*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */ /* 0x000fc800078e020a */ /*09c0*/ IMAD.WIDE R16, R7.reuse, 0x4, R8 ; /* 0x0000000407107825 */ /* 0x040fe200078e0208 */ /*09d0*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */ /* 0x000ea8000c1e1900 */ /*09e0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */ /* 0x000ea2000c1e1900 */ /*09f0*/ IMAD.WIDE R12, R7, 0x4, R16 ; /* 0x00000004070c7825 */ /* 0x000fc600078e0210 */ /*0a00*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */ /* 0x000ee8000c1e1900 */ /*0a10*/ LDG.E R19, [R10.64+0x4] ; /* 0x000004040a137981 */ /* 0x000ee2000c1e1900 */ /*0a20*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */ /* 0x000fc600078e020c */ /*0a30*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */ /* 0x000f28000c1e1900 */ /*0a40*/ LDG.E R20, [R10.64+0x8] ; /* 0x000008040a147981 */ /* 0x000f28000c1e1900 */ /*0a50*/ LDG.E R22, [R10.64+0xc] ; /* 0x00000c040a167981 */ /* 0x000f68000c1e1900 */ /*0a60*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */ /* 0x000f62000c1e1900 */ /*0a70*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */ /* 0x000fc80007ffe0ff */ /*0a80*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*0a90*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */ /* 0x000fe2000ff1e03f */ /*0aa0*/ IADD3 R2, R2, 0x4, RZ ; /* 0x0000000402027810 */ /* 0x000fc60007ffe0ff */ /*0ab0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0ac0*/ IMAD R18, R9, R18, R24 ; /* 0x0000001209127224 */ /* 0x004fc800078e0218 */ /*0ad0*/ IMAD R18, R17, R19, R18 ; /* 0x0000001311127224 */ /* 0x008fe400078e0212 */ /*0ae0*/ IMAD.WIDE R8, R7, 0x4, R14 ; /* 0x0000000407087825 */ /* 0x000fc800078e020e */ /*0af0*/ IMAD R18, R21, R20, R18 ; /* 0x0000001415127224 */ /* 0x010fc800078e0212 */ /*0b00*/ IMAD R24, R23, R22, R18 ; /* 0x0000001617187224 */ /* 0x020fe200078e0212 */ /*0b10*/ @P0 BRA 0x980 ; /* 0xfffffe6000000947 */ /* 0x000fea000383ffff */ /*0b20*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fda0003f05270 */ /*0b30*/ @!P0 BRA 0xc30 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0b40*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */ /* 0x000fe200000001ff */ /*0b50*/ IMAD R6, R3, c[0x0][0x17c], R2 ; /* 0x00005f0003067a24 */ /* 0x000fe400078e0202 */ /*0b60*/ IMAD R2, R2, c[0x0][0x180], R0 ; /* 0x0000600002027a24 */ /* 0x000fce00078e0200 */ /*0b70*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fc800078e0209 */ /*0b80*/ IMAD.WIDE R8, R2, R9, c[0x0][0x168] ; /* 0x00005a0002087625 */ /* 0x000fca00078e0209 */ /*0b90*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */ /* 0x0000a8000c1e1900 */ /*0ba0*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x0002a2000c1e1900 */ /*0bb0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe40007ffe0ff */ /*0bc0*/ MOV R11, c[0x0][0x180] ; /* 0x00006000000b7a02 */ /* 0x000fe40000000f00 */ /*0bd0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fc60003f05270 */ /*0be0*/ IMAD.WIDE R8, R11, 0x4, R8 ; /* 0x000000040b087825 */ /* 0x001fe200078e0208 */ /*0bf0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */ /* 0x002fc80007f3e0ff */ /*0c00*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0c10*/ IMAD R24, R5, R2, R24 ; /* 0x0000000205187224 */ /* 0x004fc800078e0218 */ /*0c20*/ @P0 BRA 0xb90 ; /* 0xffffff6000000947 */ /* 0x000fea000383ffff */ /*0c30*/ MOV R2, 0x4 ; /* 0x0000000400027802 */ /* 0x000fe20000000f00 */ /*0c40*/ IMAD R3, R3, c[0x0][0x180], R0 ; /* 0x0000600003037a24 */ /* 0x000fc800078e0200 */ /*0c50*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */ /* 0x000fca00078e0202 */ /*0c60*/ STG.E [R2.64], R24 ; /* 0x0000001802007986 */ /* 0x000fe2000c101904 */ /*0c70*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0c80*/ BRA 0xc80; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0c90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ca0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ce0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif #ifndef BATCH_SIZE #define BATCH_SIZE 1 #endif #ifndef NUM_ITERATIONS #define NUM_ITERATIONS 1024 #endif /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m1, n1, k1, m2, n2, k2, m3, n3, k3; /* Fixed seed for illustration */ srand(3333); m1=BATCH_SIZE; n1=65536; k1=4096; m2=BATCH_SIZE; n2=4096; k2=1024; m3=BATCH_SIZE; n3=4096; k3=10; // allocate memory in host RAM int *h_a, *h_b, *h_c, *h_d, *h_e, *h_f, *h_g; cudaMallocHost((void **) &h_a, sizeof(int)*m1*n1); cudaMallocHost((void **) &h_b, sizeof(int)*n1*k1); cudaMallocHost((void **) &h_c, sizeof(int)*m1*k1); cudaMallocHost((void **) &h_d, sizeof(int)*n2*k2); cudaMallocHost((void **) &h_e, sizeof(int)*m2*k2); cudaMallocHost((void **) &h_f, sizeof(int)*n3*k3); cudaMallocHost((void **) &h_g, sizeof(int)*m3*k3); // random initialize matrix B for (int i = 0; i < n1; ++i) { for (int j = 0; j < k1; ++j) { h_b[i * k1 + j] = rand() % 1024; } } // random initialize matrix D for (int i = 0; i < n2; ++i) { for (int j = 0; j < k2; ++j) { h_d[i * k2 + j] = rand() % 1024; } } // random initialize matrix F for (int i = 0; i < n3; ++i) { for (int j = 0; j < k3; ++j) { h_f[i * k3 + j] = rand() % 1024; } } float gpu_elapsed_time_ms; // some events to count the execution time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Allocate memory space on the device int *d_a, *d_b, *d_c, *d_d, *d_e, *d_f, *d_g; cudaMalloc((void **) &d_a, sizeof(int)*m1*n1); cudaMalloc((void **) &d_b, sizeof(int)*n1*k1); cudaMalloc((void **) &d_c, sizeof(int)*m1*k1); cudaMalloc((void **) &d_d, sizeof(int)*n2*k2); cudaMalloc((void **) &d_e, sizeof(int)*m2*k2); cudaMalloc((void **) &d_f, sizeof(int)*n3*k3); cudaMalloc((void **) &d_g, sizeof(int)*m3*k3); // copy matrix B,D,F from host to device memory - these are weight matrices cudaMemcpy(d_b, h_b, sizeof(int)*n1*k1, cudaMemcpyHostToDevice); cudaMemcpy(d_d, h_d, sizeof(int)*n2*k2, cudaMemcpyHostToDevice); cudaMemcpy(d_f, h_f, sizeof(int)*n3*k3, cudaMemcpyHostToDevice); int numExamples = 0; double total_time_ms = 0.0; for(int i=0;i<NUM_ITERATIONS;i++) { // random initialize matrix A - this is the input matrix for (int i = 0; i < m1; ++i) { for (int j = 0; j < n1; ++j) { h_a[i * n1 + j] = rand() % 1024; } } cudaEventRecord(start, 0); // copy from host to device cudaMemcpy(d_a, h_a, sizeof(int)*m1*n1, cudaMemcpyHostToDevice); unsigned int grid_rows = (m1 + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k1 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel for multiplication 1 #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid, dimBlock, 0, 0>>>(d_a, d_b, d_c, m1, n1, k1); // execute on default stream #else gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m1, n1, k1); #endif cudaDeviceSynchronize(); // Launch kernel for multiplication 2 grid_rows = (m2 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k2 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(grid_cols, grid_rows); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid2, dimBlock2, 0, 0>>>(d_c, d_d, d_e, m2, n2, k2); // execute on default stream #else gpu_matrix_mult<<<dimGrid2, dimBlock2>>>(d_c, d_d, d_e, m2, n2, k2); #endif // Launch kernel for multiplication 3 - DR model grid_rows = (m3 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(grid_cols, grid_rows); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS cudaStream_t streams[1]; cudaStreamCreate(&streams[0]); gpu_matrix_mult<<<dimGrid3, dimBlock3, 0, streams[0]>>>(d_c, d_f, d_g, m3, n3, k3); // execute on non-default stream #else gpu_matrix_mult<<<dimGrid3, dimBlock3>>>(d_c, d_f, d_g, m3, n3, k3); #endif // Transfer results from device to host - only DR model result cudaMemcpy(h_g, d_g, sizeof(int)*m2*k2, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); // time counting terminate cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // compute time elapse on GPU computing cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); numExamples += BATCH_SIZE; total_time_ms += gpu_elapsed_time_ms; } printf("Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n", total_time_ms/NUM_ITERATIONS, numExamples*1000.0/total_time_ms); // free memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_d); cudaFree(d_e); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); cudaFreeHost(h_d); cudaFreeHost(h_e); return 0; }
.file "tmpxft_0002ca4f_00000000-6_dr_model_poc.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii .type _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii, @function _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii: .LFB2082: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 40(%rsp) movq %rsi, 32(%rsp) movq %rdx, 24(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 32(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 20(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 200 pushq 56(%rsp) .cfi_def_cfa_offset 208 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq _Z15gpu_matrix_multPiS_S_iii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii, .-_Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii .globl _Z15gpu_matrix_multPiS_S_iii .type _Z15gpu_matrix_multPiS_S_iii, @function _Z15gpu_matrix_multPiS_S_iii: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z15gpu_matrix_multPiS_S_iii, .-_Z15gpu_matrix_multPiS_S_iii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $240, %rsp .cfi_def_cfa_offset 272 movq %fs:40, %rax movq %rax, 232(%rsp) xorl %eax, %eax movl $3333, %edi call srand@PLT leaq 32(%rsp), %rdi movl $262144, %esi call cudaMallocHost@PLT leaq 40(%rsp), %rdi movl $1073741824, %esi call cudaMallocHost@PLT leaq 48(%rsp), %rdi movl $16384, %esi call cudaMallocHost@PLT leaq 56(%rsp), %rdi movl $16777216, %esi call cudaMallocHost@PLT leaq 64(%rsp), %rdi movl $4096, %esi call cudaMallocHost@PLT leaq 72(%rsp), %rdi movl $163840, %esi call cudaMallocHost@PLT leaq 80(%rsp), %rdi movl $40, %esi call cudaMallocHost@PLT movl $16384, %ebp .L12: leaq -16384(%rbp), %rbx .L13: call rand@PLT cltd shrl $22, %edx addl %edx, %eax andl $1023, %eax subl %edx, %eax movq 40(%rsp), %rdx movl %eax, (%rdx,%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L13 addq $16384, %rbp cmpq $1073758208, %rbp jne .L12 movl $4096, %ebp .L14: leaq -4096(%rbp), %rbx .L15: call rand@PLT cltd shrl $22, %edx addl %edx, %eax andl $1023, %eax subl %edx, %eax movq 56(%rsp), %rdx movl %eax, (%rdx,%rbx) addq $4, %rbx cmpq %rbp, %rbx jne .L15 addq $4096, %rbp cmpq $16781312, %rbp jne .L14 movl $40, %ebp jmp .L16 .L34: addq $40, %rbp cmpq $163880, %rbp je .L33 .L16: leaq -40(%rbp), %rbx .L17: call rand@PLT cltd shrl $22, %edx addl %edx, %eax andl $1023, %eax subl %edx, %eax movq 72(%rsp), %rdx movl %eax, (%rdx,%rbx) addq $4, %rbx cmpq %rbx, %rbp jne .L17 jmp .L34 .L33: leaq 88(%rsp), %rdi call cudaEventCreate@PLT leaq 96(%rsp), %rdi call cudaEventCreate@PLT leaq 104(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT leaq 112(%rsp), %rdi movl $1073741824, %esi call cudaMalloc@PLT leaq 120(%rsp), %rdi movl $16384, %esi call cudaMalloc@PLT leaq 128(%rsp), %rdi movl $16777216, %esi call cudaMalloc@PLT leaq 136(%rsp), %rdi movl $4096, %esi call cudaMalloc@PLT leaq 144(%rsp), %rdi movl $163840, %esi call cudaMalloc@PLT leaq 152(%rsp), %rdi movl $40, %esi call cudaMalloc@PLT movl $1, %ecx movl $1073741824, %edx movq 40(%rsp), %rsi movq 112(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $16777216, %edx movq 56(%rsp), %rsi movq 128(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $163840, %edx movq 72(%rsp), %rsi movq 144(%rsp), %rdi call cudaMemcpy@PLT movl $1024, %ebp movq $0x000000000, 8(%rsp) leaq 28(%rsp), %r12 jmp .L19 .L35: movl $4096, %r9d movl $65536, %r8d movl $1, %ecx movq 120(%rsp), %rdx movq 112(%rsp), %rsi movq 104(%rsp), %rdi call _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii jmp .L21 .L36: movl $1024, %r9d movl $4096, %r8d movl $1, %ecx movq 136(%rsp), %rdx movq 128(%rsp), %rsi movq 120(%rsp), %rdi call _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii jmp .L22 .L23: movl $2, %ecx movl $4096, %edx movq 152(%rsp), %rsi movq 80(%rsp), %rdi call cudaMemcpy@PLT call cudaThreadSynchronize@PLT movl $0, %esi movq 96(%rsp), %rdi call cudaEventRecord@PLT movq 96(%rsp), %rdi call cudaEventSynchronize@PLT movq 96(%rsp), %rdx movq 88(%rsp), %rsi movq %r12, %rdi call cudaEventElapsedTime@PLT pxor %xmm0, %xmm0 cvtss2sd 28(%rsp), %xmm0 addsd 8(%rsp), %xmm0 movsd %xmm0, 8(%rsp) subl $1, %ebp je .L24 .L19: movl $0, %ebx .L20: call rand@PLT cltd shrl $22, %edx addl %edx, %eax andl $1023, %eax subl %edx, %eax movq 32(%rsp), %rdx movl %eax, (%rdx,%rbx) addq $4, %rbx cmpq $262144, %rbx jne .L20 movl $0, %esi movq 88(%rsp), %rdi call cudaEventRecord@PLT movl $1, %ecx movl $262144, %edx movq 32(%rsp), %rsi movq 104(%rsp), %rdi call cudaMemcpy@PLT movl $256, 160(%rsp) movl $1, 164(%rsp) movl $16, 172(%rsp) movl $16, 176(%rsp) movl $0, %r9d movl $0, %r8d movq 172(%rsp), %rdx movl $1, %ecx movq 160(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L35 .L21: call cudaDeviceSynchronize@PLT movl $64, 184(%rsp) movl $1, 188(%rsp) movl $16, 196(%rsp) movl $16, 200(%rsp) movl $0, %r9d movl $0, %r8d movq 196(%rsp), %rdx movl $1, %ecx movq 184(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L36 .L22: movl $1, 208(%rsp) movl $1, 212(%rsp) movl $16, 220(%rsp) movl $16, 224(%rsp) movl $0, %r9d movl $0, %r8d movq 220(%rsp), %rdx movl $1, %ecx movq 208(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L23 movl $10, %r9d movl $4096, %r8d movl $1, %ecx movq 152(%rsp), %rdx movq 144(%rsp), %rsi movq 120(%rsp), %rdi call _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii jmp .L23 .L24: movsd 8(%rsp), %xmm3 movapd %xmm3, %xmm0 mulsd .LC2(%rip), %xmm0 movsd .LC1(%rip), %xmm1 divsd %xmm3, %xmm1 leaq .LC3(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT movq 104(%rsp), %rdi call cudaFree@PLT movq 112(%rsp), %rdi call cudaFree@PLT movq 120(%rsp), %rdi call cudaFree@PLT movq 128(%rsp), %rdi call cudaFree@PLT movq 136(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFreeHost@PLT movq 40(%rsp), %rdi call cudaFreeHost@PLT movq 48(%rsp), %rdi call cudaFreeHost@PLT movq 56(%rsp), %rdi call cudaFreeHost@PLT movq 64(%rsp), %rdi call cudaFreeHost@PLT movq 232(%rsp), %rax subq %fs:40, %rax jne .L37 movl $0, %eax addq $240, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L37: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC4: .string "_Z15gpu_matrix_multPiS_S_iii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC4(%rip), %rdx movq %rdx, %rcx leaq _Z15gpu_matrix_multPiS_S_iii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC1: .long 0 .long 1093615616 .align 8 .LC2: .long 0 .long 1062207488 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif #ifndef BATCH_SIZE #define BATCH_SIZE 1 #endif #ifndef NUM_ITERATIONS #define NUM_ITERATIONS 1024 #endif /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m1, n1, k1, m2, n2, k2, m3, n3, k3; /* Fixed seed for illustration */ srand(3333); m1=BATCH_SIZE; n1=65536; k1=4096; m2=BATCH_SIZE; n2=4096; k2=1024; m3=BATCH_SIZE; n3=4096; k3=10; // allocate memory in host RAM int *h_a, *h_b, *h_c, *h_d, *h_e, *h_f, *h_g; cudaMallocHost((void **) &h_a, sizeof(int)*m1*n1); cudaMallocHost((void **) &h_b, sizeof(int)*n1*k1); cudaMallocHost((void **) &h_c, sizeof(int)*m1*k1); cudaMallocHost((void **) &h_d, sizeof(int)*n2*k2); cudaMallocHost((void **) &h_e, sizeof(int)*m2*k2); cudaMallocHost((void **) &h_f, sizeof(int)*n3*k3); cudaMallocHost((void **) &h_g, sizeof(int)*m3*k3); // random initialize matrix B for (int i = 0; i < n1; ++i) { for (int j = 0; j < k1; ++j) { h_b[i * k1 + j] = rand() % 1024; } } // random initialize matrix D for (int i = 0; i < n2; ++i) { for (int j = 0; j < k2; ++j) { h_d[i * k2 + j] = rand() % 1024; } } // random initialize matrix F for (int i = 0; i < n3; ++i) { for (int j = 0; j < k3; ++j) { h_f[i * k3 + j] = rand() % 1024; } } float gpu_elapsed_time_ms; // some events to count the execution time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Allocate memory space on the device int *d_a, *d_b, *d_c, *d_d, *d_e, *d_f, *d_g; cudaMalloc((void **) &d_a, sizeof(int)*m1*n1); cudaMalloc((void **) &d_b, sizeof(int)*n1*k1); cudaMalloc((void **) &d_c, sizeof(int)*m1*k1); cudaMalloc((void **) &d_d, sizeof(int)*n2*k2); cudaMalloc((void **) &d_e, sizeof(int)*m2*k2); cudaMalloc((void **) &d_f, sizeof(int)*n3*k3); cudaMalloc((void **) &d_g, sizeof(int)*m3*k3); // copy matrix B,D,F from host to device memory - these are weight matrices cudaMemcpy(d_b, h_b, sizeof(int)*n1*k1, cudaMemcpyHostToDevice); cudaMemcpy(d_d, h_d, sizeof(int)*n2*k2, cudaMemcpyHostToDevice); cudaMemcpy(d_f, h_f, sizeof(int)*n3*k3, cudaMemcpyHostToDevice); int numExamples = 0; double total_time_ms = 0.0; for(int i=0;i<NUM_ITERATIONS;i++) { // random initialize matrix A - this is the input matrix for (int i = 0; i < m1; ++i) { for (int j = 0; j < n1; ++j) { h_a[i * n1 + j] = rand() % 1024; } } cudaEventRecord(start, 0); // copy from host to device cudaMemcpy(d_a, h_a, sizeof(int)*m1*n1, cudaMemcpyHostToDevice); unsigned int grid_rows = (m1 + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k1 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel for multiplication 1 #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid, dimBlock, 0, 0>>>(d_a, d_b, d_c, m1, n1, k1); // execute on default stream #else gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m1, n1, k1); #endif cudaDeviceSynchronize(); // Launch kernel for multiplication 2 grid_rows = (m2 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k2 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(grid_cols, grid_rows); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid2, dimBlock2, 0, 0>>>(d_c, d_d, d_e, m2, n2, k2); // execute on default stream #else gpu_matrix_mult<<<dimGrid2, dimBlock2>>>(d_c, d_d, d_e, m2, n2, k2); #endif // Launch kernel for multiplication 3 - DR model grid_rows = (m3 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(grid_cols, grid_rows); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS cudaStream_t streams[1]; cudaStreamCreate(&streams[0]); gpu_matrix_mult<<<dimGrid3, dimBlock3, 0, streams[0]>>>(d_c, d_f, d_g, m3, n3, k3); // execute on non-default stream #else gpu_matrix_mult<<<dimGrid3, dimBlock3>>>(d_c, d_f, d_g, m3, n3, k3); #endif // Transfer results from device to host - only DR model result cudaMemcpy(h_g, d_g, sizeof(int)*m2*k2, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); // time counting terminate cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // compute time elapse on GPU computing cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); numExamples += BATCH_SIZE; total_time_ms += gpu_elapsed_time_ms; } printf("Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n", total_time_ms/NUM_ITERATIONS, numExamples*1000.0/total_time_ms); // free memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_d); cudaFree(d_e); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); cudaFreeHost(h_d); cudaFreeHost(h_e); return 0; }
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif #ifndef BATCH_SIZE #define BATCH_SIZE 1 #endif #ifndef NUM_ITERATIONS #define NUM_ITERATIONS 1024 #endif /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m1, n1, k1, m2, n2, k2, m3, n3, k3; /* Fixed seed for illustration */ srand(3333); m1=BATCH_SIZE; n1=65536; k1=4096; m2=BATCH_SIZE; n2=4096; k2=1024; m3=BATCH_SIZE; n3=4096; k3=10; // allocate memory in host RAM int *h_a, *h_b, *h_c, *h_d, *h_e, *h_f, *h_g; hipHostMalloc((void **) &h_a, sizeof(int)*m1*n1, hipHostMallocDefault); hipHostMalloc((void **) &h_b, sizeof(int)*n1*k1, hipHostMallocDefault); hipHostMalloc((void **) &h_c, sizeof(int)*m1*k1, hipHostMallocDefault); hipHostMalloc((void **) &h_d, sizeof(int)*n2*k2, hipHostMallocDefault); hipHostMalloc((void **) &h_e, sizeof(int)*m2*k2, hipHostMallocDefault); hipHostMalloc((void **) &h_f, sizeof(int)*n3*k3, hipHostMallocDefault); hipHostMalloc((void **) &h_g, sizeof(int)*m3*k3, hipHostMallocDefault); // random initialize matrix B for (int i = 0; i < n1; ++i) { for (int j = 0; j < k1; ++j) { h_b[i * k1 + j] = rand() % 1024; } } // random initialize matrix D for (int i = 0; i < n2; ++i) { for (int j = 0; j < k2; ++j) { h_d[i * k2 + j] = rand() % 1024; } } // random initialize matrix F for (int i = 0; i < n3; ++i) { for (int j = 0; j < k3; ++j) { h_f[i * k3 + j] = rand() % 1024; } } float gpu_elapsed_time_ms; // some events to count the execution time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Allocate memory space on the device int *d_a, *d_b, *d_c, *d_d, *d_e, *d_f, *d_g; hipMalloc((void **) &d_a, sizeof(int)*m1*n1); hipMalloc((void **) &d_b, sizeof(int)*n1*k1); hipMalloc((void **) &d_c, sizeof(int)*m1*k1); hipMalloc((void **) &d_d, sizeof(int)*n2*k2); hipMalloc((void **) &d_e, sizeof(int)*m2*k2); hipMalloc((void **) &d_f, sizeof(int)*n3*k3); hipMalloc((void **) &d_g, sizeof(int)*m3*k3); // copy matrix B,D,F from host to device memory - these are weight matrices hipMemcpy(d_b, h_b, sizeof(int)*n1*k1, hipMemcpyHostToDevice); hipMemcpy(d_d, h_d, sizeof(int)*n2*k2, hipMemcpyHostToDevice); hipMemcpy(d_f, h_f, sizeof(int)*n3*k3, hipMemcpyHostToDevice); int numExamples = 0; double total_time_ms = 0.0; for(int i=0;i<NUM_ITERATIONS;i++) { // random initialize matrix A - this is the input matrix for (int i = 0; i < m1; ++i) { for (int j = 0; j < n1; ++j) { h_a[i * n1 + j] = rand() % 1024; } } hipEventRecord(start, 0); // copy from host to device hipMemcpy(d_a, h_a, sizeof(int)*m1*n1, hipMemcpyHostToDevice); unsigned int grid_rows = (m1 + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k1 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel for multiplication 1 #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid, dimBlock, 0, 0>>>(d_a, d_b, d_c, m1, n1, k1); // execute on default stream #else gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m1, n1, k1); #endif hipDeviceSynchronize(); // Launch kernel for multiplication 2 grid_rows = (m2 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k2 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(grid_cols, grid_rows); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid2, dimBlock2, 0, 0>>>(d_c, d_d, d_e, m2, n2, k2); // execute on default stream #else gpu_matrix_mult<<<dimGrid2, dimBlock2>>>(d_c, d_d, d_e, m2, n2, k2); #endif // Launch kernel for multiplication 3 - DR model grid_rows = (m3 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(grid_cols, grid_rows); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS hipStream_t streams[1]; hipStreamCreate(&streams[0]); gpu_matrix_mult<<<dimGrid3, dimBlock3, 0, streams[0]>>>(d_c, d_f, d_g, m3, n3, k3); // execute on non-default stream #else gpu_matrix_mult<<<dimGrid3, dimBlock3>>>(d_c, d_f, d_g, m3, n3, k3); #endif // Transfer results from device to host - only DR model result hipMemcpy(h_g, d_g, sizeof(int)*m2*k2, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // time counting terminate hipEventRecord(stop, 0); hipEventSynchronize(stop); // compute time elapse on GPU computing hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); numExamples += BATCH_SIZE; total_time_ms += gpu_elapsed_time_ms; } printf("Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n", total_time_ms/NUM_ITERATIONS, numExamples*1000.0/total_time_ms); // free memory hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_d); hipFree(d_e); hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); hipHostFree(h_d); hipHostFree(h_e); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif #ifndef BATCH_SIZE #define BATCH_SIZE 1 #endif #ifndef NUM_ITERATIONS #define NUM_ITERATIONS 1024 #endif /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m1, n1, k1, m2, n2, k2, m3, n3, k3; /* Fixed seed for illustration */ srand(3333); m1=BATCH_SIZE; n1=65536; k1=4096; m2=BATCH_SIZE; n2=4096; k2=1024; m3=BATCH_SIZE; n3=4096; k3=10; // allocate memory in host RAM int *h_a, *h_b, *h_c, *h_d, *h_e, *h_f, *h_g; hipHostMalloc((void **) &h_a, sizeof(int)*m1*n1, hipHostMallocDefault); hipHostMalloc((void **) &h_b, sizeof(int)*n1*k1, hipHostMallocDefault); hipHostMalloc((void **) &h_c, sizeof(int)*m1*k1, hipHostMallocDefault); hipHostMalloc((void **) &h_d, sizeof(int)*n2*k2, hipHostMallocDefault); hipHostMalloc((void **) &h_e, sizeof(int)*m2*k2, hipHostMallocDefault); hipHostMalloc((void **) &h_f, sizeof(int)*n3*k3, hipHostMallocDefault); hipHostMalloc((void **) &h_g, sizeof(int)*m3*k3, hipHostMallocDefault); // random initialize matrix B for (int i = 0; i < n1; ++i) { for (int j = 0; j < k1; ++j) { h_b[i * k1 + j] = rand() % 1024; } } // random initialize matrix D for (int i = 0; i < n2; ++i) { for (int j = 0; j < k2; ++j) { h_d[i * k2 + j] = rand() % 1024; } } // random initialize matrix F for (int i = 0; i < n3; ++i) { for (int j = 0; j < k3; ++j) { h_f[i * k3 + j] = rand() % 1024; } } float gpu_elapsed_time_ms; // some events to count the execution time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Allocate memory space on the device int *d_a, *d_b, *d_c, *d_d, *d_e, *d_f, *d_g; hipMalloc((void **) &d_a, sizeof(int)*m1*n1); hipMalloc((void **) &d_b, sizeof(int)*n1*k1); hipMalloc((void **) &d_c, sizeof(int)*m1*k1); hipMalloc((void **) &d_d, sizeof(int)*n2*k2); hipMalloc((void **) &d_e, sizeof(int)*m2*k2); hipMalloc((void **) &d_f, sizeof(int)*n3*k3); hipMalloc((void **) &d_g, sizeof(int)*m3*k3); // copy matrix B,D,F from host to device memory - these are weight matrices hipMemcpy(d_b, h_b, sizeof(int)*n1*k1, hipMemcpyHostToDevice); hipMemcpy(d_d, h_d, sizeof(int)*n2*k2, hipMemcpyHostToDevice); hipMemcpy(d_f, h_f, sizeof(int)*n3*k3, hipMemcpyHostToDevice); int numExamples = 0; double total_time_ms = 0.0; for(int i=0;i<NUM_ITERATIONS;i++) { // random initialize matrix A - this is the input matrix for (int i = 0; i < m1; ++i) { for (int j = 0; j < n1; ++j) { h_a[i * n1 + j] = rand() % 1024; } } hipEventRecord(start, 0); // copy from host to device hipMemcpy(d_a, h_a, sizeof(int)*m1*n1, hipMemcpyHostToDevice); unsigned int grid_rows = (m1 + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k1 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel for multiplication 1 #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid, dimBlock, 0, 0>>>(d_a, d_b, d_c, m1, n1, k1); // execute on default stream #else gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m1, n1, k1); #endif hipDeviceSynchronize(); // Launch kernel for multiplication 2 grid_rows = (m2 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k2 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(grid_cols, grid_rows); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid2, dimBlock2, 0, 0>>>(d_c, d_d, d_e, m2, n2, k2); // execute on default stream #else gpu_matrix_mult<<<dimGrid2, dimBlock2>>>(d_c, d_d, d_e, m2, n2, k2); #endif // Launch kernel for multiplication 3 - DR model grid_rows = (m3 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(grid_cols, grid_rows); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS hipStream_t streams[1]; hipStreamCreate(&streams[0]); gpu_matrix_mult<<<dimGrid3, dimBlock3, 0, streams[0]>>>(d_c, d_f, d_g, m3, n3, k3); // execute on non-default stream #else gpu_matrix_mult<<<dimGrid3, dimBlock3>>>(d_c, d_f, d_g, m3, n3, k3); #endif // Transfer results from device to host - only DR model result hipMemcpy(h_g, d_g, sizeof(int)*m2*k2, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // time counting terminate hipEventRecord(stop, 0); hipEventSynchronize(stop); // compute time elapse on GPU computing hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); numExamples += BATCH_SIZE; total_time_ms += gpu_elapsed_time_ms; } printf("Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n", total_time_ms/NUM_ITERATIONS, numExamples*1000.0/total_time_ms); // free memory hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_d); hipFree(d_e); hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); hipHostFree(h_d); hipHostFree(h_e); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z15gpu_matrix_multPiS_S_iii .globl _Z15gpu_matrix_multPiS_S_iii .p2align 8 .type _Z15gpu_matrix_multPiS_S_iii,@function _Z15gpu_matrix_multPiS_S_iii: s_clause 0x2 s_load_b32 s2, s[0:1], 0x34 s_load_b32 s3, s[0:1], 0x20 s_load_b32 s4, s[0:1], 0x18 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_lshr_b32 s5, s2, 16 s_and_b32 s2, s2, 0xffff s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[0:1], null, s14, s2, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s5, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s3, v0 v_cmp_gt_i32_e64 s2, s4, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, s2, vcc_lo s_and_saveexec_b32 s4, s2 s_cbranch_execz .LBB0_6 s_load_b32 s2, s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_4 s_load_b128 s[4:7], s[0:1], 0x0 v_mul_lo_u32 v2, v1, s2 v_mov_b32_e32 v5, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[3:4], 2, v[2:3] v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo .p2align 6 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_ashrrev_i32_e32 v6, 31, v5 s_add_i32 s2, s2, -1 s_cmp_eq_u32 s2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], 2, v[5:6] v_add_co_u32 v6, vcc_lo, s6, v6 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo global_load_b32 v8, v[3:4], off global_load_b32 v9, v[6:7], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3] v_add_co_u32 v3, vcc_lo, v3, 4 v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s3, v5 s_cbranch_scc0 .LBB0_3 s_branch .LBB0_5 .LBB0_4: v_mov_b32_e32 v2, 0 .LBB0_5: s_load_b64 s[0:1], s[0:1], 0x10 v_mad_u64_u32 v[3:4], null, v1, s3, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[0:1], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v2, off .LBB0_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z15gpu_matrix_multPiS_S_iii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z15gpu_matrix_multPiS_S_iii, .Lfunc_end0-_Z15gpu_matrix_multPiS_S_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z15gpu_matrix_multPiS_S_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z15gpu_matrix_multPiS_S_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif #ifndef BATCH_SIZE #define BATCH_SIZE 1 #endif #ifndef NUM_ITERATIONS #define NUM_ITERATIONS 1024 #endif /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m1, n1, k1, m2, n2, k2, m3, n3, k3; /* Fixed seed for illustration */ srand(3333); m1=BATCH_SIZE; n1=65536; k1=4096; m2=BATCH_SIZE; n2=4096; k2=1024; m3=BATCH_SIZE; n3=4096; k3=10; // allocate memory in host RAM int *h_a, *h_b, *h_c, *h_d, *h_e, *h_f, *h_g; hipHostMalloc((void **) &h_a, sizeof(int)*m1*n1, hipHostMallocDefault); hipHostMalloc((void **) &h_b, sizeof(int)*n1*k1, hipHostMallocDefault); hipHostMalloc((void **) &h_c, sizeof(int)*m1*k1, hipHostMallocDefault); hipHostMalloc((void **) &h_d, sizeof(int)*n2*k2, hipHostMallocDefault); hipHostMalloc((void **) &h_e, sizeof(int)*m2*k2, hipHostMallocDefault); hipHostMalloc((void **) &h_f, sizeof(int)*n3*k3, hipHostMallocDefault); hipHostMalloc((void **) &h_g, sizeof(int)*m3*k3, hipHostMallocDefault); // random initialize matrix B for (int i = 0; i < n1; ++i) { for (int j = 0; j < k1; ++j) { h_b[i * k1 + j] = rand() % 1024; } } // random initialize matrix D for (int i = 0; i < n2; ++i) { for (int j = 0; j < k2; ++j) { h_d[i * k2 + j] = rand() % 1024; } } // random initialize matrix F for (int i = 0; i < n3; ++i) { for (int j = 0; j < k3; ++j) { h_f[i * k3 + j] = rand() % 1024; } } float gpu_elapsed_time_ms; // some events to count the execution time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Allocate memory space on the device int *d_a, *d_b, *d_c, *d_d, *d_e, *d_f, *d_g; hipMalloc((void **) &d_a, sizeof(int)*m1*n1); hipMalloc((void **) &d_b, sizeof(int)*n1*k1); hipMalloc((void **) &d_c, sizeof(int)*m1*k1); hipMalloc((void **) &d_d, sizeof(int)*n2*k2); hipMalloc((void **) &d_e, sizeof(int)*m2*k2); hipMalloc((void **) &d_f, sizeof(int)*n3*k3); hipMalloc((void **) &d_g, sizeof(int)*m3*k3); // copy matrix B,D,F from host to device memory - these are weight matrices hipMemcpy(d_b, h_b, sizeof(int)*n1*k1, hipMemcpyHostToDevice); hipMemcpy(d_d, h_d, sizeof(int)*n2*k2, hipMemcpyHostToDevice); hipMemcpy(d_f, h_f, sizeof(int)*n3*k3, hipMemcpyHostToDevice); int numExamples = 0; double total_time_ms = 0.0; for(int i=0;i<NUM_ITERATIONS;i++) { // random initialize matrix A - this is the input matrix for (int i = 0; i < m1; ++i) { for (int j = 0; j < n1; ++j) { h_a[i * n1 + j] = rand() % 1024; } } hipEventRecord(start, 0); // copy from host to device hipMemcpy(d_a, h_a, sizeof(int)*m1*n1, hipMemcpyHostToDevice); unsigned int grid_rows = (m1 + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k1 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel for multiplication 1 #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid, dimBlock, 0, 0>>>(d_a, d_b, d_c, m1, n1, k1); // execute on default stream #else gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m1, n1, k1); #endif hipDeviceSynchronize(); // Launch kernel for multiplication 2 grid_rows = (m2 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k2 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(grid_cols, grid_rows); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS gpu_matrix_mult<<<dimGrid2, dimBlock2, 0, 0>>>(d_c, d_d, d_e, m2, n2, k2); // execute on default stream #else gpu_matrix_mult<<<dimGrid2, dimBlock2>>>(d_c, d_d, d_e, m2, n2, k2); #endif // Launch kernel for multiplication 3 - DR model grid_rows = (m3 + BLOCK_SIZE - 1) / BLOCK_SIZE; grid_cols = (k3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(grid_cols, grid_rows); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); #ifdef USE_CUDA_STREAMS hipStream_t streams[1]; hipStreamCreate(&streams[0]); gpu_matrix_mult<<<dimGrid3, dimBlock3, 0, streams[0]>>>(d_c, d_f, d_g, m3, n3, k3); // execute on non-default stream #else gpu_matrix_mult<<<dimGrid3, dimBlock3>>>(d_c, d_f, d_g, m3, n3, k3); #endif // Transfer results from device to host - only DR model result hipMemcpy(h_g, d_g, sizeof(int)*m2*k2, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // time counting terminate hipEventRecord(stop, 0); hipEventSynchronize(stop); // compute time elapse on GPU computing hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); numExamples += BATCH_SIZE; total_time_ms += gpu_elapsed_time_ms; } printf("Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n", total_time_ms/NUM_ITERATIONS, numExamples*1000.0/total_time_ms); // free memory hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_d); hipFree(d_e); hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); hipHostFree(h_d); hipHostFree(h_e); return 0; }
.text .file "dr_model_poc.hip" .globl _Z30__device_stub__gpu_matrix_multPiS_S_iii # -- Begin function _Z30__device_stub__gpu_matrix_multPiS_S_iii .p2align 4, 0x90 .type _Z30__device_stub__gpu_matrix_multPiS_S_iii,@function _Z30__device_stub__gpu_matrix_multPiS_S_iii: # @_Z30__device_stub__gpu_matrix_multPiS_S_iii .cfi_startproc # %bb.0: subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 88(%rsp) movq %rsi, 80(%rsp) movq %rdx, 72(%rsp) movl %ecx, 20(%rsp) movl %r8d, 16(%rsp) movl %r9d, 12(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 80(%rsp), %rax movq %rax, 104(%rsp) leaq 72(%rsp), %rax movq %rax, 112(%rsp) leaq 20(%rsp), %rax movq %rax, 120(%rsp) leaq 16(%rsp), %rax movq %rax, 128(%rsp) leaq 12(%rsp), %rax movq %rax, 136(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z15gpu_matrix_multPiS_S_iii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $168, %rsp .cfi_adjust_cfa_offset -168 retq .Lfunc_end0: .size _Z30__device_stub__gpu_matrix_multPiS_S_iii, .Lfunc_end0-_Z30__device_stub__gpu_matrix_multPiS_S_iii .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI1_0: .quad 0x3f50000000000000 # double 9.765625E-4 .LCPI1_1: .quad 0x412f400000000000 # double 1024000 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $280, %rsp # imm = 0x118 .cfi_def_cfa_offset 336 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl $3333, %edi # imm = 0xD05 callq srand leaq 144(%rsp), %rdi xorl %ebx, %ebx movl $262144, %esi # imm = 0x40000 xorl %edx, %edx callq hipHostMalloc leaq 136(%rsp), %rdi movl $1073741824, %esi # imm = 0x40000000 xorl %edx, %edx callq hipHostMalloc leaq 272(%rsp), %rdi movl $16384, %esi # imm = 0x4000 xorl %edx, %edx callq hipHostMalloc leaq 128(%rsp), %rdi movl $16777216, %esi # imm = 0x1000000 xorl %edx, %edx callq hipHostMalloc leaq 264(%rsp), %rdi movl $4096, %esi # imm = 0x1000 xorl %edx, %edx callq hipHostMalloc leaq 240(%rsp), %rdi movl $163840, %esi # imm = 0x28000 xorl %edx, %edx callq hipHostMalloc leaq 256(%rsp), %rdi movl $40, %esi xorl %edx, %edx callq hipHostMalloc xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_1: # %.preheader162 # =>This Loop Header: Depth=1 # Child Loop BB1_2 Depth 2 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_2: # Parent Loop BB1_1 Depth=1 # => This Inner Loop Header: Depth=2 callq rand # kill: def $eax killed $eax def $rax leal 1023(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-1024, %ecx # imm = 0xFC00 subl %ecx, %eax movq 136(%rsp), %rcx addq %rbx, %rcx movl %eax, (%rcx,%r15,4) incq %r15 cmpq $4096, %r15 # imm = 0x1000 jne .LBB1_2 # %bb.3: # in Loop: Header=BB1_1 Depth=1 incq %r14 addq $16384, %rbx # imm = 0x4000 cmpq $65536, %r14 # imm = 0x10000 jne .LBB1_1 # %bb.4: # %.preheader160.preheader xorl %ebx, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_5: # %.preheader160 # =>This Loop Header: Depth=1 # Child Loop BB1_6 Depth 2 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_6: # Parent Loop BB1_5 Depth=1 # => This Inner Loop Header: Depth=2 callq rand # kill: def $eax killed $eax def $rax leal 1023(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-1024, %ecx # imm = 0xFC00 subl %ecx, %eax movq 128(%rsp), %rcx addq %rbx, %rcx movl %eax, (%rcx,%r15,4) incq %r15 cmpq $1024, %r15 # imm = 0x400 jne .LBB1_6 # %bb.7: # in Loop: Header=BB1_5 Depth=1 incq %r14 addq $4096, %rbx # imm = 0x1000 cmpq $4096, %r14 # imm = 0x1000 jne .LBB1_5 # %bb.8: # %.preheader158.preheader xorl %ebx, %ebx xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_9: # %.preheader158 # =>This Loop Header: Depth=1 # Child Loop BB1_10 Depth 2 xorl %r15d, %r15d .p2align 4, 0x90 .LBB1_10: # Parent Loop BB1_9 Depth=1 # => This Inner Loop Header: Depth=2 callq rand # kill: def $eax killed $eax def $rax leal 1023(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-1024, %ecx # imm = 0xFC00 subl %ecx, %eax movq 240(%rsp), %rcx addq %rbx, %rcx movl %eax, (%rcx,%r15,4) incq %r15 cmpq $10, %r15 jne .LBB1_10 # %bb.11: # in Loop: Header=BB1_9 Depth=1 incq %r14 addq $40, %rbx cmpq $4096, %r14 # imm = 0x1000 jne .LBB1_9 # %bb.12: movabsq $68719476752, %rbx # imm = 0x1000000010 movabsq $4294967297, %r14 # imm = 0x100000001 leaq 232(%rsp), %rdi callq hipEventCreate leaq 120(%rsp), %rdi callq hipEventCreate leaq 112(%rsp), %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc leaq 104(%rsp), %rdi movl $1073741824, %esi # imm = 0x40000000 callq hipMalloc leaq 88(%rsp), %rdi movl $16384, %esi # imm = 0x4000 callq hipMalloc leaq 96(%rsp), %rdi movl $16777216, %esi # imm = 0x1000000 callq hipMalloc leaq 224(%rsp), %rdi movl $4096, %esi # imm = 0x1000 callq hipMalloc leaq 216(%rsp), %rdi movl $163840, %esi # imm = 0x28000 callq hipMalloc leaq 208(%rsp), %rdi movl $40, %esi callq hipMalloc movq 104(%rsp), %rdi movq 136(%rsp), %rsi movl $1073741824, %edx # imm = 0x40000000 movl $1, %ecx callq hipMemcpy movq 96(%rsp), %rdi movq 128(%rsp), %rsi movl $16777216, %edx # imm = 0x1000000 movl $1, %ecx callq hipMemcpy movq 216(%rsp), %rdi movq 240(%rsp), %rsi movl $163840, %edx # imm = 0x28000 movl $1, %ecx callq hipMemcpy xorpd %xmm2, %xmm2 xorl %ebp, %ebp leaq 255(%r14), %r15 leaq 160(%rsp), %r12 leaq 63(%r14), %r13 jmp .LBB1_13 .p2align 4, 0x90 .LBB1_21: # in Loop: Header=BB1_13 Depth=1 movq 256(%rsp), %rdi movq 208(%rsp), %rsi movl $4096, %edx # imm = 0x1000 movl $2, %ecx callq hipMemcpy callq hipDeviceSynchronize movq 120(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 120(%rsp), %rdi callq hipEventSynchronize movq 232(%rsp), %rsi movq 120(%rsp), %rdx leaq 156(%rsp), %rdi callq hipEventElapsedTime incl %ebp movss 156(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 movsd 248(%rsp), %xmm2 # 8-byte Reload # xmm2 = mem[0],zero addsd %xmm0, %xmm2 cmpl $1024, %ebp # imm = 0x400 je .LBB1_22 .LBB1_13: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB1_14 Depth 2 movsd %xmm2, 248(%rsp) # 8-byte Spill xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_14: # Parent Loop BB1_13 Depth=1 # => This Inner Loop Header: Depth=2 callq rand # kill: def $eax killed $eax def $rax leal 1023(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-1024, %ecx # imm = 0xFC00 subl %ecx, %eax movq 144(%rsp), %rcx movl %eax, (%rcx,%r14,4) incq %r14 cmpq $65536, %r14 # imm = 0x10000 jne .LBB1_14 # %bb.15: # %.critedge # in Loop: Header=BB1_13 Depth=1 movq 232(%rsp), %rdi xorl %esi, %esi callq hipEventRecord movq 112(%rsp), %rdi movq 144(%rsp), %rsi movl $262144, %edx # imm = 0x40000 movl $1, %ecx callq hipMemcpy movq %r15, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_17 # %bb.16: # in Loop: Header=BB1_13 Depth=1 movq 112(%rsp), %rax movq 104(%rsp), %rcx movq 88(%rsp), %rdx movq %rax, 80(%rsp) movq %rcx, 72(%rsp) movq %rdx, 64(%rsp) movl $1, 12(%rsp) movl $65536, 8(%rsp) # imm = 0x10000 movl $4096, 4(%rsp) # imm = 0x1000 leaq 80(%rsp), %rax movq %rax, 160(%rsp) leaq 72(%rsp), %rax movq %rax, 168(%rsp) leaq 64(%rsp), %rax movq %rax, 176(%rsp) leaq 12(%rsp), %rax movq %rax, 184(%rsp) leaq 8(%rsp), %rax movq %rax, 192(%rsp) leaq 4(%rsp), %rax movq %rax, 200(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movl $_Z15gpu_matrix_multPiS_S_iii, %edi movq %r12, %r9 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_17: # in Loop: Header=BB1_13 Depth=1 callq hipDeviceSynchronize movq %r13, %rdi movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_19 # %bb.18: # in Loop: Header=BB1_13 Depth=1 movq 88(%rsp), %rax movq 96(%rsp), %rcx movq 224(%rsp), %rdx movq %rax, 80(%rsp) movq %rcx, 72(%rsp) movq %rdx, 64(%rsp) movl $1, 12(%rsp) movl $4096, 8(%rsp) # imm = 0x1000 movl $1024, 4(%rsp) # imm = 0x400 leaq 80(%rsp), %rax movq %rax, 160(%rsp) leaq 72(%rsp), %rax movq %rax, 168(%rsp) leaq 64(%rsp), %rax movq %rax, 176(%rsp) leaq 12(%rsp), %rax movq %rax, 184(%rsp) leaq 8(%rsp), %rax movq %rax, 192(%rsp) leaq 4(%rsp), %rax movq %rax, 200(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movl $_Z15gpu_matrix_multPiS_S_iii, %edi movq %r12, %r9 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_19: # in Loop: Header=BB1_13 Depth=1 movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rbx, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_21 # %bb.20: # in Loop: Header=BB1_13 Depth=1 movq 88(%rsp), %rax movq 216(%rsp), %rcx movq 208(%rsp), %rdx movq %rax, 80(%rsp) movq %rcx, 72(%rsp) movq %rdx, 64(%rsp) movl $1, 12(%rsp) movl $4096, 8(%rsp) # imm = 0x1000 movl $10, 4(%rsp) leaq 80(%rsp), %rax movq %rax, 160(%rsp) leaq 72(%rsp), %rax movq %rax, 168(%rsp) leaq 64(%rsp), %rax movq %rax, 176(%rsp) leaq 12(%rsp), %rax movq %rax, 184(%rsp) leaq 8(%rsp), %rax movq %rax, 192(%rsp) leaq 4(%rsp), %rax movq %rax, 200(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d movl $_Z15gpu_matrix_multPiS_S_iii, %edi movq %r12, %r9 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 jmp .LBB1_21 .LBB1_22: movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero mulsd %xmm2, %xmm0 movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero divsd %xmm2, %xmm1 movl $.L.str, %edi movb $2, %al callq printf movq 112(%rsp), %rdi callq hipFree movq 104(%rsp), %rdi callq hipFree movq 88(%rsp), %rdi callq hipFree movq 96(%rsp), %rdi callq hipFree movq 224(%rsp), %rdi callq hipFree movq 144(%rsp), %rdi callq hipHostFree movq 136(%rsp), %rdi callq hipHostFree movq 272(%rsp), %rdi callq hipHostFree movq 128(%rsp), %rdi callq hipHostFree movq 264(%rsp), %rdi callq hipHostFree xorl %eax, %eax addq $280, %rsp # imm = 0x118 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z15gpu_matrix_multPiS_S_iii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z15gpu_matrix_multPiS_S_iii,@object # @_Z15gpu_matrix_multPiS_S_iii .section .rodata,"a",@progbits .globl _Z15gpu_matrix_multPiS_S_iii .p2align 3, 0x0 _Z15gpu_matrix_multPiS_S_iii: .quad _Z30__device_stub__gpu_matrix_multPiS_S_iii .size _Z15gpu_matrix_multPiS_S_iii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Avg. Latency: %g ms :: Avg. Throughput: %g examples/sec\n" .size .L.str, 57 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z15gpu_matrix_multPiS_S_iii" .size .L__unnamed_1, 29 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z30__device_stub__gpu_matrix_multPiS_S_iii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z15gpu_matrix_multPiS_S_iii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z15gpu_matrix_multPiS_S_iii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e280000002100 */ /*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */ /* 0x000e680000002600 */ /*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0205 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */ /* 0x002fca00078e0202 */ /*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ MOV R4, c[0x0][0x17c] ; /* 0x00005f0000047a02 */ /* 0x000fe20000000f00 */ /*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*00c0*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */ /* 0x000fe400000001ff */ /*00d0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */ /* 0x000fda0003f06270 */ /*00e0*/ @!P0 BRA 0xc30 ; /* 0x00000b4000008947 */ /* 0x000fea0003800000 */ /*00f0*/ IADD3 R2, R4.reuse, -0x1, RZ ; /* 0xffffffff04027810 */ /* 0x040fe40007ffe0ff */ /*0100*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */ /* 0x000fe400078ec0ff */ /*0110*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */ /* 0x000fe40003f06070 */ /*0120*/ MOV R2, RZ ; /* 0x000000ff00027202 */ /* 0x000fe40000000f00 */ /*0130*/ MOV R24, RZ ; /* 0x000000ff00187202 */ /* 0x000fd20000000f00 */ /*0140*/ @!P0 BRA 0xb20 ; /* 0x000009d000008947 */ /* 0x000fea0003800000 */ /*0150*/ IADD3 R5, -R4, c[0x0][0x17c], RZ ; /* 0x00005f0004057a10 */ /* 0x000fe20007ffe1ff */ /*0160*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */ /* 0x000fe200000001ff */ /*0170*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */ /* 0x000fe20000000a00 */ /*0180*/ IMAD R6, R3, c[0x0][0x17c], RZ ; /* 0x00005f0003067a24 */ /* 0x000fe200078e02ff */ /*0190*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe40003f04270 */ /*01a0*/ MOV R2, RZ ; /* 0x000000ff00027202 */ /* 0x000fca0000000f00 */ /*01b0*/ IMAD.WIDE R8, R0, R9, c[0x0][0x168] ; /* 0x00005a0000087625 */ /* 0x000fcc00078e0209 */ /*01c0*/ @!P0 BRA 0x980 ; /* 0x000007b000008947 */ /* 0x000fea0003800000 */ /*01d0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fe40003f24270 */ /*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */ /* 0x000fd60003f0f070 */ /*01f0*/ @!P1 BRA 0x6b0 ; /* 0x000004b000009947 */ /* 0x000fea0003800000 */ /*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fe40003f0e170 */ /*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */ /* 0x000fe20008000f00 */ /*0220*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */ /* 0x0000a2000c1e1900 */ /*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */ /* 0x000fca0008000f00 */ /*0240*/ IMAD.WIDE R12, R6, 0x4, R12 ; /* 0x00000004060c7825 */ /* 0x000fca00078e020c */ /*0250*/ LDG.E R20, [R12.64] ; /* 0x000000040c147981 */ /* 0x000ea2000c1e1900 */ /*0260*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */ /* 0x000fc60000000f00 */ /*0270*/ LDG.E R14, [R12.64+0x4] ; /* 0x000004040c0e7981 */ /* 0x000ee4000c1e1900 */ /*0280*/ IMAD.WIDE R10, R7.reuse, 0x4, R8 ; /* 0x00000004070a7825 */ /* 0x040fe400078e0208 */ /*0290*/ LDG.E R27, [R12.64+0x8] ; /* 0x000008040c1b7981 */ /* 0x000f28000c1e1900 */ /*02a0*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */ /* 0x0002e2000c1e1900 */ /*02b0*/ IMAD.WIDE R22, R7, 0x4, R10 ; /* 0x0000000407167825 */ /* 0x000fc600078e020a */ /*02c0*/ LDG.E R18, [R12.64+0xc] ; /* 0x00000c040c127981 */ /* 0x000f66000c1e1900 */ /*02d0*/ IMAD.WIDE R28, R7.reuse, 0x4, R22 ; /* 0x00000004071c7825 */ /* 0x040fe200078e0216 */ /*02e0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */ /* 0x000328000c1e1900 */ /*02f0*/ LDG.E R19, [R28.64] ; /* 0x000000041c137981 */ /* 0x000362000c1e1900 */ /*0300*/ IMAD.WIDE R16, R7, 0x4, R28 ; /* 0x0000000407107825 */ /* 0x000fc600078e021c */ /*0310*/ LDG.E R8, [R12.64+0x10] ; /* 0x000010040c087981 */ /* 0x001f68000c1e1900 */ /*0320*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */ /* 0x000168000c1e1900 */ /*0330*/ LDG.E R10, [R12.64+0x14] ; /* 0x000014040c0a7981 */ /* 0x002f68000c1e1900 */ /*0340*/ LDG.E R28, [R12.64+0x1c] ; /* 0x00001c040c1c7981 */ /* 0x000f62000c1e1900 */ /*0350*/ IMAD.WIDE R16, R7, 0x4, R16 ; /* 0x0000000407107825 */ /* 0x001fca00078e0210 */ /*0360*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */ /* 0x000562000c1e1900 */ /*0370*/ IMAD.WIDE R22, R7, 0x4, R16 ; /* 0x0000000407167825 */ /* 0x000fc800078e0210 */ /*0380*/ IMAD R16, R21, R20, R24 ; /* 0x0000001415107224 */ /* 0x004fe400078e0218 */ /*0390*/ LDG.E R20, [R12.64+0x18] ; /* 0x000018040c147981 */ /* 0x000ea2000c1e1900 */ /*03a0*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */ /* 0x000fc600078e0216 */ /*03b0*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */ /* 0x0000a8000c1e1900 */ /*03c0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */ /* 0x0002a2000c1e1900 */ /*03d0*/ IMAD R16, R15, R14, R16 ; /* 0x0000000e0f107224 */ /* 0x008fe400078e0210 */ /*03e0*/ IMAD.WIDE R14, R7.reuse, 0x4, R24 ; /* 0x00000004070e7825 */ /* 0x040fe200078e0218 */ /*03f0*/ LDG.E R23, [R12.64+0x20] ; /* 0x000020040c177981 */ /* 0x001ee6000c1e1900 */ /*0400*/ IMAD R26, R26, R27, R16 ; /* 0x0000001b1a1a7224 */ /* 0x010fe200078e0210 */ /*0410*/ LDG.E R25, [R12.64+0x24] ; /* 0x000024040c197981 */ /* 0x002f22000c1e1900 */ /*0420*/ IMAD.WIDE R16, R7, 0x4, R14 ; /* 0x0000000407107825 */ /* 0x000fc600078e020e */ /*0430*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x0000e2000c1e1900 */ /*0440*/ IMAD R26, R19, R18, R26 ; /* 0x00000012131a7224 */ /* 0x020fe400078e021a */ /*0450*/ IMAD.WIDE R18, R7, 0x4, R16 ; /* 0x0000000407127825 */ /* 0x000fe200078e0210 */ /*0460*/ LDG.E R22, [R12.64+0x28] ; /* 0x000028040c167981 */ /* 0x000f66000c1e1900 */ /*0470*/ IMAD R26, R9, R8, R26 ; /* 0x00000008091a7224 */ /* 0x000fe200078e021a */ /*0480*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x000322000c1e1900 */ /*0490*/ IMAD.WIDE R8, R7, 0x4, R18 ; /* 0x0000000407087825 */ /* 0x000fc600078e0212 */ /*04a0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */ /* 0x000368000c1e1900 */ /*04b0*/ LDG.E R24, [R8.64] ; /* 0x0000000408187981 */ /* 0x000568000c1e1900 */ /*04c0*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */ /* 0x001f62000c1e1900 */ /*04d0*/ IMAD R26, R11, R10, R26 ; /* 0x0000000a0b1a7224 */ /* 0x000fe400078e021a */ /*04e0*/ IMAD.WIDE R10, R7, 0x4, R8 ; /* 0x00000004070a7825 */ /* 0x000fe200078e0208 */ /*04f0*/ LDG.E R17, [R12.64+0x30] ; /* 0x000030040c117981 */ /* 0x002f66000c1e1900 */ /*0500*/ IMAD R26, R21, R20, R26 ; /* 0x00000014151a7224 */ /* 0x004fc400078e021a */ /*0510*/ IMAD.WIDE R20, R7, 0x4, R10 ; /* 0x0000000407147825 */ /* 0x000fe400078e020a */ /*0520*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */ /* 0x0000a4000c1e1900 */ /*0530*/ IMAD R28, R29, R28, R26 ; /* 0x0000001c1d1c7224 */ /* 0x000fe400078e021a */ /*0540*/ IMAD.WIDE R26, R7.reuse, 0x4, R20 ; /* 0x00000004071a7825 */ /* 0x040fe200078e0214 */ /*0550*/ LDG.E R29, [R12.64+0x34] ; /* 0x000034040c1d7981 */ /* 0x000ea8000c1e1900 */ /*0560*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */ /* 0x0002a2000c1e1900 */ /*0570*/ IMAD.WIDE R8, R7, 0x4, R26 ; /* 0x0000000407087825 */ /* 0x000fc600078e021a */ /*0580*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */ /* 0x0006a8000c1e1900 */ /*0590*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */ /* 0x0010a8000c1e1900 */ /*05a0*/ LDG.E R21, [R12.64+0x38] ; /* 0x000038040c157981 */ /* 0x002ea8000c1e1900 */ /*05b0*/ LDG.E R26, [R12.64+0x3c] ; /* 0x00003c040c1a7981 */ /* 0x008ee2000c1e1900 */ /*05c0*/ IMAD R14, R14, R23, R28 ; /* 0x000000170e0e7224 */ /* 0x000fc800078e021c */ /*05d0*/ IMAD R25, R16, R25, R14 ; /* 0x0000001910197224 */ /* 0x010fe200078e020e */ /*05e0*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */ /* 0x000fc60007ffe0ff */ /*05f0*/ IMAD R18, R18, R22, R25 ; /* 0x0000001612127224 */ /* 0x020fe200078e0219 */ /*0600*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */ /* 0x000fc60003f24270 */ /*0610*/ IMAD R15, R24, R15, R18 ; /* 0x0000000f180f7224 */ /* 0x000fe200078e0212 */ /*0620*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */ /* 0x000fe2000ff1e03f */ /*0630*/ IMAD.WIDE R8, R7, 0x4, R8 ; /* 0x0000000407087825 */ /* 0x001fc600078e0208 */ /*0640*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0650*/ IADD3 R2, R2, 0x10, RZ ; /* 0x0000001002027810 */ /* 0x000fe20007ffe0ff */ /*0660*/ IMAD R10, R10, R17, R15 ; /* 0x000000110a0a7224 */ /* 0x004fc800078e020f */ /*0670*/ IMAD R10, R20, R29, R10 ; /* 0x0000001d140a7224 */ /* 0x000fc800078e020a */ /*0680*/ IMAD R10, R19, R21, R10 ; /* 0x00000015130a7224 */ /* 0x000fc800078e020a */ /*0690*/ IMAD R24, R11, R26, R10 ; /* 0x0000001a0b187224 */ /* 0x008fe200078e020a */ /*06a0*/ @P1 BRA 0x210 ; /* 0xfffffb6000001947 */ /* 0x000fea000383ffff */ /*06b0*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */ /* 0x000fda0003f24270 */ /*06c0*/ @!P1 BRA 0x960 ; /* 0x0000029000009947 */ /* 0x000fea0003800000 */ /*06d0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */ /* 0x000fe20000000f00 */ /*06e0*/ LDG.E R23, [R8.64] ; /* 0x0000000408177981 */ /* 0x0000a2000c1e1900 */ /*06f0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */ /* 0x000fe40008000f00 */ /*0700*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */ /* 0x000fe20008000f00 */ /*0710*/ IMAD.WIDE R16, R7, 0x4, R8 ; /* 0x0000000407107825 */ /* 0x000fc800078e0208 */ /*0720*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */ /* 0x000fc800078e020a */ /*0730*/ IMAD.WIDE R12, R7.reuse, 0x4, R16 ; /* 0x00000004070c7825 */ /* 0x040fe200078e0210 */ /*0740*/ LDG.E R22, [R10.64] ; /* 0x000000040a167981 */ /* 0x000ea8000c1e1900 */ /*0750*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */ /* 0x0002e2000c1e1900 */ /*0760*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */ /* 0x000fc600078e020c */ /*0770*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */ /* 0x000ee6000c1e1900 */ /*0780*/ IMAD.WIDE R18, R7.reuse, 0x4, R14 ; /* 0x0000000407127825 */ /* 0x040fe200078e020e */ /*0790*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */ /* 0x000968000c1e1900 */ /*07a0*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */ /* 0x000f62000c1e1900 */ /*07b0*/ IMAD.WIDE R20, R7, 0x4, R18 ; /* 0x0000000407147825 */ /* 0x000fc600078e0212 */ /*07c0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */ /* 0x000368000c1e1900 */ /*07d0*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */ /* 0x000f62000c1e1900 */ /*07e0*/ IMAD.WIDE R8, R7, 0x4, R20 ; /* 0x0000000407087825 */ /* 0x001fc600078e0214 */ /*07f0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */ /* 0x000168000c1e1900 */ /*0800*/ LDG.E R28, [R10.64+0x10] ; /* 0x000010040a1c7981 */ /* 0x000f62000c1e1900 */ /*0810*/ IMAD.WIDE R12, R7, 0x4, R8 ; /* 0x00000004070c7825 */ /* 0x010fc600078e0208 */ /*0820*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */ /* 0x000968000c1e1900 */ /*0830*/ LDG.E R15, [R10.64+0x14] ; /* 0x000014040a0f7981 */ /* 0x002f68000c1e1900 */ /*0840*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */ /* 0x000368000c1e1900 */ /*0850*/ LDG.E R21, [R10.64+0x1c] ; /* 0x00001c040a157981 */ /* 0x010f28000c1e1900 */ /*0860*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */ /* 0x001f28000c1e1900 */ /*0870*/ LDG.E R8, [R10.64+0x18] ; /* 0x000018040a087981 */ /* 0x002f22000c1e1900 */ /*0880*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */ /* 0x000fe2000ff1e03f */ /*0890*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */ /* 0x000fc40003f0e170 */ /*08a0*/ IADD3 R2, R2, 0x8, RZ ; /* 0x0000000802027810 */ /* 0x000fe40007ffe0ff */ /*08b0*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */ /* 0x000fe20007ffe0ff */ /*08c0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*08d0*/ IMAD R22, R23, R22, R24 ; /* 0x0000001617167224 */ /* 0x004fc800078e0218 */ /*08e0*/ IMAD R16, R16, R25, R22 ; /* 0x0000001910107224 */ /* 0x008fc800078e0216 */ /*08f0*/ IMAD R16, R26, R27, R16 ; /* 0x0000001b1a107224 */ /* 0x020fc800078e0210 */ /*0900*/ IMAD R29, R14, R29, R16 ; /* 0x0000001d0e1d7224 */ /* 0x000fc800078e0210 */ /*0910*/ IMAD R18, R18, R28, R29 ; /* 0x0000001c12127224 */ /* 0x000fc800078e021d */ /*0920*/ IMAD R15, R20, R15, R18 ; /* 0x0000000f140f7224 */ /* 0x000fc800078e0212 */ /*0930*/ IMAD R24, R17, R8, R15 ; /* 0x0000000811187224 */ /* 0x010fe400078e020f */ /*0940*/ IMAD.WIDE R8, R7, 0x4, R12 ; /* 0x0000000407087825 */ /* 0x000fc800078e020c */ /*0950*/ IMAD R24, R19, R21, R24 ; /* 0x0000001513187224 */ /* 0x000fe400078e0218 */ /*0960*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */ /* 0x000fda0000705670 */ /*0970*/ @!P0 BRA 0xb20 ; /* 0x000001a000008947 */ /* 0x000fea0003800000 */ /*0980*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */ /* 0x000fe40008000f00 */ /*0990*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */ /* 0x000fe40008000f00 */ /*09a0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */ /* 0x000fc60000000f00 */ /*09b0*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */ /* 0x000fc800078e020a */ /*09c0*/ IMAD.WIDE R16, R7.reuse, 0x4, R8 ; /* 0x0000000407107825 */ /* 0x040fe200078e0208 */ /*09d0*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */ /* 0x000ea8000c1e1900 */ /*09e0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */ /* 0x000ea2000c1e1900 */ /*09f0*/ IMAD.WIDE R12, R7, 0x4, R16 ; /* 0x00000004070c7825 */ /* 0x000fc600078e0210 */ /*0a00*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */ /* 0x000ee8000c1e1900 */ /*0a10*/ LDG.E R19, [R10.64+0x4] ; /* 0x000004040a137981 */ /* 0x000ee2000c1e1900 */ /*0a20*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */ /* 0x000fc600078e020c */ /*0a30*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */ /* 0x000f28000c1e1900 */ /*0a40*/ LDG.E R20, [R10.64+0x8] ; /* 0x000008040a147981 */ /* 0x000f28000c1e1900 */ /*0a50*/ LDG.E R22, [R10.64+0xc] ; /* 0x00000c040a167981 */ /* 0x000f68000c1e1900 */ /*0a60*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */ /* 0x000f62000c1e1900 */ /*0a70*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */ /* 0x000fc80007ffe0ff */ /*0a80*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */ /* 0x000fe20003f05270 */ /*0a90*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */ /* 0x000fe2000ff1e03f */ /*0aa0*/ IADD3 R2, R2, 0x4, RZ ; /* 0x0000000402027810 */ /* 0x000fc60007ffe0ff */ /*0ab0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */ /* 0x000fe200087fe43f */ /*0ac0*/ IMAD R18, R9, R18, R24 ; /* 0x0000001209127224 */ /* 0x004fc800078e0218 */ /*0ad0*/ IMAD R18, R17, R19, R18 ; /* 0x0000001311127224 */ /* 0x008fe400078e0212 */ /*0ae0*/ IMAD.WIDE R8, R7, 0x4, R14 ; /* 0x0000000407087825 */ /* 0x000fc800078e020e */ /*0af0*/ IMAD R18, R21, R20, R18 ; /* 0x0000001415127224 */ /* 0x010fc800078e0212 */ /*0b00*/ IMAD R24, R23, R22, R18 ; /* 0x0000001617187224 */ /* 0x020fe200078e0212 */ /*0b10*/ @P0 BRA 0x980 ; /* 0xfffffe6000000947 */ /* 0x000fea000383ffff */ /*0b20*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fda0003f05270 */ /*0b30*/ @!P0 BRA 0xc30 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*0b40*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */ /* 0x000fe200000001ff */ /*0b50*/ IMAD R6, R3, c[0x0][0x17c], R2 ; /* 0x00005f0003067a24 */ /* 0x000fe400078e0202 */ /*0b60*/ IMAD R2, R2, c[0x0][0x180], R0 ; /* 0x0000600002027a24 */ /* 0x000fce00078e0200 */ /*0b70*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */ /* 0x000fc800078e0209 */ /*0b80*/ IMAD.WIDE R8, R2, R9, c[0x0][0x168] ; /* 0x00005a0002087625 */ /* 0x000fca00078e0209 */ /*0b90*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */ /* 0x0000a8000c1e1900 */ /*0ba0*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */ /* 0x0002a2000c1e1900 */ /*0bb0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */ /* 0x000fe40007ffe0ff */ /*0bc0*/ MOV R11, c[0x0][0x180] ; /* 0x00006000000b7a02 */ /* 0x000fe40000000f00 */ /*0bd0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */ /* 0x000fc60003f05270 */ /*0be0*/ IMAD.WIDE R8, R11, 0x4, R8 ; /* 0x000000040b087825 */ /* 0x001fe200078e0208 */ /*0bf0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */ /* 0x002fc80007f3e0ff */ /*0c00*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */ /* 0x000fe20000ffe4ff */ /*0c10*/ IMAD R24, R5, R2, R24 ; /* 0x0000000205187224 */ /* 0x004fc800078e0218 */ /*0c20*/ @P0 BRA 0xb90 ; /* 0xffffff6000000947 */ /* 0x000fea000383ffff */ /*0c30*/ MOV R2, 0x4 ; /* 0x0000000400027802 */ /* 0x000fe20000000f00 */ /*0c40*/ IMAD R3, R3, c[0x0][0x180], R0 ; /* 0x0000600003037a24 */ /* 0x000fc800078e0200 */ /*0c50*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */ /* 0x000fca00078e0202 */ /*0c60*/ STG.E [R2.64], R24 ; /* 0x0000001802007986 */ /* 0x000fe2000c101904 */ /*0c70*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0c80*/ BRA 0xc80; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0c90*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ca0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cb0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cc0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cd0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0ce0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0cf0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d00*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d10*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d20*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d30*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d40*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d50*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d60*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0d70*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z15gpu_matrix_multPiS_S_iii .globl _Z15gpu_matrix_multPiS_S_iii .p2align 8 .type _Z15gpu_matrix_multPiS_S_iii,@function _Z15gpu_matrix_multPiS_S_iii: s_clause 0x2 s_load_b32 s2, s[0:1], 0x34 s_load_b32 s3, s[0:1], 0x20 s_load_b32 s4, s[0:1], 0x18 v_and_b32_e32 v2, 0x3ff, v0 v_bfe_u32 v3, v0, 10, 10 s_waitcnt lgkmcnt(0) s_lshr_b32 s5, s2, 16 s_and_b32 s2, s2, 0xffff s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) v_mad_u64_u32 v[0:1], null, s14, s2, v[2:3] v_mad_u64_u32 v[1:2], null, s15, s5, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s3, v0 v_cmp_gt_i32_e64 s2, s4, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, s2, vcc_lo s_and_saveexec_b32 s4, s2 s_cbranch_execz .LBB0_6 s_load_b32 s2, s[0:1], 0x1c s_waitcnt lgkmcnt(0) s_cmp_lt_i32 s2, 1 s_cbranch_scc1 .LBB0_4 s_load_b128 s[4:7], s[0:1], 0x0 v_mul_lo_u32 v2, v1, s2 v_mov_b32_e32 v5, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v3, 31, v2 v_lshlrev_b64 v[3:4], 2, v[2:3] v_mov_b32_e32 v2, 0 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo .p2align 6 .LBB0_3: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) v_ashrrev_i32_e32 v6, 31, v5 s_add_i32 s2, s2, -1 s_cmp_eq_u32 s2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[6:7], 2, v[5:6] v_add_co_u32 v6, vcc_lo, s6, v6 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo global_load_b32 v8, v[3:4], off global_load_b32 v9, v[6:7], off s_waitcnt vmcnt(0) v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3] v_add_co_u32 v3, vcc_lo, v3, 4 v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo s_delay_alu instid0(VALU_DEP_3) v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s3, v5 s_cbranch_scc0 .LBB0_3 s_branch .LBB0_5 .LBB0_4: v_mov_b32_e32 v2, 0 .LBB0_5: s_load_b64 s[0:1], s[0:1], 0x10 v_mad_u64_u32 v[3:4], null, v1, s3, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v4, 31, v3 v_lshlrev_b64 v[0:1], 2, v[3:4] s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_store_b32 v[0:1], v2, off .LBB0_6: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z15gpu_matrix_multPiS_S_iii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 296 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 10 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z15gpu_matrix_multPiS_S_iii, .Lfunc_end0-_Z15gpu_matrix_multPiS_S_iii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: hidden_block_count_x - .offset: 44 .size: 4 .value_kind: hidden_block_count_y - .offset: 48 .size: 4 .value_kind: hidden_block_count_z - .offset: 52 .size: 2 .value_kind: hidden_group_size_x - .offset: 54 .size: 2 .value_kind: hidden_group_size_y - .offset: 56 .size: 2 .value_kind: hidden_group_size_z - .offset: 58 .size: 2 .value_kind: hidden_remainder_x - .offset: 60 .size: 2 .value_kind: hidden_remainder_y - .offset: 62 .size: 2 .value_kind: hidden_remainder_z - .offset: 80 .size: 8 .value_kind: hidden_global_offset_x - .offset: 88 .size: 8 .value_kind: hidden_global_offset_y - .offset: 96 .size: 8 .value_kind: hidden_global_offset_z - .offset: 104 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 296 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z15gpu_matrix_multPiS_S_iii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z15gpu_matrix_multPiS_S_iii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 10 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata